diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index d9596fe3..29ba1cce 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -24,6 +24,11 @@ jobs:
         with:
           node-version: "lts/*"
 
+      - name: Setup helm
+        uses: azure/setup-helm@v3
+        with:
+          version: "v3.11.1"
+
       - name: Release
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -32,7 +37,5 @@
           #!/bin/bash
           set -e
           # add more plugins: https://semantic-release.gitbook.io/semantic-release/extending/plugins-list
-          npm install @semantic-release/exec @eclass/semantic-release-docker @semantic-release/git -D
-          # the ... || true is to prevent the script from failing caused by a bug in semantic-release regarding
-          # rate limit changes on github. See https://github.com/semantic-release/github/pull/487
-          npx -c semantic-release || true
+          npm install @semantic-release/exec @eclass/semantic-release-docker @semantic-release/git semantic-release-helm3 -D
+          npx -c semantic-release
diff --git a/.releaserc.json b/.releaserc.json
index 6a08cc90..fa9c5264 100644
--- a/.releaserc.json
+++ b/.releaserc.json
@@ -201,18 +201,56 @@
         "prepareCmd": "sed -i \"s#ghcr.io/edgefarm/edgefarm.network/xfn-log2webhook:.*#ghcr.io/edgefarm/edgefarm.network/xfn-log2webhook:${nextRelease.version}#g\" crossplane-functions/log2webhook/Readme.md"
       }
     ],
+    [
+      "semantic-release-helm3",
+      {
+        "chartPath": "./charts/network-compositions-helm",
+        "registry": "ghcr.io/edgefarm/edgefarm.network"
+      }
+    ],
+    [
+      "semantic-release-helm3",
+      {
+        "chartPath": "./charts/network-dependencies-webhook-helm",
+        "registry": "ghcr.io/edgefarm/edgefarm.network"
+      }
+    ],
+    [
+      "semantic-release-helm3",
+      {
+        "chartPath": "./charts/network-resource-info-helm",
+        "registry": "ghcr.io/edgefarm/edgefarm.network"
+      }
+    ],
+    [
+      "semantic-release-helm3",
+      {
+        "chartPath": "./charts/provider-kubernetes-helm",
+        "registry": "ghcr.io/edgefarm/edgefarm.network"
+      }
+    ],
     [
       "@semantic-release/git",
       {
         "assets": [
           "deploy/compositions/network.streams.network.edgefarm.io/composition-networks.yaml",
           "deploy/network-resource-info/deployment.yaml",
-          "deploy/network-dependencies-webhook/deployment.yaml"
+          "deploy/network-dependencies-webhook/deployment.yaml",
+          "charts/network-compositions-helm/Chart.yaml",
+          "charts/network-dependencies-webhook-helm/Chart.yaml",
+          "charts/network-resource-info-helm/Chart.yaml",
+          "charts/provider-kubernetes-helm/Chart.yaml"
        ],
         "message": "chore(release): ${nextRelease.version} [skip ci]\n\n${nextRelease.notes}"
       }
     ],
     "@semantic-release/release-notes-generator",
-    "@semantic-release/github"
+    [
+      "@semantic-release/github",
+      {
+        "successComment": false,
+        "failTitle": false
+      }
+    ]
   ]
 }
\ No newline at end of file
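For context on the semantic-release-helm3 entries above: the plugin bumps each `Chart.yaml` version and pushes the packaged chart to the configured OCI registry. Assuming that default behavior, a released chart could be consumed roughly like this (user name and version are illustrative):

```console
$ helm registry login ghcr.io -u <github-user> -p "$GITHUB_TOKEN"
$ helm pull oci://ghcr.io/edgefarm/edgefarm.network/network-compositions-helm --version 1.0.0
```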
diff --git a/charts/edgefarm-network/Chart.lock b/charts/edgefarm-network/Chart.lock
new file mode 100644
index 00000000..0ae77d6c
--- /dev/null
+++ b/charts/edgefarm-network/Chart.lock
@@ -0,0 +1,27 @@
+dependencies:
+- name: provider-kubernetes
+  repository: file://../provider-kubernetes
+  version: 0.1.0
+- name: network-resource-info
+  repository: file://../network-resource-info
+  version: 0.1.0
+- name: network-compositions
+  repository: file://../network-compositions
+  version: 0.1.0
+- name: edgenetwork-operator
+  repository: file://../edgenetwork-operator
+  version: 0.1.0
+- name: provider-nats
+  repository: file://../provider-nats
+  version: 0.1.0
+- name: provider-natssecrets
+  repository: file://../provider-natssecrets
+  version: 0.1.0
+- name: network-dependencies-webhook
+  repository: file://../network-dependencies-webhook
+  version: 0.1.0
+- name: nats
+  repository: https://nats-io.github.io/k8s/helm/charts/
+  version: 1.0.0-rc.0
+digest: sha256:05db8f71bb857e38d396bedd44b45c0fbd908e18f8ca615d612d5a0b915e9474
+generated: "2023-07-07T08:54:05.131453431+02:00"
diff --git a/charts/edgefarm-network/Chart.yaml b/charts/edgefarm-network/Chart.yaml
new file mode 100644
index 00000000..1d715329
--- /dev/null
+++ b/charts/edgefarm-network/Chart.yaml
@@ -0,0 +1,48 @@
+apiVersion: v2
+name: edgefarm-network
+description: A Helm chart for edgefarm.network. Note that you need to have metacontroller, crossplane and vault with vault-plugin-secrets-nats installed to be able to install this chart.
+
+type: application
+version: 0.1.0
+appVersion: "2.1.0"
+
+keywords:
+  - edgefarm.network
+  - crd
+  - metacontroller
+  - crossplane
+  - nats
+  - credentials
+
+sources:
+  - https://github.com/edgefarm/edgefarm.network
+
+maintainers:
+  - name: Armin Schlegel
+    email: armin.schlegel@gmx.de
+
+dependencies:
+  - name: provider-kubernetes
+    version: 0.1.0
+    repository: "file://../provider-kubernetes"
+  - name: network-resource-info
+    version: 0.1.0
+    repository: "file://../network-resource-info"
+  - name: network-compositions
+    version: 0.1.0
+    repository: "file://../network-compositions"
+  - name: edgenetwork-operator
+    version: 0.1.0
+    repository: "file://../edgenetwork-operator"
+  - name: provider-nats
+    version: 0.1.0
+    repository: "file://../provider-nats"
+  - name: provider-natssecrets
+    version: 0.1.0
+    repository: "file://../provider-natssecrets"
+  - name: network-dependencies-webhook
+    version: 0.1.0
+    repository: "file://../network-dependencies-webhook"
+  - name: nats
+    version: 1.0.0-rc.0
+    repository: https://nats-io.github.io/k8s/helm/charts/
diff --git a/charts/edgefarm-network/README.md b/charts/edgefarm-network/README.md
new file mode 100644
index 00000000..fa9b645a
--- /dev/null
+++ b/charts/edgefarm-network/README.md
@@ -0,0 +1,89 @@
+# edgefarm.network
+
+This helm chart installs the edgefarm.network components:
+  - provider-kubernetes
+  - provider-nats
+  - provider-natssecrets
+  - network-compositions
+  - network-resource-info
+  - edgenetwork-operator
+  - network-dependencies-webhook
+  - nats
+
+## Prerequisites
+
+  Kubernetes 1.22+
+  Helm 3.2.0+
+  Crossplane 1.11.3+
+  Vault with vault-plugin-secrets-nats 1.3.2+
+
+# Needed Vault configuration
+
+To make this work, a few things have to be done in the vault configuration.
+
+## 1. configure kubernetes auth
+
+See https://developer.hashicorp.com/vault/docs/auth/kubernetes and
+https://developer.hashicorp.com/vault/docs/auth/kubernetes#use-local-service-account-token-as-the-reviewer-jwt for more information.
+
+If your Vault service runs in the same cluster, you can simply set `kubernetes_host` to `https://10.96.0.1`.
+
+```
+$ KUBE_CA_CERT=$(kubectl config view --raw --minify --flatten --output='jsonpath={.clusters[].cluster.certificate-authority-data}' | base64 --decode)
+$ vault write auth/kubernetes/config kubernetes_host=https://10.96.0.1 kubernetes_ca_cert="$KUBE_CA_CERT" disable_local_ca_jwt="true"
+```
+
+Otherwise, you need to modify `kubernetes_host` and pass the correct CA cert.
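The kubernetes auth setup can be sanity-checked once the role and service account exist (the role is created in step 3 below, the `nats-auth-config` service account by the chart itself). A minimal check, assuming Kubernetes 1.24+ for `kubectl create token`:

```console
$ vault read auth/kubernetes/config
$ vault write auth/kubernetes/login role=nats-auth-config jwt="$(kubectl create token nats-auth-config -n nats)"
```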
+## 2. create policy
+
+Replace each occurrence of `<operator>` with the name of your operator:
+
+```console
+vault policy write nats-auth-config - <<EOF
+path "nats-secrets/jwt/operator/<operator>" {
+  capabilities = ["read"]
+}
+path "nats-secrets/nkey/operator/<operator>/account/sys" {
+  capabilities = ["read"]
+}
+path "nats-secrets/jwt/operator/<operator>/account/sys" {
+  capabilities = ["read"]
+}
+EOF
+```
+
+## 3. create role
+
+By creating the kubernetes role, the service account `nats-auth-config` is allowed to access the paths specified in the policy `nats-auth-config`.
+
+```console
+vault write auth/kubernetes/role/nats-auth-config bound_service_account_names=nats-auth-config bound_service_account_namespaces="*" policies=nats-auth-config ttl=24h
+```
+
+# Chart configuration
+
+You can deploy the backend and core components independently by enabling/disabling them:
+
+| Component                 | Description                                                       | Default value |
+| ------------------------- | ----------------------------------------------------------------- | ------------- |
+| operatorName              | Specifies the name of the nats operator                           | operator      |
+| natsAuthConfig.enabled    | Specifies if the nats auth config job should be deployed          | true          |
+| networkBaseConfig.enabled | Specifies if the network base config configmap should be created  | true          |
+| operator.enabled          | Specifies if the operator should be created                       | true          |
+| sysAccount.enabled        | Specifies if the sys-account should be created                    | true          |
+
+## natsAuthConfig configuration
+
+| Component                                              | Description                                                                    | Default value               |
+| ------------------------------------------------------ | ------------------------------------------------------------------------------ | --------------------------- |
+| natsAuthConfig.nats.resolver.address                   | Specifies the address of the nats server                                       | "nats://nats.nats.svc:4222" |
+| natsAuthConfig.nats.resolver.config.type               | Specifies the type of the nats resolver                                        | full                        |
+| natsAuthConfig.nats.resolver.config.dir                | Specifies the directory to cache JWTs                                          | "/data/jwt"                 |
+| natsAuthConfig.nats.resolver.config.allow_delete       | Specifies if account information can be deleted                                | true                        |
+| natsAuthConfig.nats.resolver.config.interval           | Specifies the resolver interval                                                | "2m"                        |
+| natsAuthConfig.nats.resolver.config.timeout            | Specifies the resolver timeout                                                 | "1.9s"                      |
+| natsAuthConfig.nats.authConfigmapDestination.name      | Specifies the name of the configmap where the auth config will be stored       | nats-auth-config            |
+| natsAuthConfig.nats.authConfigmapDestination.namespace | Specifies the namespace of the configmap where the auth config will be stored  | nats                        |
+
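Putting the configuration above together, installing the umbrella chart could look like this (release name and operator name are illustrative; `helm dependency build` regenerates the bundled sub-charts from `Chart.lock`):

```console
$ helm dependency build ./charts/edgefarm-network
$ helm install edgefarm-network ./charts/edgefarm-network --set operatorName=myoperator
```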
diff --git a/charts/edgefarm-network/charts/edgenetwork-operator-0.1.0.tgz b/charts/edgefarm-network/charts/edgenetwork-operator-0.1.0.tgz
new file mode 100644
index 00000000..3aac5c35
Binary files /dev/null and b/charts/edgefarm-network/charts/edgenetwork-operator-0.1.0.tgz differ
diff --git a/charts/edgefarm-network/charts/nats-1.0.0-rc.0.tgz b/charts/edgefarm-network/charts/nats-1.0.0-rc.0.tgz
new file mode 100644
index 00000000..6513e51a
Binary files /dev/null and b/charts/edgefarm-network/charts/nats-1.0.0-rc.0.tgz differ
diff --git a/charts/edgefarm-network/charts/network-compositions-0.1.0.tgz b/charts/edgefarm-network/charts/network-compositions-0.1.0.tgz
new file mode 100644
index 00000000..54e7b45d
Binary files /dev/null and b/charts/edgefarm-network/charts/network-compositions-0.1.0.tgz differ
diff --git a/charts/edgefarm-network/charts/network-dependencies-webhook-0.1.0.tgz b/charts/edgefarm-network/charts/network-dependencies-webhook-0.1.0.tgz
new file mode 100644
index 00000000..53ba1654
Binary files /dev/null and b/charts/edgefarm-network/charts/network-dependencies-webhook-0.1.0.tgz differ
diff --git a/charts/edgefarm-network/charts/network-resource-info-0.1.0.tgz b/charts/edgefarm-network/charts/network-resource-info-0.1.0.tgz
new file mode 100644
index 00000000..8a76342f
Binary files /dev/null and b/charts/edgefarm-network/charts/network-resource-info-0.1.0.tgz differ
diff --git a/charts/edgefarm-network/charts/provider-kubernetes-0.1.0.tgz b/charts/edgefarm-network/charts/provider-kubernetes-0.1.0.tgz
new file mode 100644
index 00000000..a5653b54
Binary files /dev/null and b/charts/edgefarm-network/charts/provider-kubernetes-0.1.0.tgz differ
diff --git a/charts/edgefarm-network/charts/provider-nats-0.1.0.tgz b/charts/edgefarm-network/charts/provider-nats-0.1.0.tgz
new file mode 100644
index 00000000..010a944d
Binary files /dev/null and b/charts/edgefarm-network/charts/provider-nats-0.1.0.tgz differ
diff --git a/charts/edgefarm-network/charts/provider-natssecrets-0.1.0.tgz b/charts/edgefarm-network/charts/provider-natssecrets-0.1.0.tgz
new file mode 100644
index 00000000..4fd1d2e5
Binary files /dev/null and b/charts/edgefarm-network/charts/provider-natssecrets-0.1.0.tgz differ
diff --git a/charts/edgefarm-network/crds/issue.natssecrets.crossplane.io_accounts.yaml b/charts/edgefarm-network/crds/issue.natssecrets.crossplane.io_accounts.yaml new file mode 100644 index 00000000..6c2e5e2c --- /dev/null +++ b/charts/edgefarm-network/crds/issue.natssecrets.crossplane.io_accounts.yaml @@ -0,0 +1,633 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: accounts.issue.natssecrets.crossplane.io +spec: + group: issue.natssecrets.crossplane.io + names: + categories: + - crossplane + - managed + - natssecrets + kind: Account + listKind: AccountList + plural: accounts + singular: account + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .status.atProvider.operator + name: OPERATOR + type: string + - jsonPath: .status.atProvider.nkey + name: NKEY + priority: 1 + type: string + - jsonPath: .status.atProvider.jwt + name: JWT + priority: 1 + type: string + - jsonPath: .status.atProvider.pushed + name: PUSHED + priority: 1 + type: string + - jsonPath: .status.atProvider.lastPushed + name: LAST PUSHED + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: A Account is an example API type. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: A AccountSpec defines the desired state of a Account. + properties: + deletionPolicy: + default: Delete + description: DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. + enum: + - Orphan + - Delete + type: string + forProvider: + description: AccountParameters are the configurable fields of a Account. + properties: + claims: + description: Specifies claims of the JWT + properties: + account: + description: Account specific claims + properties: + defaultPermissions: + description: Default pub/sub permissions for this account + that users inherit + properties: + pub: + description: Specifies the publish permissions + properties: + allow: + description: Specifies allowed subjects + items: + type: string + type: array + deny: + description: Specifies denied subjects + items: + type: string + type: array + type: object + resp: + description: Specifies the response permissions + properties: + max: + description: The maximum number of messages + type: integer + ttl: + description: Specifies the time to live for the + response + type: string + required: + - max + - ttl + type: object + sub: + description: Specifies the subscribe permissions + properties: + allow: + description: Specifies allowed subjects + items: + type: string + type: array + deny: + description: Specifies denied subjects + items: + type: string + type: array + type: object + type: object + description: + description: A human readable description + type: string + exports: + description: A list of account/subject combinations that + this account is allowed to export + items: + description: Export describes a mapping from this account + to another one + properties: + accountTokenPosition: + description: The account token position for the + export + type: integer + advertise: + description: Specifies if the export is advertised + type: boolean + description: + description: A human readable description + type: string + infoURL: + description: This is a URL to more information + type: string + name: + description: The name of the export + type: string + responseThreshold: + description: The response threshold for the export + type: string + responseType: + description: The response type for the export + type: string + revocations: + additionalProperties: + format: int64 + type: integer + description: The revocations for the export + type: object + serviceLatency: + description: The latency for the export. 
+ properties: + results: + description: Specifies the results for the latency + type: string + sampling: + description: Specifies the sampling for the + latency + type: integer + required: + - results + - sampling + type: object + subject: + description: The subject to export + type: string + tokenReq: + description: Specifies if a token is required for + the export + type: boolean + type: + description: The type of the export + type: string + type: object + type: array + imports: + description: A list of account/subject combinations that + this account is allowed to import + items: + description: Import describes a mapping from another + account into this one + properties: + account: + description: The account to import from + type: string + localSubject: + description: The local subject to import to + type: string + name: + description: The name of the import + type: string + share: + description: Specifies if the import is shared + type: boolean + subject: + description: The subject to import + type: string + token: + description: The token to use for the import + type: string + type: + description: The type of the import + type: string + type: object + type: array + infoURL: + description: This is a URL to more information + type: string + limits: + description: A set of limits for this account + properties: + conn: + description: Max number of connections + format: int64 + type: integer + consumer: + description: Max number of consumers + format: int64 + type: integer + data: + description: Specifies the maximum number of bytes + format: int64 + type: integer + disallowBearer: + description: Specifies that user JWT can't be bearer + token + type: boolean + diskMaxStreamBytes: + default: 0 + description: Max number of bytes a stream can have + on disk. (0 means unlimited) + format: int64 + type: integer + diskStorage: + description: Max number of bytes stored on disk across + all streams. (0 means disabled) + format: int64 + type: integer + exports: + description: Max number of exports + format: int64 + type: integer + imports: + description: Max number of imports + format: int64 + type: integer + leafNodeConn: + description: Max number of leaf node connections + format: int64 + type: integer + maxAckPending: + description: Max number of acks pending + format: int64 + type: integer + maxBytesRequired: + description: Max bytes required by all Streams + type: boolean + memMaxStreamBytes: + default: 0 + description: Max number of bytes a stream can have + in memory. (0 means unlimited) + format: int64 + type: integer + memStorage: + description: Max number of bytes stored in memory + across all streams. 
(0 means disabled) + format: int64 + type: integer + payload: + description: Specifies the maximum message payload + format: int64 + type: integer + streams: + description: Max number of streams + format: int64 + type: integer + subs: + description: Specifies the maximum number of subscriptions + format: int64 + type: integer + wildcardExports: + description: Specifies if wildcards are allowed in + exports + type: boolean + type: object + mappings: + additionalProperties: + items: + description: WeightedMapping is a mapping from one + subject to another with a weight and a destination + cluster + properties: + cluster: + description: The cluster to map to + type: string + subject: + description: The subject to map to + type: string + weight: + description: The amount of 100% that this mapping + should be used + type: integer + required: + - subject + type: object + type: array + description: Stores subjects that get mapped to other + subjects using a weighted mapping. For more information + see https://docs.nats.io/nats-concepts/subject_mapping + type: object + revocations: + additionalProperties: + format: int64 + type: integer + description: Stores user JWTs that have been revoked and + the time they were revoked + type: object + signingKeys: + description: A list of signing keys the account can use + items: + type: string + type: array + tags: + description: Do not set manually + items: + type: string + type: array + type: + description: Do not set manually + type: string + version: + description: Do not set manually + type: integer + type: object + aud: + description: Do not set manually + type: string + exp: + description: Do not set manually + format: int64 + type: integer + iat: + description: Do not set manually + format: int64 + type: integer + iss: + description: Do not set manually + type: string + jti: + description: Do not set manually + type: string + name: + description: Do not set manually + type: string + nbf: + description: Do not set manually + format: int64 + type: integer + sub: + description: Do not set manually + type: string + type: object + operator: + type: string + useSigningKey: + type: string + required: + - operator + type: object + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. 
`providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. 
Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: A AccountStatus represents the observed state of a Account. + properties: + atProvider: + description: AccountObservation are the observable fields of a Account. + properties: + account: + type: string + issue: + type: string + jwt: + type: string + jwtPath: + type: string + lastPushed: + type: string + nkey: + type: string + nkeyPath: + type: string + operator: + type: string + pushed: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/edgefarm-network/crds/issue.natssecrets.crossplane.io_operators.yaml b/charts/edgefarm-network/crds/issue.natssecrets.crossplane.io_operators.yaml new file mode 100644 index 00000000..59b4aae6 --- /dev/null +++ b/charts/edgefarm-network/crds/issue.natssecrets.crossplane.io_operators.yaml @@ -0,0 +1,390 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: operators.issue.natssecrets.crossplane.io +spec: + group: issue.natssecrets.crossplane.io + names: + categories: + - crossplane + - managed + - natssecrets + kind: Operator + listKind: OperatorList + plural: operators + singular: operator + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .status.atProvider.nkey + name: NKEY + priority: 1 + type: string + - jsonPath: .status.atProvider.jwt + name: JWT + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: A Operator is an example API type. 
+ properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: A OperatorSpec defines the desired state of a Operator. + properties: + deletionPolicy: + default: Delete + description: DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. + enum: + - Orphan + - Delete + type: string + forProvider: + description: OperatorParameters are the configurable fields of an + Operator. + properties: + claims: + description: Specifies claims of the JWT + properties: + aud: + description: Do not set manually + type: string + exp: + description: Do not set manually + format: int64 + type: integer + iat: + description: Do not set manually + format: int64 + type: integer + iss: + description: Do not set manually + type: string + jti: + description: Do not set manually + type: string + name: + description: Do not set manually + type: string + nbf: + description: Do not set manually + format: int64 + type: integer + operator: + description: Operator specific claims + properties: + accountServerUrl: + description: AccountServerURL is a partial URL like "https://host.domain.org:/jwt/v1" + tools will use the prefix and build queries by appending + /accounts/ or /operator to the path provided. + Note this assumes that the account server can handle + requests in a nats-account-server compatible way. See + https://github.com/nats-io/nats-account-server. + type: string + assertServerVersion: + description: Min Server version + type: string + operatorServiceUrls: + description: A list of NATS urls (tls://host:port) where + tools can connect to the server using proper credentials. + items: + type: string + type: array + signingKeys: + description: Slice of other operator NKey names that can + be used to sign on behalf of the main operator identity. + items: + type: string + type: array + strictSigningKeyUsage: + description: Signing of subordinate objects will require + signing keys + type: boolean + systemAccount: + description: Identity of the system account by its name + type: string + tags: + description: Do not set manually + items: + type: string + type: array + type: + description: Do not set manually + type: string + version: + description: Do not set manually + type: integer + type: object + sub: + description: Do not set manually + type: string + type: object + createSystemAccount: + type: boolean + syncAccountServer: + type: boolean + type: object + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. 
+ properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. 
- For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: A OperatorStatus represents the observed state of a Operator. + properties: + atProvider: + description: OperatorObservation are the observable fields of a Operator. + properties: + issue: + type: string + jwt: + type: string + jwtPath: + type: string + nkey: + type: string + nkeyPath: + type: string + operator: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + status: + description: Status of this instance. 
+ type: string + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/edgefarm-network/crds/issue.natssecrets.crossplane.io_users.yaml b/charts/edgefarm-network/crds/issue.natssecrets.crossplane.io_users.yaml new file mode 100644 index 00000000..d05cce7d --- /dev/null +++ b/charts/edgefarm-network/crds/issue.natssecrets.crossplane.io_users.yaml @@ -0,0 +1,472 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: users.issue.natssecrets.crossplane.io +spec: + group: issue.natssecrets.crossplane.io + names: + categories: + - crossplane + - managed + - natssecrets + kind: User + listKind: UserList + plural: users + singular: user + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.annotations.crossplane\.io/external-name + name: EXTERNAL-NAME + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .status.atProvider.operator + name: OPERATOR + type: string + - jsonPath: .status.atProvider.account + name: ACCOUNT + type: string + - jsonPath: .status.atProvider.nkey + name: NKEY + priority: 1 + type: string + - jsonPath: .status.atProvider.jwt + name: JWT + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: A User is an example API type. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: A UserSpec defines the desired state of a User. + properties: + deletionPolicy: + default: Delete + description: DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. + enum: + - Orphan + - Delete + type: string + forProvider: + description: UserParameters are the configurable fields of a User. 
+ properties: + account: + type: string + claims: + description: Specifies claims of the JWT + properties: + aud: + description: Do not set manually + type: string + exp: + description: Do not set manually + format: int64 + type: integer + iat: + description: Do not set manually + format: int64 + type: integer + iss: + description: Do not set manually + type: string + jti: + description: Do not set manually + type: string + name: + description: Do not set manually + type: string + nbf: + description: Do not set manually + format: int64 + type: integer + sub: + description: Do not set manually + type: string + user: + description: Specifies the user specific part of the JWT + properties: + allowedConnectionTypes: + description: Specifies the allowed connection types for + this user Allowed values are STANDARD, WEBSOCKET, LEAFNODE, + LEAFNODE_WS, MQTT, MQTT_WS + enum: + - STANDARD + - WEBSOCKET + - LEAFNODE + - LEAFNODE_WS + - MQTT + - MQTT_WS + items: + type: string + type: array + bearerToken: + description: Specifies if this user is allowed to use + a bearer token to connect + type: boolean + data: + description: Specifies the maximum number of bytes + format: int64 + type: integer + issuerAccount: + description: The account that issued this user JWT + type: string + payload: + description: Specifies the maximum message payload + format: int64 + type: integer + pub: + description: Specifies the publish permissions + properties: + allow: + description: Specifies allowed subjects + items: + type: string + type: array + deny: + description: Specifies denied subjects + items: + type: string + type: array + type: object + resp: + description: Specifies the response permissions + properties: + max: + description: The maximum number of messages + type: integer + ttl: + description: Specifies the time to live for the response + type: string + required: + - max + - ttl + type: object + src: + description: 'A list of CIDR specifications the user is + allowed to connect from Example: 192.168.1.0/24, 192.168.1.1/1 + or 2001:db8:a0b:12f0::1/32' + items: + type: string + type: array + sub: + description: Specifies the subscribe permissions + properties: + allow: + description: Specifies allowed subjects + items: + type: string + type: array + deny: + description: Specifies denied subjects + items: + type: string + type: array + type: object + subs: + description: Specifies the maximum number of subscriptions + format: int64 + type: integer + tags: + description: Do not set manually + items: + type: string + type: array + times: + description: Represents allowed time ranges the user is + allowed to interact with the system + items: + properties: + end: + description: The end time in the format HH:MM:SS + pattern: ^(((([0-1][0-9])|(2[0-3])):?[0-5][0-9]:?[0-5][0-9]+$)) + type: string + start: + description: The start time in the format HH:MM:SS + pattern: ^(((([0-1][0-9])|(2[0-3])):?[0-5][0-9]:?[0-5][0-9]+$)) + type: string + type: object + type: array + timesLocation: + description: The locale for the times in the format "Europe/Berlin" + type: string + type: + description: Do not set manually + type: string + version: + description: Do not set manually + type: integer + type: object + type: object + operator: + type: string + useSigningKey: + type: string + required: + - account + - operator + type: object + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource 
should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. 
+ properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. + type: string + required: + - name + type: object + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: A UserStatus represents the observed state of a User. + properties: + atProvider: + description: UserObservation are the observable fields of a User. + properties: + account: + type: string + creds: + type: string + issue: + type: string + jwt: + type: string + jwtPath: + type: string + nkey: + type: string + nkeyPath: + type: string + operator: + type: string + user: + type: string + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/edgefarm-network/templates/_helpers.tpl b/charts/edgefarm-network/templates/_helpers.tpl new file mode 100644 index 00000000..0d891322 --- /dev/null +++ b/charts/edgefarm-network/templates/_helpers.tpl @@ -0,0 +1,52 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "edgefarm-network.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "edgefarm-network.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "edgefarm-network.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "edgefarm-network.labels" -}}
+helm.sh/chart: {{ include "edgefarm-network.chart" . }}
+{{ include "edgefarm-network.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "edgefarm-network.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "edgefarm-network.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
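To see what these helpers and the templates below actually render to, the chart can be templated locally before installing (release name illustrative; dependencies must have been built first):

```console
$ helm template edgefarm-network ./charts/edgefarm-network --show-only templates/network-base-config.yaml
```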
diff --git a/charts/edgefarm-network/templates/nats-auth-config.yaml b/charts/edgefarm-network/templates/nats-auth-config.yaml
new file mode 100644
index 00000000..e14b72dd
--- /dev/null
+++ b/charts/edgefarm-network/templates/nats-auth-config.yaml
@@ -0,0 +1,134 @@
+{{- if .Values.natsAuthConfig.enabled }}
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: nats-auth-config
+  labels:
+    app: nats-auth-config
+    {{- include "edgefarm-network.labels" . | nindent 4 }}
+  namespace: {{ .Values.natsAuthConfig.namespace }}
+spec:
+  backoffLimit: 20
+  template:
+    metadata:
+      annotations:
+        vault.security.banzaicloud.io/vault-addr: "https://vault.vault:8200"
+        vault.security.banzaicloud.io/vault-skip-verify: "true"
+      labels:
+        app: nats-auth-config
+    spec:
+      serviceAccount: nats-auth-config
+      serviceAccountName: nats-auth-config
+      restartPolicy: Never
+      containers:
+        - name: create-nats-auth-config
+          image: bitnami/kubectl:1.22
+          command:
+            [
+              "bash",
+              "-c",
+              'kubectl get namespace {{ .Values.natsAuthConfig.nats.authConfigmapDestination.namespace }} || kubectl create namespace {{ .Values.natsAuthConfig.nats.authConfigmapDestination.namespace }} && filled=$(echo "$TEMPLATE" | sed "s/\${OPERATOR_JWT}/${OPERATOR_JWT}/g" | sed "s/\${SYS_ACCOUNT_PUBLIC_KEY}/${SYS_ACCOUNT_PUBLIC_KEY}/g" | sed "s/\${SYS_ACCOUNT_JWT}/${SYS_ACCOUNT_JWT}/g") && kubectl create configmap {{ .Values.natsAuthConfig.nats.authConfigmapDestination.name }} --namespace {{ .Values.natsAuthConfig.nats.authConfigmapDestination.namespace }} --from-literal=auth.config="${filled}"',
+            ]
+          env:
+            - name: OPERATOR_JWT
+              value: "vault:nats-secrets/jwt/operator/{{ .Values.operatorName }}#jwt"
+            - name: SYS_ACCOUNT_JWT
+              value: "vault:nats-secrets/jwt/operator/{{ .Values.operatorName }}/account/sys#jwt"
+            - name: SYS_ACCOUNT_PUBLIC_KEY
+              value: "vault:nats-secrets/nkey/operator/{{ .Values.operatorName }}/account/sys#publicKey"
+            - name: TEMPLATE
+              valueFrom:
+                configMapKeyRef:
+                  name: nats-auth-config-template
+                  key: config
+          resources:
+            requests:
+              cpu: 100m
+              memory: 128Mi
+            limits:
+              cpu: 100m
+              memory: 128Mi
+---
+kind: ConfigMap
+apiVersion: v1
+metadata:
+  name: nats-auth-config-template
+  namespace: {{ .Values.natsAuthConfig.namespace }}
+  labels:
+    {{- include "edgefarm-network.labels" . | nindent 4 }}
+data:
+  config: |-
+    {
+      "operator": "${OPERATOR_JWT}",
+      "system_account": "${SYS_ACCOUNT_PUBLIC_KEY}",
+      "resolver": {
+        "type": "{{ .Values.natsAuthConfig.nats.resolver.config.type }}",
+        "dir": "{{ .Values.natsAuthConfig.nats.resolver.config.dir }}",
+        "allow_delete": {{ .Values.natsAuthConfig.nats.resolver.config.allow_delete }},
+        "interval": "{{ .Values.natsAuthConfig.nats.resolver.config.interval }}",
+        "timeout": "{{ .Values.natsAuthConfig.nats.resolver.config.timeout }}"
+      },
+      "resolver_preload": {
+        "${SYS_ACCOUNT_PUBLIC_KEY}": "${SYS_ACCOUNT_JWT}"
+      }
+    }
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: nats-auth-config
+  namespace: {{ .Values.natsAuthConfig.namespace }}
+  labels:
+    {{- include "edgefarm-network.labels" . | nindent 4 }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: nats-auth-config
+  namespace: {{ .Values.natsAuthConfig.nats.authConfigmapDestination.namespace }}
+  labels:
+    {{- include "edgefarm-network.labels" . | nindent 4 }}
+rules:
+  - apiGroups: [""]
+    resources:
+      - configmaps
+    verbs:
+      - create
+  - apiGroups: [""]
+    resources:
+      - namespaces
+    verbs:
+      - get
+      - create
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: nats-auth-config
+  namespace: {{ .Values.natsAuthConfig.nats.authConfigmapDestination.namespace }}
+  labels:
+    {{- include "edgefarm-network.labels" . | nindent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: nats-auth-config
+subjects:
+  - kind: ServiceAccount
+    name: nats-auth-config
+    namespace: {{ .Values.natsAuthConfig.namespace }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: nats-auth-config-binding
+  labels:
+    {{- include "edgefarm-network.labels" . | nindent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:auth-delegator
+subjects:
+  - kind: ServiceAccount
+    name: nats-auth-config
+    namespace: {{ .Values.natsAuthConfig.namespace }}
+{{- end }}
\ No newline at end of file
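The Job's one-line `bash -c` command above is dense. Unrolled, it does roughly the following (a sketch, not the literal script; `OPERATOR_JWT`, `SYS_ACCOUNT_JWT` and `SYS_ACCOUNT_PUBLIC_KEY` are injected by the Banzai Cloud vault webhook based on the `vault:` env references):

```bash
#!/usr/bin/env bash
set -euo pipefail

NS=nats                # .Values.natsAuthConfig.nats.authConfigmapDestination.namespace
NAME=nats-auth-config  # .Values.natsAuthConfig.nats.authConfigmapDestination.name

# Ensure the destination namespace exists.
kubectl get namespace "$NS" || kubectl create namespace "$NS"

# Fill the placeholders in the template fetched from the nats-auth-config-template
# ConfigMap; the \${...} patterns are literal, the ${...} values come from the env.
filled=$(echo "$TEMPLATE" \
  | sed "s/\${OPERATOR_JWT}/${OPERATOR_JWT}/g" \
  | sed "s/\${SYS_ACCOUNT_PUBLIC_KEY}/${SYS_ACCOUNT_PUBLIC_KEY}/g" \
  | sed "s/\${SYS_ACCOUNT_JWT}/${SYS_ACCOUNT_JWT}/g")

# Store the filled-in NATS auth config where the nats chart mounts it.
kubectl create configmap "$NAME" --namespace "$NS" \
  --from-literal=auth.config="$filled"
```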
| nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: nats-auth-config +subjects: + - kind: ServiceAccount + name: nats-auth-config + namespace: {{ .Values.natsAuthConfig.namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: nats-auth-config-binding + labels: + {{- include "edgefarm-network.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: nats-auth-config + namespace: {{ .Values.natsAuthConfig.namespace }} +{{- end }} \ No newline at end of file diff --git a/charts/edgefarm-network/templates/network-base-config.yaml b/charts/edgefarm-network/templates/network-base-config.yaml new file mode 100644 index 00000000..84f30ce5 --- /dev/null +++ b/charts/edgefarm-network/templates/network-base-config.yaml @@ -0,0 +1,12 @@ +{{- if .Values.networkBaseConfig.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "edgefarm-network.labels" . | nindent 4 }} + name: network-base-config + namespace: crossplane-system +data: + operator: {{ .Values.operatorName }} + sysAccount: sys +{{- end }} \ No newline at end of file diff --git a/charts/edgefarm-network/templates/operator.yaml b/charts/edgefarm-network/templates/operator.yaml new file mode 100644 index 00000000..6ad664d4 --- /dev/null +++ b/charts/edgefarm-network/templates/operator.yaml @@ -0,0 +1,23 @@ +{{- if .Values.operator.enabled }} +apiVersion: issue.natssecrets.crossplane.io/v1alpha1 +kind: Operator +metadata: + name: {{ .Values.operatorName }} + labels: + {{- include "edgefarm-network.labels" . | nindent 4 }} +spec: + forProvider: + syncAccountServer: true + claims: + operator: + systemAccount: sys + accountServerUrl: {{ .Values.natsAuthConfig.nats.resolver.address }} + operatorServiceUrls: + - {{ .Values.natsAuthConfig.nats.resolver.address }} + strictSigningKeyUsage: false + providerConfigRef: + name: provider-natssecrets + writeConnectionSecretToRef: + namespace: crossplane-system + name: {{ .Values.operatorName }} +{{- end }} \ No newline at end of file diff --git a/charts/edgefarm-network/templates/sysaccount.yaml b/charts/edgefarm-network/templates/sysaccount.yaml new file mode 100644 index 00000000..7bd03338 --- /dev/null +++ b/charts/edgefarm-network/templates/sysaccount.yaml @@ -0,0 +1,74 @@ +{{- if .Values.sysAccount.enabled }} +apiVersion: issue.natssecrets.crossplane.io/v1alpha1 +kind: Account +metadata: + name: sys-{{ .Values.operatorName }} + annotations: + crossplane.io/external-name: sys + labels: + {{- include "edgefarm-network.labels" . 
| nindent 4 }} +spec: + forProvider: + operator: {{ .Values.operatorName }} + claims: + account: + limits: + subs: -1 + conn: -1 + leafNodeConn: -1 + data: -1 + payload: -1 + wildcardExports: true + imports: -1 + exports: -1 + exports: + - name: account-monitoring-streams + subject: "$SYS.ACCOUNT.*.>" + type: Stream + accountTokenPosition: 3 + description: Account specific monitoring stream + infoURL: https://docs.nats.io/nats-server/configuration/sys_accounts + - name: account-monitoring-services + subject: "$SYS.ACCOUNT.*.*" + type: Service + responseType: Stream + accountTokenPosition: 4 + description: + "Request account specific monitoring services for: SUBSZ, CONNZ, + LEAFZ, JSZ and INFO" + infoURL: https://docs.nats.io/nats-server/configuration/sys_accounts + providerConfigRef: + name: provider-natssecrets + writeConnectionSecretToRef: + namespace: crossplane-system + name: sys-{{ .Values.operatorName }} +--- +apiVersion: issue.natssecrets.crossplane.io/v1alpha1 +kind: User +metadata: + name: default-push-{{ .Values.operatorName }} + annotations: + crossplane.io/external-name: default-push + labels: + {{- include "edgefarm-network.labels" . | nindent 4 }} +spec: + forProvider: + operator: {{ .Values.operatorName }} + account: sys + claims: + user: + data: -1 + payload: -1 + subs: -1 + pub: + allow: + - "$SYS.REQ.CLAIMS.LIST" + - "$SYS.REQ.CLAIMS.UPDATE" + - "$SYS.REQ.CLAIMS.DELETE" + resp: + sub: + allow: + - _INBOX.> + providerConfigRef: + name: provider-natssecrets +{{- end }} \ No newline at end of file diff --git a/charts/edgefarm-network/values.yaml b/charts/edgefarm-network/values.yaml new file mode 100644 index 00000000..e834aa50 --- /dev/null +++ b/charts/edgefarm-network/values.yaml @@ -0,0 +1,77 @@ +# Set the operator name +operatorName: operator + +natsAuthConfig: + enabled: true + namespace: nats + nats: + resolver: + address: nats://nats.nats.svc:4222 + config: + type: full + dir: "/data/jwt" + allow_delete: true + interval: "2m" + timeout: "1.9s" + authConfigmapDestination: + name: nats-auth-config + namespace: nats + +networkBaseConfig: + enabled: true + +operator: + # set to `true` if you want to create the operator + enabled: true + +sysAccount: + # set to `true` if you want to create the sysAccount + enabled: true + +# Sub-Charts configurations +nats: + namespaceOverride: nats + fullnameOverride: nats + nameOverride: nats + statefulSet: + patch: + - op: add + path: /spec/template/spec/volumes/- + value: { name: auth-config, configMap: { name: nats-auth-config } } + - op: add + path: /spec/template/spec/containers/0/volumeMounts/- + value: { name: auth-config, mountPath: /etc/nats-config/auth/ } + config: + merge: + 00$include: ./auth/auth.config + jetstream: + enabled: true + merge: + domain: main + fileStore: + enabled: true + dir: /data/jetstream + maxSize: 10G + memoryStore: + enabled: true + maxSize: 1G + leafnodes: + enabled: true + port: 7422 + natsBox: + enabled: false + reloader: + enabled: false + extraResources: + - apiVersion: v1 + kind: Namespace + metadata: + labels: + kubernetes.io/metadata.name: nats + name: nats + +provider-natssecrets: + namespace: crossplane-system + +network-resource-info: + namespaceOverride: crossplane-system diff --git a/charts/network-compositions-helm/Chart.yaml b/charts/network-compositions-helm/Chart.yaml new file mode 100644 index 00000000..b5aa8e57 --- /dev/null +++ b/charts/network-compositions-helm/Chart.yaml @@ -0,0 +1,19 @@ +apiVersion: v2 +name: network-compositions-helm +description: A Helm chart for 
edgefarm.network crossplane compositions. Note that you need to have crossplane, provider-nats, provider-natssecrets and edgenetwork-operator installed to be able to install this chart.
+
+type: application
+version: 0.1.0
+appVersion: "0.1.0"
+
+keywords:
+  - edgefarm.network
+  - crd
+  - metacontroller
+
+sources:
+  - https://github.com/edgefarm/edgefarm.network
+
+maintainers:
+  - name: Armin Schlegel
+    email: armin.schlegel@gmx.de
diff --git a/charts/network-compositions-helm/README.md b/charts/network-compositions-helm/README.md
new file mode 100644
index 00000000..b337c1a9
--- /dev/null
+++ b/charts/network-compositions-helm/README.md
@@ -0,0 +1,10 @@
+# network-compositions
+
+This helm chart is part of edgefarm.network and contains several Crossplane compositions.
+Note that for this to work you need to have Crossplane (>=v1.11.3) installed and configured.
+
+## Prerequisites
+
+    Kubernetes 1.22+
+    Helm 3.2.0+
+    Crossplane 1.11.3+
diff --git a/charts/network-compositions-helm/templates/_helpers.tpl b/charts/network-compositions-helm/templates/_helpers.tpl
new file mode 100644
index 00000000..f7f129de
--- /dev/null
+++ b/charts/network-compositions-helm/templates/_helpers.tpl
@@ -0,0 +1,52 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "network-compositions.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "network-compositions.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "network-compositions.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "network-compositions.labels" -}}
+helm.sh/chart: {{ include "network-compositions.chart" . }}
+{{ include "network-compositions.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "network-compositions.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "network-compositions.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
diff --git a/charts/network-compositions-helm/templates/composition-consumer.yaml b/charts/network-compositions-helm/templates/composition-consumer.yaml
new file mode 100644
index 00000000..6c36fe32
--- /dev/null
+++ b/charts/network-compositions-helm/templates/composition-consumer.yaml
@@ -0,0 +1,327 @@
+## WARNING: This file was autogenerated!
+## Manual modifications will be overwritten
+## unless ignore: true is set in generate.yaml!
+## Last Modification: 08:01:11 on 05-25-2023.
+
+apiVersion: apiextensions.crossplane.io/v1
+kind: Composition
+metadata:
+  labels:
+    streams.network.edgefarm.io/provider: provider-nats
+    {{- include "network-compositions.labels" .
| nindent 4 }} + name: xconsumer.streams.network.edgefarm.io +spec: + compositeTypeRef: + apiVersion: streams.network.edgefarm.io/v1alpha1 + kind: XConsumer + patchSets: + - name: Name + patches: + - fromFieldPath: metadata.annotations[crossplane.io/external-name] + toFieldPath: metadata.annotations[crossplane.io/external-name] + type: FromCompositeFieldPath + - name: Common + patches: + - fromFieldPath: metadata.labels['crossplane.io/claim-name'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['crossplane.io/claim-name'] + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['crossplane.io/claim-namespace'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['crossplane.io/claim-namespace'] + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['crossplane.io/composite'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['crossplane.io/composite'] + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['external-name'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['external-name'] + type: FromCompositeFieldPath + - name: Parameters + patches: + - fromFieldPath: spec.deletionPolicy + policy: + fromFieldPath: Optional + toFieldPath: spec.deletionPolicy + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.ackPolicy + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.ackPolicy + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.ackWait + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.ackWait + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.backoff + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.backoff + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.deliverPolicy + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.deliverPolicy + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.description + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.description + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.filterSubject + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.filterSubject + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.inactiveThreshold + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.inactiveThreshold + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.maxAckPending + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.maxAckPending + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.maxDeliver + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.maxDeliver + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.memStorage + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.memStorage + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.numReplicas + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.numReplicas + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.optStartSeq + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.optStartSeq + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.optStartTime + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.optStartTime + type: FromCompositeFieldPath 
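+      # Each entry in this patch set forwards one optional claim field to the
+      # composed Consumer resource. As an illustrative sketch (hypothetical
+      # values, not defaults of this chart), a claim exercising the
+      # pull-specific fields patched below could contain:
+      #   spec:
+      #     forProvider:
+      #       config:
+      #         pull:
+      #           maxBatch: 100
+      #           maxBytes: 1048576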
+ - fromFieldPath: spec.forProvider.config.pull.maxBatch + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.pull.maxBatch + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.pull.maxBytes + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.pull.maxBytes + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.pull.maxExpires + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.pull.maxExpires + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.pull.maxWaiting + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.pull.maxWaiting + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.push.deliverGroup + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.push.deliverGroup + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.push.deliverSubject + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.push.deliverSubject + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.push.flowControl + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.push.flowControl + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.push.headersOnly + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.push.headersOnly + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.push.idleHeartbeat + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.push.idleHeartbeat + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.push.rateLimitBps + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.push.rateLimitBps + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.replayPolicy + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.replayPolicy + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.sampleFreq + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.sampleFreq + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.domain + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.domain + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.stream + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.stream + type: FromCompositeFieldPath + - fromFieldPath: spec.providerConfigRef.name + policy: + fromFieldPath: Optional + toFieldPath: spec.providerConfigRef.name + type: FromCompositeFieldPath + - fromFieldPath: spec.providerConfigRef.policy.resolution + policy: + fromFieldPath: Optional + toFieldPath: spec.providerConfigRef.policy.resolution + type: FromCompositeFieldPath + - fromFieldPath: spec.providerConfigRef.policy.resolve + policy: + fromFieldPath: Optional + toFieldPath: spec.providerConfigRef.policy.resolve + type: FromCompositeFieldPath + - name: Labels + patches: [] + resources: + - base: + apiVersion: nats.crossplane.io/v1alpha1 + kind: Consumer + metadata: {} + spec: + forProvider: {} + providerConfigRef: + name: default + name: Consumer + patches: + - patchSetName: Name + type: PatchSet + - patchSetName: Common + type: PatchSet + - patchSetName: Parameters + type: PatchSet + - patchSetName: Labels + type: PatchSet + - fromFieldPath: status.atProvider.state.ackFloor.consumerSeq + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.ackFloor.consumerSeq + type: 
ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.ackFloor.lastActive + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.ackFloor.lastActive + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.ackFloor.streamSeq + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.ackFloor.streamSeq + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.cluster.leader + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.cluster.leader + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.cluster.name + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.cluster.name + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.cluster.replicas + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.cluster.replicas + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.created + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.created + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.delivered.consumerSeq + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.delivered.consumerSeq + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.delivered.lastActive + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.delivered.lastActive + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.delivered.streamSeq + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.delivered.streamSeq + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.domain + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.domain + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.durableName + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.durableName + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.name + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.name + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.numAckPending + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.numAckPending + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.numPending + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.numPending + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.numRedelivered + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.numRedelivered + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.numWaiting + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.numWaiting + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.pushBound + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.pushBound + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.streamName + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.streamName + type: ToCompositeFieldPath + - fromFieldPath: metadata.annotations["crossplane.io/external-name"] + policy: + fromFieldPath: Optional + toFieldPath: status.uid + type: ToCompositeFieldPath + - fromFieldPath: status.conditions + policy: + fromFieldPath: Optional + toFieldPath: status.observed.conditions + type: ToCompositeFieldPath diff --git 
a/charts/network-compositions-helm/templates/composition-edgenetworks.yaml b/charts/network-compositions-helm/templates/composition-edgenetworks.yaml new file mode 100644 index 00000000..c2235e7e --- /dev/null +++ b/charts/network-compositions-helm/templates/composition-edgenetworks.yaml @@ -0,0 +1,151 @@ +## WARNING: This file was autogenerated! +## Manual modifications will be overwritten +## unless ignore: true is set in generate.yaml! +## Last Modification: 13:37:31 on 04-24-2023. + +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + labels: + streams.network.edgefarm.io/provider: provider-kubernetes + {{- include "network-compositions.labels" . | nindent 4 }} + name: xedgenetworks.streams.network.edgefarm.io +spec: + compositeTypeRef: + apiVersion: streams.network.edgefarm.io/v1alpha1 + kind: XEdgeNetwork + patchSets: + - name: Name + patches: + - fromFieldPath: metadata.labels[crossplane.io/claim-name] + toFieldPath: metadata.annotations[crossplane.io/external-name] + type: FromCompositeFieldPath + - name: Common + patches: + - fromFieldPath: metadata.labels['crossplane.io/claim-name'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['crossplane.io/claim-name'] + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['crossplane.io/claim-namespace'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['crossplane.io/claim-namespace'] + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['crossplane.io/composite'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['crossplane.io/composite'] + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['external-name'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['external-name'] + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['crossplane.io/claim-namespace'] + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.manifest.metadata.namespace + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['crossplane.io/claim-namespace'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.namespace + type: FromCompositeFieldPath + - name: Parameters + patches: + - fromFieldPath: spec.connectionSecretRefs.sysAccountUserSecretRef.name + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.manifest.spec.connectionSecretRefs.sysAccountUserSecretRef.name + type: FromCompositeFieldPath + - fromFieldPath: spec.connectionSecretRefs.systemUserSecretRef.name + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.manifest.spec.connectionSecretRefs.systemUserSecretRef.name + type: FromCompositeFieldPath + - fromFieldPath: spec.limits.fileStorage + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.manifest.spec.limits.fileStorage + type: FromCompositeFieldPath + - fromFieldPath: spec.limits.inMemoryStorage + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.manifest.spec.limits.inMemoryStorage + type: FromCompositeFieldPath + - fromFieldPath: spec.network + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.manifest.spec.network + type: FromCompositeFieldPath + - fromFieldPath: spec.nodepoolSelector.matchExpressions + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.manifest.spec.nodepoolSelector.matchExpressions + type: FromCompositeFieldPath + - fromFieldPath: spec.nodepoolSelector.matchLabels + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.manifest.spec.nodepoolSelector.matchLabels 
+ type: FromCompositeFieldPath + - fromFieldPath: spec.subNetwork + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.manifest.spec.subNetwork + type: FromCompositeFieldPath + - fromFieldPath: spec.tolerations + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.manifest.spec.tolerations + type: FromCompositeFieldPath + - name: Labels + patches: [] + resources: + - base: + apiVersion: kubernetes.crossplane.io/v1alpha1 + kind: Object + metadata: {} + spec: + forProvider: + manifest: + apiVersion: network.edgefarm.io/v1alpha1 + kind: EdgeNetwork + spec: {} + providerConfigRef: + name: provider-kubernetes + name: EdgeNetwork + patches: + - patchSetName: Name + type: PatchSet + - patchSetName: Common + type: PatchSet + - patchSetName: Parameters + type: PatchSet + - patchSetName: Labels + type: PatchSet + - fromFieldPath: status.current + policy: + fromFieldPath: Optional + toFieldPath: status.current + type: ToCompositeFieldPath + - fromFieldPath: status.desired + policy: + fromFieldPath: Optional + toFieldPath: status.desired + type: ToCompositeFieldPath + - fromFieldPath: status.ready + policy: + fromFieldPath: Optional + toFieldPath: status.ready + type: ToCompositeFieldPath + - fromFieldPath: metadata.annotations["crossplane.io/external-name"] + policy: + fromFieldPath: Optional + toFieldPath: status.uid + type: ToCompositeFieldPath + - fromFieldPath: status.conditions + policy: + fromFieldPath: Optional + toFieldPath: status.observed.conditions + type: ToCompositeFieldPath diff --git a/charts/network-compositions-helm/templates/composition-networks.yaml b/charts/network-compositions-helm/templates/composition-networks.yaml new file mode 100644 index 00000000..a1b79965 --- /dev/null +++ b/charts/network-compositions-helm/templates/composition-networks.yaml @@ -0,0 +1,505 @@ +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + name: xnetworks.streams.network.edgefarm.io + labels: + crossplane.io/xrd: xnetworks.streams.network.edgefarm.io + {{- include "network-compositions.labels" . 
| nindent 4 }} +spec: + writeConnectionSecretsToNamespace: crossplane-system + compositeTypeRef: + apiVersion: streams.network.edgefarm.io/v1alpha1 + kind: XNetwork + patchSets: + - name: Common + patches: + - fromFieldPath: metadata.labels['crossplane.io/claim-name'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['crossplane.io/claim-name'] + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['crossplane.io/claim-namespace'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['crossplane.io/claim-namespace'] + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['crossplane.io/composite'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['crossplane.io/composite'] + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['external-name'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['external-name'] + type: FromCompositeFieldPath + + - name: provider-natssecrets-common-fields + patches: + - type: FromCompositeFieldPath + fromFieldPath: status.operator + toFieldPath: spec.forProvider.operator + + - fromFieldPath: spec.writeConnectionSecretToRef.namespace + toFieldPath: spec.writeConnectionSecretToRef.namespace + + resources: + - name: get-network-base-config + base: + apiVersion: kubernetes.crossplane.io/v1alpha1 + kind: Object + metadata: + name: network-base-config + spec: + managementPolicy: Observe + forProvider: + manifest: + apiVersion: v1 + kind: ConfigMap + metadata: + name: network-base-config + namespace: crossplane-system + providerConfigRef: + name: provider-kubernetes + patches: + - type: CombineFromComposite + combine: + variables: + - fromFieldPath: metadata.labels["crossplane.io/claim-name"] + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + strategy: string + string: + fmt: "%s-%s-get-network-base-config" + toFieldPath: metadata.name + policy: + fromFieldPath: Required + + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.manifest.data[operator] + toFieldPath: status.operator + policy: + fromFieldPath: Required + + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.manifest.data[sysAccount] + toFieldPath: status.sysAccount + policy: + fromFieldPath: Required + readinessChecks: + - type: None + + - name: account + base: + apiVersion: issue.natssecrets.crossplane.io/v1alpha1 + kind: Account + metadata: + name: #patched + # labels: + # dependsOnUid: #patched + spec: + writeConnectionSecretToRef: + namespace: #patched + name: #patched + uid: #patched + forProvider: + operator: #patched + claims: + account: + limits: + streams: -1 + consumer: -1 + memStorage: -1 + diskStorage: -1 + subs: -1 + conn: -1 + leafNodeConn: -1 + data: -1 + payload: -1 + wildcardExports: true + imports: -1 + exports: -1 + providerConfigRef: + name: provider-natssecrets + patches: + - patchSetName: Common + type: PatchSet + + - type: CombineFromComposite + combine: + variables: + - fromFieldPath: metadata.labels["crossplane.io/claim-name"] + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + strategy: string + string: + fmt: "%s-%s" + toFieldPath: metadata.labels['external-name'] + policy: + fromFieldPath: Required + + - type: CombineFromComposite + combine: + variables: + - fromFieldPath: metadata.labels["crossplane.io/claim-name"] + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + strategy: string + string: + fmt: "%s-%s" + toFieldPath: metadata.annotations['crossplane.io/external-name'] + policy: + fromFieldPath: 
Required + + - fromFieldPath: status.operator + toFieldPath: spec.forProvider.operator + type: FromCompositeFieldPath + + - fromFieldPath: spec.writeConnectionSecretToRef.namespace + toFieldPath: spec.writeConnectionSecretToRef.namespace + type: FromCompositeFieldPath + + - type: CombineFromComposite + combine: + variables: + - fromFieldPath: metadata.labels["crossplane.io/claim-name"] + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + strategy: string + string: + fmt: "%s-%s" + policy: + fromFieldPath: Required + toFieldPath: metadata.name + + - type: CombineFromComposite + combine: + variables: + - fromFieldPath: metadata.labels["crossplane.io/claim-name"] + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + strategy: string + string: + fmt: "%s-%s-account" + policy: + fromFieldPath: Required + toFieldPath: spec.writeConnectionSecretToRef.name + + - type: ToCompositeFieldPath + fromFieldPath: metadata.uid + toFieldPath: status.account + policy: + fromFieldPath: Required + + - name: sys-account-user + base: + apiVersion: issue.natssecrets.crossplane.io/v1alpha1 + kind: User + metadata: + name: #patched + namespace: #patched + spec: + writeConnectionSecretToRef: + namespace: #patched + name: #patched + forProvider: + operator: #patched + account: sys + claims: + user: + data: -1 + payload: -1 + subs: -1 + providerConfigRef: + name: provider-natssecrets + patches: + - type: FromCompositeFieldPath + fromFieldPath: status.account + toFieldPath: metadata.labels['dependsOnUid'] + policy: + fromFieldPath: Required + + - type: CombineFromComposite + combine: + variables: + - fromFieldPath: metadata.labels["crossplane.io/claim-name"] + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + strategy: string + string: + fmt: "%s-%s-sys-account-user" + policy: + fromFieldPath: Required + toFieldPath: metadata.name + + - fromFieldPath: status.operator + toFieldPath: spec.forProvider.operator + type: FromCompositeFieldPath + + - fromFieldPath: status.sysAccount + toFieldPath: metadata.annotations["crossplane.io/external-name"] + type: FromCompositeFieldPath + + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + toFieldPath: spec.writeConnectionSecretToRef.namespace + type: FromCompositeFieldPath + + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + toFieldPath: metadata.namespace + type: FromCompositeFieldPath + + - type: CombineFromComposite + combine: + variables: + - fromFieldPath: metadata.labels["crossplane.io/claim-name"] + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + strategy: string + string: + fmt: "%s-%s-sys-account-user" + policy: + fromFieldPath: Required + toFieldPath: spec.writeConnectionSecretToRef.name + + - name: system + base: + apiVersion: issue.natssecrets.crossplane.io/v1alpha1 + kind: User + metadata: + name: #patched + namespace: #patched + spec: + writeConnectionSecretToRef: + namespace: #patched + name: #patched + forProvider: + operator: #patched + account: #patched + claims: + user: + data: -1 + payload: -1 + subs: -1 + providerConfigRef: + name: provider-natssecrets + patches: + - type: FromCompositeFieldPath + fromFieldPath: status.account + toFieldPath: metadata.labels['dependsOnUid'] + policy: + fromFieldPath: Required + + - type: CombineFromComposite + combine: + variables: + - fromFieldPath: metadata.labels["crossplane.io/claim-name"] + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + strategy: string + string: + fmt: "%s-%s-system" + policy: + 
fromFieldPath: Required + toFieldPath: metadata.name + + - type: CombineFromComposite + combine: + variables: + - fromFieldPath: metadata.labels["crossplane.io/claim-name"] + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + strategy: string + string: + fmt: "%s-%s" + toFieldPath: spec.forProvider.account + policy: + fromFieldPath: Required + + - fromFieldPath: status.operator + toFieldPath: spec.forProvider.operator + type: FromCompositeFieldPath + + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + toFieldPath: spec.writeConnectionSecretToRef.namespace + type: FromCompositeFieldPath + + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + toFieldPath: metadata.namespace + type: FromCompositeFieldPath + + - type: CombineFromComposite + combine: + variables: + - fromFieldPath: metadata.labels["crossplane.io/claim-name"] + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + strategy: string + string: + fmt: "%s-%s-system" + policy: + fromFieldPath: Required + toFieldPath: spec.writeConnectionSecretToRef.name + + - type: ToCompositeFieldPath + fromFieldPath: metadata.uid + toFieldPath: status.system + policy: + fromFieldPath: Required + + - name: provider-nats-config + base: + apiVersion: nats.crossplane.io/v1alpha1 + kind: ProviderConfig + metadata: + name: #patched + namespace: #patched + # labels: + # dependsOnUid: #patched + spec: + credentials: + source: Secret + secretRef: + name: #patched + key: provider-config-secret + patches: + - type: FromCompositeFieldPath + fromFieldPath: status.system + toFieldPath: metadata.labels['dependsOnUid'] + policy: + fromFieldPath: Required + + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + toFieldPath: metadata.namespace + type: FromCompositeFieldPath + + - type: CombineFromComposite + combine: + variables: + - fromFieldPath: metadata.labels["crossplane.io/claim-name"] + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + strategy: string + string: + fmt: "%s-%s" + policy: + fromFieldPath: Required + toFieldPath: metadata.name + + - type: CombineFromComposite + combine: + variables: + - fromFieldPath: metadata.labels["crossplane.io/claim-name"] + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + strategy: string + string: + fmt: "%s-%s-system" + policy: + fromFieldPath: Required + toFieldPath: spec.credentials.secretRef.name + + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] + toFieldPath: spec.credentials.secretRef.namespace + type: FromCompositeFieldPath + readinessChecks: + - type: None + + functions: +{{- if .Values.networkComposition.log2webhook.enabled.initial }} + - name: log2webhook + type: Container + config: + apiVersion: log2webhook.xfn.edgefarm.io/v1alpha1 + kind: Config + spec: + webhookURL: {{ .Values.networkComposition.log2webhook.webhookURL }} + container: + image: {{ .Values.networkComposition.log2webhook.image }}:{{ .Values.networkComposition.log2webhook.tag }} + imagePullPolicy: {{ .Values.networkComposition.log2webhook.imagePullPolicy }} + network: + policy: Runner +{{- end }} + - name: addStreams + type: Container + container: + image: {{ .Values.networkComposition.functions.addStreams.image }}:{{ .Values.networkComposition.functions.addStreams.tag }} + imagePullPolicy: {{ .Values.networkComposition.functions.addStreams.imagePullPolicy }} + timeout: {{ .Values.networkComposition.functions.addStreams.timeout }} + network: + policy: {{ 
.Values.networkComposition.functions.addStreams.network.policy }} + +{{- if .Values.networkComposition.log2webhook.enabled.afterAddStreams }} + - name: log2webhook + type: Container + config: + apiVersion: log2webhook.xfn.edgefarm.io/v1alpha1 + kind: Config + spec: + webhookURL: {{ .Values.networkComposition.log2webhook.webhookURL }} + container: + image: {{ .Values.networkComposition.log2webhook.image }}:{{ .Values.networkComposition.log2webhook.tag }} + imagePullPolicy: {{ .Values.networkComposition.log2webhook.imagePullPolicy }} + network: + policy: Runner +{{- end }} + + - name: addEdgeNetworks + type: Container + container: + image: {{ .Values.networkComposition.functions.addEdgeNetworks.image }}:{{ .Values.networkComposition.functions.addEdgeNetworks.tag }} + imagePullPolicy: {{ .Values.networkComposition.functions.addEdgeNetworks.imagePullPolicy }} + timeout: {{ .Values.networkComposition.functions.addEdgeNetworks.timeout }} + network: + policy: {{ .Values.networkComposition.functions.addEdgeNetworks.network.policy }} + +{{- if .Values.networkComposition.log2webhook.enabled.afterAddEdgeNetworks }} + - name: log2webhook + type: Container + config: + apiVersion: log2webhook.xfn.edgefarm.io/v1alpha1 + kind: Config + spec: + webhookURL: {{ .Values.networkComposition.log2webhook.webhookURL }} + container: + image: {{ .Values.networkComposition.log2webhook.image }}:{{ .Values.networkComposition.log2webhook.tag }} + imagePullPolicy: {{ .Values.networkComposition.log2webhook.imagePullPolicy }} + network: + policy: Runner +{{- end }} + + - name: addUsers + type: Container + container: + image: {{ .Values.networkComposition.functions.addUsers.image }}:{{ .Values.networkComposition.functions.addUsers.tag }} + imagePullPolicy: {{ .Values.networkComposition.functions.addUsers.imagePullPolicy }} + timeout: {{ .Values.networkComposition.functions.addUsers.timeout }} + network: + policy: {{ .Values.networkComposition.functions.addUsers.network.policy }} + + +{{- if .Values.networkComposition.log2webhook.enabled.afterAddUsers }} + - name: log2webhook + type: Container + config: + apiVersion: log2webhook.xfn.edgefarm.io/v1alpha1 + kind: Config + spec: + webhookURL: {{ .Values.networkComposition.log2webhook.webhookURL }} + container: + image: {{ .Values.networkComposition.log2webhook.image }}:{{ .Values.networkComposition.log2webhook.tag }} + imagePullPolicy: {{ .Values.networkComposition.log2webhook.imagePullPolicy }} + network: + policy: Runner +{{- end }} + + - name: addConsumers + type: Container + container: + image: {{ .Values.networkComposition.functions.addConsumers.image }}:{{ .Values.networkComposition.functions.addConsumers.tag }} + imagePullPolicy: {{ .Values.networkComposition.functions.addConsumers.imagePullPolicy }} + timeout: {{ .Values.networkComposition.functions.addConsumers.timeout }} + network: + policy: {{ .Values.networkComposition.functions.addConsumers.network.policy }} + +{{- if .Values.networkComposition.log2webhook.enabled.afterAddConsumers }} + - name: log2webhook + type: Container + config: + apiVersion: log2webhook.xfn.edgefarm.io/v1alpha1 + kind: Config + spec: + webhookURL: {{ .Values.networkComposition.log2webhook.webhookURL }} + container: + image: {{ .Values.networkComposition.log2webhook.image }}:{{ .Values.networkComposition.log2webhook.tag }} + imagePullPolicy: {{ .Values.networkComposition.log2webhook.imagePullPolicy }} + network: + policy: Runner +{{- end }} \ No newline at end of file diff --git 
a/charts/network-compositions-helm/templates/composition-streams.yaml b/charts/network-compositions-helm/templates/composition-streams.yaml new file mode 100644 index 00000000..c56c23cb --- /dev/null +++ b/charts/network-compositions-helm/templates/composition-streams.yaml @@ -0,0 +1,367 @@ +## WARNING: This file was autogenerated! +## Manual modifications will be overwritten +## unless ignore: true is set in generate.yaml! +## Last Modification: 21:03:27 on 05-08-2023. + +apiVersion: apiextensions.crossplane.io/v1 +kind: Composition +metadata: + labels: + streams.network.edgefarm.io/provider: provider-nats + {{- include "network-compositions.labels" . | nindent 4 }} + name: xstreams.streams.network.edgefarm.io +spec: + compositeTypeRef: + apiVersion: streams.network.edgefarm.io/v1alpha1 + kind: XStream + patchSets: + - name: Name + patches: + - fromFieldPath: metadata.annotations[crossplane.io/external-name] + toFieldPath: metadata.annotations[crossplane.io/external-name] + type: FromCompositeFieldPath + - name: Common + patches: + - fromFieldPath: metadata.labels['crossplane.io/claim-name'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['crossplane.io/claim-name'] + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['crossplane.io/claim-namespace'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['crossplane.io/claim-namespace'] + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['crossplane.io/composite'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['crossplane.io/composite'] + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels['external-name'] + policy: + fromFieldPath: Optional + toFieldPath: metadata.labels['external-name'] + type: FromCompositeFieldPath + - name: Parameters + patches: + - fromFieldPath: spec.deletionPolicy + policy: + fromFieldPath: Optional + toFieldPath: spec.deletionPolicy + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.allowDirect + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.allowDirect + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.allowRollup + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.allowRollup + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.denyDelete + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.denyDelete + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.denyPurge + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.denyPurge + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.description + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.description + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.discard + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.discard + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.discardNewPerSubject + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.discardNewPerSubject + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.duplicates + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.duplicates + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.maxAge + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.maxAge + type: FromCompositeFieldPath + - fromFieldPath: 
spec.forProvider.config.maxBytes + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.maxBytes + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.maxConsumers + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.maxConsumers + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.maxMsgSize + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.maxMsgSize + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.maxMsgs + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.maxMsgs + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.maxMsgsPerSubject + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.maxMsgsPerSubject + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.mirror.domain + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.mirror.domain + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.mirror.external.apiPrefix + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.mirror.external.apiPrefix + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.mirror.external.deliverPrefix + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.mirror.external.deliverPrefix + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.mirror.filterSubject + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.mirror.filterSubject + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.mirror.name + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.mirror.name + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.mirror.startSeq + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.mirror.startSeq + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.mirror.startTime + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.mirror.startTime + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.mirrorDirect + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.mirrorDirect + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.noAck + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.noAck + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.placement.cluster + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.placement.cluster + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.placement.tags + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.placement.tags + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.rePublish.destination + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.rePublish.destination + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.rePublish.headersOnly + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.rePublish.headersOnly + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.rePublish.source + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.rePublish.source + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.replicas + policy: + fromFieldPath: Optional + toFieldPath: 
spec.forProvider.config.replicas + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.retention + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.retention + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.sealed + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.sealed + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.sources + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.sources + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.storage + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.storage + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.subjects + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.subjects + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.config.template + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.config.template + type: FromCompositeFieldPath + - fromFieldPath: spec.forProvider.domain + policy: + fromFieldPath: Optional + toFieldPath: spec.forProvider.domain + type: FromCompositeFieldPath + - fromFieldPath: spec.providerConfigRef.name + policy: + fromFieldPath: Optional + toFieldPath: spec.providerConfigRef.name + type: FromCompositeFieldPath + - fromFieldPath: spec.providerConfigRef.policy.resolution + policy: + fromFieldPath: Optional + toFieldPath: spec.providerConfigRef.policy.resolution + type: FromCompositeFieldPath + - fromFieldPath: spec.providerConfigRef.policy.resolve + policy: + fromFieldPath: Optional + toFieldPath: spec.providerConfigRef.policy.resolve + type: FromCompositeFieldPath + - name: Labels + patches: [] + resources: + - base: + apiVersion: nats.crossplane.io/v1alpha1 + kind: Stream + metadata: {} + spec: + forProvider: {} + providerConfigRef: + name: default + name: Stream + patches: + - patchSetName: Name + type: PatchSet + - patchSetName: Common + type: PatchSet + - patchSetName: Parameters + type: PatchSet + - patchSetName: Labels + type: PatchSet + - fromFieldPath: status.atProvider.clusterInfo.leader + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.clusterInfo.leader + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.clusterInfo.name + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.clusterInfo.name + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.clusterInfo.replicas + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.clusterInfo.replicas + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.connection.accountPublicKey + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.connection.accountPublicKey + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.connection.address + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.connection.address + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.connection.userPublicKey + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.connection.userPublicKey + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.domain + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.domain + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.bytes + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.bytes + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.consumerCount + policy: 
+ fromFieldPath: Optional + toFieldPath: status.atProvider.state.consumerCount + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.deleted + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.deleted + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.firstSequence + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.firstSequence + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.firstTimestamp + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.firstTimestamp + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.lastSequence + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.lastSequence + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.lastTimestamp + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.lastTimestamp + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.messages + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.messages + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.numDeleted + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.numDeleted + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.numSubjects + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.numSubjects + type: ToCompositeFieldPath + - fromFieldPath: status.atProvider.state.subjects + policy: + fromFieldPath: Optional + toFieldPath: status.atProvider.state.subjects + type: ToCompositeFieldPath + - fromFieldPath: metadata.annotations["crossplane.io/external-name"] + policy: + fromFieldPath: Optional + toFieldPath: status.uid + type: ToCompositeFieldPath + - fromFieldPath: status.conditions + policy: + fromFieldPath: Optional + toFieldPath: status.observed.conditions + type: ToCompositeFieldPath diff --git a/charts/network-compositions-helm/templates/definition-consumer.yaml b/charts/network-compositions-helm/templates/definition-consumer.yaml new file mode 100644 index 00000000..b5a7cdd7 --- /dev/null +++ b/charts/network-compositions-helm/templates/definition-consumer.yaml @@ -0,0 +1,474 @@ +## WARNING: This file was autogenerated! +## Manual modifications will be overwritten +## unless ignore: true is set in generate.yaml! +## Last Modification: 08:01:11 on 05-25-2023. + +apiVersion: apiextensions.crossplane.io/v1 +kind: CompositeResourceDefinition +metadata: + name: xconsumers.streams.network.edgefarm.io + labels: + {{- include "network-compositions.labels" . 
| nindent 4 }}
+spec:
+  claimNames:
+    kind: Consumer
+    plural: consumers
+  group: streams.network.edgefarm.io
+  names:
+    categories:
+      - crossplane
+      - composition
+      - streams
+    kind: XConsumer
+    plural: xconsumers
+  versions:
+    - additionalPrinterColumns:
+        - jsonPath: .metadata.annotations.crossplane\.io/external-name
+          name: EXTERNAL-NAME
+          type: string
+        - jsonPath: .spec.forProvider.domain
+          name: DOMAIN
+          type: string
+        - jsonPath: .metadata.creationTimestamp
+          name: AGE
+          type: date
+        - jsonPath: .status.atProvider.state.pushBound
+          name: PUSH CONSUMER
+          priority: 1
+          type: string
+        - jsonPath: .status.atProvider.state.streamName
+          name: STREAM
+          priority: 1
+          type: string
+        - jsonPath: .status.atProvider.state.numPending
+          name: UNPROCESSED
+          priority: 1
+          type: string
+        - jsonPath: .status.atProvider.state.numRedelivered
+          name: REDELIVERED
+          priority: 1
+          type: string
+        - jsonPath: .status.atProvider.state.numAckPending
+          name: ACK PENDING
+          priority: 1
+          type: string
+      name: v1alpha1
+      referenceable: true
+      schema:
+        openAPIV3Schema:
+          properties:
+            spec:
+              description: A ConsumerSpec defines the desired state of a consumer.
+              properties:
+                deletionPolicy:
+                  default: Delete
+                  description: DeletionPolicy specifies what will happen to the underlying
+                    external resource when this managed resource is deleted - either "Delete"
+                    or "Orphan" the external resource.
+                  enum:
+                    - Orphan
+                    - Delete
+                  type: string
+                forProvider:
+                  description: ConsumerParameters are the configurable fields of a consumer.
+                  properties:
+                    config:
+                      description: Config is the consumer configuration.
+                      properties:
+                        ackPolicy:
+                          default: Explicit
+                          description: AckPolicy describes the requirement of client
+                            acknowledgements, either Explicit, None, or All. For more
+                            information see https://docs.nats.io/nats-concepts/jetstream/consumers#ackpolicy
+                          enum:
+                            - Explicit
+                            - None
+                            - All
+                          type: string
+                        ackWait:
+                          default: 30s
+                          description: AckWait is the duration that the server will
+                            wait for an ack for any individual message once it has been
+                            delivered to a consumer. If an ack is not received in time,
+                            the message will be redelivered. Format is a string duration,
+                            e.g. 1h, 1m, 1s, 1h30m or 2h3m4s.
+                          pattern: ([0-9]+h)?([0-9]+m)?([0-9]+s)?
+                          type: string
+                        backoff:
+                          description: 'Backoff is a list of time durations that represent
+                            the time to delay based on delivery count. Format of the
+                            durations is a string duration, e.g. 1h, 1m, 1s, 1h30m or
+                            2h3m4s where multiple durations are separated by commas.
+                            Example: `1s,2s,3s,4s,5s`.'
+                          pattern: ^(([0-9]+h)?([0-9]+m)?([0-9]+s)?)(?:,\s*(([0-9]+h)?([0-9]+m)?([0-9]+s)?))*$
+                          type: string
+                        deliverPolicy:
+                          default: All
+                          description: DeliverPolicy defines the point in the stream
+                            to receive messages from, either All, Last, New, ByStartSequence,
+                            ByStartTime, or LastPerSubject. For more information see
+                            https://docs.nats.io/jetstream/concepts/consumers#deliverpolicy
+                          enum:
+                            - All
+                            - Last
+                            - New
+                            - ByStartSequence
+                            - ByStartTime
+                            - LastPerSubject
+                          type: string
+                        description:
+                          description: Description is a human readable description of
+                            the consumer. This can be particularly useful for ephemeral
+                            consumers to indicate their purpose since the durable name
+                            cannot be provided.
+                          type: string
+                        filterSubject:
+                          description: FilterSubject defines an overlapping subject
+                            with the subjects bound to the stream which will filter
+                            the set of messages received by the consumer.
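+                          # Illustrative values only (hypothetical, matching the
+                          # patterns and enums documented above):
+                          #   ackWait: 1m30s
+                          #   backoff: 1s,2s,4s,8s
+                          #   deliverPolicy: Last
+                          #   filterSubject: sensors.temperature.>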
+                          type: string
+                        inactiveThreshold:
+                          description: InactiveThreshold defines the duration that instructs
+                            the server to clean up consumers that are inactive for that
+                            long. Format is a string duration, e.g. 1h, 1m, 1s, 1h30m
+                            or 2h3m4s.
+                          pattern: ([0-9]+h)?([0-9]+m)?([0-9]+s)?
+                          type: string
+                        maxAckPending:
+                          default: 1000
+                          description: MaxAckPending sets the number of outstanding
+                            acks that are allowed before message delivery is halted.
+                          type: integer
+                        maxDeliver:
+                          default: -1
+                          description: MaxDeliver is the maximum number of times a specific
+                            message delivery will be attempted. Applies to any message
+                            that is re-sent due to ack policy (i.e. due to a negative
+                            ack, or no ack sent by the client).
+                          type: integer
+                        memStorage:
+                          description: MemoryStorage, if set, forces the consumer state
+                            to be kept in memory rather than inherit the storage type
+                            of the stream (file in this case).
+                          type: boolean
+                        numReplicas:
+                          default: 0
+                          description: Replicas sets the number of replicas for the
+                            consumer's state. By default, when the value is set to zero,
+                            consumers inherit the number of replicas from the stream.
+                          type: integer
+                        optStartSeq:
+                          description: OptStartSeq is an optional start sequence number
+                            and is used with the DeliverByStartSequence deliver policy.
+                          format: int64
+                          type: integer
+                        optStartTime:
+                          description: OptStartTime is an optional start time and is
+                            used with the DeliverByStartTime deliver policy. The time
+                            format is RFC 3339, e.g. 2023-01-09T14:48:32Z
+                          pattern: ^((?:(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2}(?:\.\d+)?))(Z|[\+-]\d{2}:\d{2})?)$
+                          type: string
+                        pull:
+                          description: PullConsumer defines the pull-based consumer
+                            configuration.
+                          properties:
+                            maxBatch:
+                              description: MaxRequestBatch defines the maximum batch
+                                size a single pull request can make. When set with MaxRequestMaxBytes,
+                                the batch size will be constrained by whichever limit
+                                is hit first. This is a pull consumer specific setting.
+                              type: integer
+                            maxBytes:
+                              description: MaxRequestMaxBytes defines the maximum total
+                                bytes that can be requested in a given batch. When set
+                                with MaxRequestBatch, the batch size will be constrained
+                                by whichever limit is hit first. This is a pull consumer
+                                specific setting.
+                              type: integer
+                            maxExpires:
+                              description: MaxRequestExpires defines the maximum duration
+                                a single pull request will wait for messages to be available
+                                to pull. This is a pull consumer specific setting.
+                              pattern: ([0-9]+h)?([0-9]+m)?([0-9]+s)?
+                              type: string
+                            maxWaiting:
+                              default: 512
+                              description: MaxWaiting defines the maximum number of
+                                waiting pull requests. This is a pull consumer specific
+                                setting.
+                              type: integer
+                          type: object
+                        push:
+                          description: PushConsumer defines the push-based consumer
+                            configuration.
+                          properties:
+                            deliverGroup:
+                              description: DeliverGroup defines the queue group name
+                                which, if specified, is then used to distribute the
+                                messages between the subscribers to the consumer. This
+                                is analogous to a queue group in core NATS. See https://docs.nats.io/nats-concepts/core-nats/queue
+                                for more information on queue groups. This is a push
+                                consumer specific setting.
+                              type: string
+                            deliverSubject:
+                              description: DeliverSubject defines the subject to deliver
+                                messages to. Note that setting this field implicitly decides
+                                whether the consumer is push or pull-based. With a deliver
+                                subject, the server will push messages to clients subscribed
+                                to this subject. This is a push consumer specific setting.
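+                              # A consumer is either pull- or push-based. As a
+                              # hypothetical sketch, a push setup combining the
+                              # fields of this block could look like:
+                              #   push:
+                              #     deliverSubject: deliver.my-consumer
+                              #     deliverGroup: my-workers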
+                              type: string
+                            flowControl:
+                              description: FlowControl enables per-subscription flow control using a sliding-window protocol. This protocol relies on the server and client exchanging messages to regulate when and how many messages are pushed to the client. This one-to-one flow control mechanism works in tandem with the one-to-many flow control imposed by MaxAckPending across all subscriptions bound to a consumer. This is a push consumer specific setting.
+                              type: boolean
+                            headersOnly:
+                              description: HeadersOnly delivers, if set, only the headers of messages in the stream and not the bodies. Additionally adds Nats-Msg-Size header to indicate the size of the removed payload.
+                              type: boolean
+                            idleHeartbeat:
+                              description: IdleHeartbeat defines, if set, that the server will regularly send a status message to the client (i.e. when the period has elapsed) while there are no new messages to send. This lets the client know that the JetStream service is still up and running, even when there is no activity on the stream. The message status header will have a code of 100. Unlike FlowControl, it will have no reply to address. It may have a description such as "Idle Heartbeat". Note that this heartbeat mechanism is all handled transparently by supported clients and does not need to be handled by the application. Format is a string duration, e.g. 1h, 1m, 1s, 1h30m or 2h3m4s. This is a push consumer specific setting.
+                              pattern: ([0-9]+h)?([0-9]+m)?([0-9]+s)?
+                              type: string
+                            rateLimitBps:
+                              description: RateLimit is used to throttle the delivery of messages to the consumer, in bits per second.
+                              format: int64
+                              type: integer
+                          type: object
+                        replayPolicy:
+                          default: Instant
+                          description: ReplayPolicy is used to define the mode of message replay. If the policy is Instant, the messages will be pushed to the client as fast as possible while adhering to the Ack Policy, Max Ack Pending and the client's ability to consume those messages. If the policy is Original, the messages in the stream will be pushed to the client at the same rate that they were originally received, simulating the original timing of messages.
+                          enum:
+                            - Instant
+                            - Original
+                          type: string
+                        sampleFreq:
+                          description: SampleFrequency sets the percentage of acknowledgements that should be sampled for observability.
+                          pattern: ^([1-9][0-9]?|100)%?$
+                          type: string
+                      required:
+                        - ackPolicy
+                        - ackWait
+                        - deliverPolicy
+                        - numReplicas
+                        - replayPolicy
+                      type: object
+                    domain:
+                      description: Domain is the domain of the Jetstream stream the consumer is created for.
+                      type: string
+                    stream:
+                      description: Stream is the name of the Jetstream stream the consumer is created for.
+                      type: string
+                  required:
+                    - config
+                    - stream
+                  type: object
+                providerConfigRef:
+                  description: ProviderConfigReference specifies how the provider that will be used to create, observe, update, and delete this managed resource should be configured.
+                  properties:
+                    name:
+                      description: Name of the referenced object.
+                      type: string
+                    policy:
+                      description: Policies for referencing.
+                      properties:
+                        resolution:
+                          default: Required
+                          description: Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved.
+                          enum:
+                            - Required
+                            - Optional
+                          type: string
+                        resolve:
+                          description: Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile.
+                          enum:
+                            - Always
+                            - IfNotPresent
+                          type: string
+                      type: object
+                  required:
+                    - name
+                  type: object
+              required:
+                - forProvider
+              type: object
+            status:
+              description: A ConsumerStatus represents the observed state of a consumer.
+              properties:
+                atProvider:
+                  description: ConsumerObservation are the observable fields of a consumer.
+                  properties:
+                    state:
+                      description: State is the current state of the consumer
+                      properties:
+                        ackFloor:
+                          description: AckFloor TBD
+                          properties:
+                            consumerSeq:
+                              description: Consumer is the consumer name
+                              format: int64
+                              type: integer
+                            lastActive:
+                              description: Last is the last time the consumer was active. Needs to be converted to time.Time
+                              type: string
+                            streamSeq:
+                              description: Stream is the name of the stream
+                              format: int64
+                              type: integer
+                          required:
+                            - consumerSeq
+                            - streamSeq
+                          type: object
+                        cluster:
+                          description: Cluster is the cluster information.
+                          properties:
+                            leader:
+                              description: Leader is the leader of the cluster.
+                              type: string
+                            name:
+                              description: Name is the name of the cluster.
+                              type: string
+                            replicas:
+                              description: Replicas are the replicas of the cluster.
+                              items:
+                                description: PeerInfo shows information about all the peers in the cluster that are supporting the stream or consumer.
+                                properties:
+                                  active:
+                                    type: string
+                                  current:
+                                    type: boolean
+                                  lag:
+                                    format: int64
+                                    type: integer
+                                  name:
+                                    type: string
+                                  offline:
+                                    type: boolean
+                                required:
+                                  - active
+                                  - current
+                                  - name
+                                type: object
+                              type: array
+                          type: object
+                        created:
+                          description: Created is the time the consumer was created. Needs to be converted to time.Time
+                          type: string
+                        delivered:
+                          description: Delivered is the consumer sequence and last activity.
+                          properties:
+                            consumerSeq:
+                              description: Consumer is the consumer name
+                              format: int64
+                              type: integer
+                            lastActive:
+                              description: Last is the last time the consumer was active. Needs to be converted to time.Time
+                              type: string
+                            streamSeq:
+                              description: Stream is the name of the stream
+                              format: int64
+                              type: integer
+                          required:
+                            - consumerSeq
+                            - streamSeq
+                          type: object
+                        domain:
+                          description: Domain is the domain of the consumer.
+                          type: string
+                        durableName:
+                          description: Durable is the durable name.
+                          type: string
+                        name:
+                          description: Name is the consumer name.
+                          type: string
+                        numAckPending:
+                          description: NumAckPending is the number of messages pending acknowledgement.
+                          type: integer
+                        numPending:
+                          description: NumPending is the number of messages pending.
+                          format: int64
+                          type: integer
+                        numRedelivered:
+                          description: NumRedelivered is the number of redelivered messages.
+                          type: integer
+                        numWaiting:
+                          description: NumWaiting is the number of messages waiting to be delivered.
+                          type: integer
+                        pushBound:
+                          description: PushBound is whether the consumer is push bound.
+                          type: string
+                        streamName:
+                          description: Stream is the stream name.
+                          type: string
+                      required:
+                        - ackFloor
+                        - created
+                        - delivered
+                        - domain
+                        - durableName
+                        - name
+                        - numAckPending
+                        - numPending
+                        - numRedelivered
+                        - numWaiting
+                        - streamName
+                      type: object
+                  type: object
+                observed:
+                  description: Freeform field containing information about the observed status.
+                  type: object
+                  x-kubernetes-preserve-unknown-fields: true
+                uid:
+                  description: The unique ID of this Consumer resource reported by the provider
+                  type: string
+              type: object
+      served: true
diff --git a/charts/network-compositions-helm/templates/definition-edgenetwork.yaml b/charts/network-compositions-helm/templates/definition-edgenetwork.yaml
new file mode 100644
index 00000000..2498fd21
--- /dev/null
+++ b/charts/network-compositions-helm/templates/definition-edgenetwork.yaml
+## WARNING: This file was autogenerated!
+## Manual modifications will be overwritten
+## unless ignore: true is set in generate.yaml!
+## Last Modification: 13:37:31 on 04-24-2023.
+
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xedgenetworks.streams.network.edgefarm.io
+  labels:
+    {{- include "network-compositions.labels" . | nindent 4 }}
+spec:
+  claimNames:
+    kind: EdgeNetwork
+    plural: edgenetworks
+  group: streams.network.edgefarm.io
+  names:
+    categories:
+      - crossplane
+      - composition
+      - streams
+    kind: XEdgeNetwork
+    plural: xedgenetworks
+  versions:
+    - additionalPrinterColumns:
+        - jsonPath: .spec.network
+          name: NETWORK
+          type: string
+        - jsonPath: .metadata.creationTimestamp
+          name: AGE
+          type: date
+        - jsonPath: .status.desired
+          name: DESIRED
+          type: string
+        - jsonPath: .status.current
+          name: CURRENT
+          type: string
+        - jsonPath: .status.ready
+          name: READY
+          type: string
+        - jsonPath: .spec.limits.fileStorage
+          name: FILE LIMIT
+          priority: 1
+          type: string
+        - jsonPath: .spec.limits.inMemoryStorage
+          name: MEMORY LIMIT
+          priority: 1
+          type: string
+      name: v1alpha1
+      referenceable: true
+      schema:
+        openAPIV3Schema:
+          properties:
+            spec:
+              description: The spec to define an edge network
+              properties:
+                connectionSecretRefs:
+                  description: The connection secrets to connect to the network
+                  properties:
+                    sysAccountUserSecretRef:
+                      description: Reference to the secret containing the credentials to connect to the network as a system account user
+                      properties:
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      type: object
+                      x-kubernetes-map-type: atomic
+                    systemUserSecretRef:
+                      description: Reference to the secret containing the credentials to connect to the network as a system user
+                      properties:
+                        name:
+                          description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names TODO: Add other useful fields. apiVersion, kind, uid?'
+                          type: string
+                      type: object
+                      x-kubernetes-map-type: atomic
+                  required:
+                    - sysAccountUserSecretRef
+                    - systemUserSecretRef
+                  type: object
+                limits:
+                  description: Hardware limits for the edge network
+                  properties:
+                    fileStorage:
+                      default: 1G
+                      description: How much disk space is available for data that is stored on disk
+                      pattern: ^\d+[GMKB]?$
+                      type: string
+                    inMemoryStorage:
+                      default: 1G
+                      description: How much memory is available for data that is stored in memory
+                      pattern: ^\d+[GMKB]?$
+                      type: string
+                  required:
+                    - fileStorage
+                    - inMemoryStorage
+                  type: object
+                network:
+                  description: The name of the network
+                  type: string
+                nodepoolSelector:
+                  description: NodePoolSelector is a label query over nodepools that should match the replica count. It must match the nodepool's labels.
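+                  # Hypothetical illustration: a claim could select a pool via
+                  #   nodepoolSelector:
+                  #     matchLabels:
+                  #       edgefarm.io/nodepool: factory-a
+                  # (the label key and value are made up for this sketch).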
+                  properties:
+                    matchExpressions:
+                      description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                      items:
+                        description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                        properties:
+                          key:
+                            description: key is the label key that the selector applies to.
+                            type: string
+                          operator:
+                            description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+                            type: string
+                          values:
+                            description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+                            items:
+                              type: string
+                            type: array
+                        required:
+                          - key
+                          - operator
+                        type: object
+                      type: array
+                    matchLabels:
+                      additionalProperties:
+                        type: string
+                      description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+                      type: object
+                  type: object
+                  x-kubernetes-map-type: atomic
+                subNetwork:
+                  description: The name of the sub network
+                  type: string
+                tolerations:
+                  description: Indicates the tolerations the pods under this pool have. A pool's tolerations are not allowed to be updated.
+                  items:
+                    description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+                    properties:
+                      effect:
+                        description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                        type: string
+                      key:
+                        description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                        type: string
+                      operator:
+                        description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+                        type: string
+                      tolerationSeconds:
+                        description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+                        format: int64
+                        type: integer
+                      value:
+                        description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
+                        type: string
+                    type: object
+                  type: array
+              required:
+                - connectionSecretRefs
+                - limits
+                - network
+                - nodepoolSelector
+                - subNetwork
+              type: object
+            status:
+              properties:
+                current:
+                  description: The number of current participants in the edge network
+                  type: string
+                desired:
+                  description: The number of desired participants in the edge network
+                  type: string
+                observed:
+                  description: Freeform field containing information about the observed status.
+                  type: object
+                  x-kubernetes-preserve-unknown-fields: true
+                ready:
+                  description: The number of ready participants in the edge network
+                  type: string
+                uid:
+                  description: The unique ID of this EdgeNetwork resource reported by the provider
+                  type: string
+              type: object
+      served: true
diff --git a/charts/network-compositions-helm/templates/definition-network.yaml b/charts/network-compositions-helm/templates/definition-network.yaml
new file mode 100644
index 00000000..d0242b63
--- /dev/null
+++ b/charts/network-compositions-helm/templates/definition-network.yaml
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xnetworks.streams.network.edgefarm.io
+spec:
+  group: streams.network.edgefarm.io
+  names:
+    kind: XNetwork
+    plural: xnetworks
+    singular: xnetwork
+  claimNames:
+    kind: Network
+    plural: networks
+    singular: network
+  versions:
+    - name: v1alpha1
+      served: true
+      referenceable: true
+      schema:
+        openAPIV3Schema:
+          type: object
+          properties:
+            status:
+              type: object
+              description: >
+                A Status represents the observed states
+              properties:
+                operator:
+                  description: >
+                    The operator for the NATS server
+                  type: string
+                sysAccount:
+                  description: >
+                    The sys account name for the NATS server
+                  type: string
+                system:
+                  description: >
+                    The UID of the secret user resource
+                  type: string
+                account:
+                  description: >
+                    The UID of the account resource
+                  type: string
+            spec:
+              type: object
+              properties:
+                parameters:
+                  type: object
+                  properties:
+                    subNetworks:
+                      type: array
+                      description: >
+                        The subnetworks that are part of this network
+                      items:
+                        type: object
+                        description: Configuration for the sub network
+                        required:
+                          - name
+                          - limits
+                        properties:
+                          name:
+                            type: string
+                            description: The name of the sub network
+                          limits:
+                            description: Hardware limits for the sub network
+                            properties:
+                              fileStorage:
+                                default: 1G
+                                description: How much disk space is available for data that is stored on disk
+                                pattern: ^\d+[GMKB]?$
+                                type: string
+                              inMemoryStorage:
+                                default: 1G
+                                description: How much memory is available for data that is stored in memory
+                                pattern: ^\d+[GMKB]?$
+                                type: string
+                            required:
+                              - fileStorage
+                              - inMemoryStorage
+                            type: object
+                          nodepoolSelector:
+                            description: NodePoolSelector is a label query over nodepools that should match the replica count. It must match the nodepool's labels.
+                            properties:
+                              matchExpressions:
+                                description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
+                                items:
+                                  description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
+                                  properties:
+                                    key:
+                                      description: key is the label key that the selector applies to.
+                                      type: string
+                                    operator:
+                                      description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
+                                      type: string
+                                    values:
+                                      description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
+                                      items:
+                                        type: string
+                                      type: array
+                                  required:
+                                    - key
+                                    - operator
+                                  type: object
+                                type: array
+                              matchLabels:
+                                additionalProperties:
+                                  type: string
+                                description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
+                                type: object
+                            type: object
+                            x-kubernetes-map-type: atomic
+                          tolerations:
+                            description: Indicates the tolerations the pods under this pool have. A pool's tolerations are not allowed to be updated.
+                            items:
+                              description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
+                              properties:
+                                effect:
+                                  description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+                                  type: string
+                                key:
+                                  description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+                                  type: string
+                                operator:
+                                  description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
+                                  type: string
+                                tolerationSeconds:
+                                  description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
+                                  format: int64
+                                  type: integer
+                                value:
+                                  description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
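+                                  # Hypothetical illustration: a toleration entry such as
+                                  #   - key: "edgefarm.io/edge"
+                                  #     operator: "Exists"
+                                  #     effect: "NoSchedule"
+                                  # would let the pool's pods run on tainted edge nodes
+                                  # (the taint key is made up for this sketch).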
+                                  type: string
+                              type: object
+                            type: array
+                    nats:
+                      description: NATS config
+                      type: object
+                      required:
+                        - operator
+                        - address
+                      properties:
+                        operator:
+                          description: The name of the operator the account is created for
+                          type: string
+                        address:
+                          type: string
+                          description: >
+                            The address of the NATS server
+                    users:
+                      description: List of users to create
+                      type: array
+                      items:
+                        type: object
+                        properties:
+                          name:
+                            description: The name of the user
+                            type: string
+                          writeToSecret:
+                            type: object
+                            required:
+                              - name
+                            description: The secret to write the user credentials to
+                            properties:
+                              name:
+                                description: The name of the secret
+                                type: string
+                          limits:
+                            description: The limits for the user
+                            type: object
+                            properties:
+                              subscriptions:
+                                description: Specifies the maximum number of subscriptions
+                                format: int64
+                                type: integer
+
+                              payload:
+                                description: Specifies the maximum message payload
+                                format: int64
+                                type: integer
+
+                              data:
+                                description: Specifies the maximum number of bytes
+                                format: int64
+                                type: integer
+                          permissions:
+                            description: The pub/sub permissions for the user
+                            type: object
+                            properties:
+                              sub:
+                                description: Specifies the subscribe permissions
+                                properties:
+                                  allow:
+                                    description: Specifies allowed subjects
+                                    items:
+                                      type: string
+                                    type: array
+                                  deny:
+                                    description: Specifies denied subjects
+                                    items:
+                                      type: string
+                                    type: array
+                                type: object
+                              pub:
+                                description: Specifies the publish permissions
+                                properties:
+                                  allow:
+                                    description: Specifies allowed subjects
+                                    items:
+                                      type: string
+                                    type: array
+                                  deny:
+                                    description: Specifies denied subjects
+                                    items:
+                                      type: string
+                                    type: array
+                                type: object
+                    consumers:
+                      type: array
+                      description: List of consumers
+                      items:
+                        type: object
+                        description: Configuration for a consumer
+                        properties:
+                          name:
+                            description: The name of the consumer
+                            type: string
+                          streamRef:
+                            description: The name of the stream the consumer is created for
+                            default: main
+                            type: string
+                          config:
+                            description: Config is the consumer configuration.
+                            properties:
+                              ackPolicy:
+                                default: Explicit
+                                description: AckPolicy describes the requirement of client acknowledgements, either Explicit, None, or All. For more information see https://docs.nats.io/nats-concepts/jetstream/consumers#ackpolicy
+                                enum:
+                                  - Explicit
+                                  - None
+                                  - All
+                                type: string
+                              ackWait:
+                                default: 30s
+                                description: AckWait is the duration that the server will wait for an ack for any individual message once it has been delivered to a consumer. If an ack is not received in time, the message will be redelivered. Format is a string duration, e.g. 1h, 1m, 1s, 1h30m or 2h3m4s.
+                                pattern: ([0-9]+h)?([0-9]+m)?([0-9]+s)?
+                                type: string
+                              backoff:
+                                description: "Backoff is a list of time durations that represent the time to delay based on delivery count. Format of the durations is a string duration, e.g. 1h, 1m, 1s, 1h30m or 2h3m4s where multiple durations are separated by commas. Example: `1s,2s,3s,4s,5s`."
+                                pattern: ^(([0-9]+h)?([0-9]+m)?([0-9]+s)?)(?:,\s*(([0-9]+h)?([0-9]+m)?([0-9]+s)?))*$
+                                type: string
+                              deliverPolicy:
+                                default: All
+                                description: DeliverPolicy defines the point in the stream to receive messages from, either All, Last, New, ByStartSequence, ByStartTime, or LastPerSubject. For more information see https://docs.nats.io/jetstream/concepts/consumers#deliverpolicy
+                                enum:
+                                  - All
+                                  - Last
+                                  - New
+                                  - ByStartSequence
+                                  - ByStartTime
+                                  - LastPerSubject
+                                type: string
+                              description:
+                                description: Description is a human readable description of the consumer. This can be particularly useful for ephemeral consumers to indicate their purpose since the durable name cannot be provided.
+                                type: string
+                              filterSubject:
+                                description: FilterSubject defines an overlapping subject with the subjects bound to the stream which will filter the set of messages received by the consumer.
+                                type: string
+                              inactiveThreshold:
+                                description: InactiveThreshold defines the duration that instructs the server to cleanup consumers that are inactive for that long. Format is a string duration, e.g. 1h, 1m, 1s, 1h30m or 2h3m4s.
+                                pattern: ([0-9]+h)?([0-9]+m)?([0-9]+s)?
+                                type: string
+                              maxAckPending:
+                                default: 1000
+                                description: MaxAckPending sets the number of outstanding acks that are allowed before message delivery is halted.
+                                type: integer
+                              maxDeliver:
+                                default: -1
+                                description: MaxDeliver is the maximum number of times a specific message delivery will be attempted. Applies to any message that is re-sent due to ack policy (i.e. due to a negative ack, or no ack sent by the client).
+                                type: integer
+                              memStorage:
+                                description: MemoryStorage if set, forces the consumer state to be kept in memory rather than inheriting the storage type of the stream (file in this case).
+                                type: boolean
+                              numReplicas:
+                                default: 0
+                                description: Replicas sets the number of replicas for the consumer's state. By default, when the value is set to zero, consumers inherit the number of replicas from the stream.
+                                type: integer
+                              optStartSeq:
+                                description: OptStartSeq is an optional start sequence number and is used with the DeliverByStartSequence deliver policy.
+                                format: int64
+                                type: integer
+                              optStartTime:
+                                description: OptStartTime is an optional start time and is used with the DeliverByStartTime deliver policy. The time format is RFC 3339, e.g. 2023-01-09T14:48:32Z
+                                pattern: ^((?:(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2}(?:\.\d+)?))(Z|[\+-]\d{2}:\d{2})?)$
+                                type: string
+                              pull:
+                                description: PullConsumer defines the pull-based consumer configuration.
+                                properties:
+                                  maxBatch:
+                                    description: MaxRequestBatch defines the maximum batch size a single pull request can make. When set with MaxRequestMaxBytes, the batch size will be constrained by whichever limit is hit first. This is a pull consumer specific setting.
+                                    type: integer
+                                  maxBytes:
+                                    description: MaxRequestMaxBytes defines the maximum total bytes that can be requested in a given batch. When set with MaxRequestBatch, the batch size will be constrained by whichever limit is hit first. This is a pull consumer specific setting.
+                                    type: integer
+                                  maxExpires:
+                                    description: MaxRequestExpires defines the maximum duration a single pull request will wait for messages to be available to pull. This is a pull consumer specific setting.
+                                    pattern: ([0-9]+h)?([0-9]+m)?([0-9]+s)?
+                                    type: string
+                                  maxWaiting:
+                                    default: 512
+                                    description: MaxWaiting defines the maximum number of waiting pull requests. This is a pull consumer specific setting.
+                                    type: integer
+                                type: object
+                              push:
+                                description: PushConsumer defines the push-based consumer configuration.
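+                                # Hypothetical illustration: a push consumer with
+                                #   deliverSubject: "deliver.orders"
+                                #   deliverGroup: "order-workers"
+                                # load-balances delivery across all subscribers in the group
+                                # (subject and group names are made up for this sketch).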
+                                properties:
+                                  deliverGroup:
+                                    description: DeliverGroup defines the queue group name which, if specified, is then used to distribute the messages between the subscribers to the consumer. This is analogous to a queue group in core NATS. See https://docs.nats.io/nats-concepts/core-nats/queue for more information on queue groups. This is a push consumer specific setting.
+                                    type: string
+                                  deliverSubject:
+                                    description: DeliverSubject defines the subject to deliver messages to. Note, setting this field implicitly decides whether the consumer is push or pull-based. With a deliver subject, the server will push messages to clients subscribed to this subject. This is a push consumer specific setting.
+                                    type: string
+                                  flowControl:
+                                    description: FlowControl enables per-subscription flow control using a sliding-window protocol. This protocol relies on the server and client exchanging messages to regulate when and how many messages are pushed to the client. This one-to-one flow control mechanism works in tandem with the one-to-many flow control imposed by MaxAckPending across all subscriptions bound to a consumer. This is a push consumer specific setting.
+                                    type: boolean
+                                  headersOnly:
+                                    description: HeadersOnly delivers, if set, only the headers of messages in the stream and not the bodies. Additionally adds Nats-Msg-Size header to indicate the size of the removed payload.
+                                    type: boolean
+                                  idleHeartbeat:
+                                    description: IdleHeartbeat defines, if set, that the server will regularly send a status message to the client (i.e. when the period has elapsed) while there are no new messages to send. This lets the client know that the JetStream service is still up and running, even when there is no activity on the stream. The message status header will have a code of 100. Unlike FlowControl, it will have no reply to address. It may have a description such as "Idle Heartbeat". Note that this heartbeat mechanism is all handled transparently by supported clients and does not need to be handled by the application. Format is a string duration, e.g. 1h, 1m, 1s, 1h30m or 2h3m4s. This is a push consumer specific setting.
+                                    pattern: ([0-9]+h)?([0-9]+m)?([0-9]+s)?
+                                    type: string
+                                  rateLimitBps:
+                                    description: RateLimit is used to throttle the delivery of messages to the consumer, in bits per second.
+                                    format: int64
+                                    type: integer
+                                type: object
+                              replayPolicy:
+                                default: Instant
+                                description: ReplayPolicy is used to define the mode of message replay. If the policy is Instant, the messages will be pushed to the client as fast as possible while adhering to the Ack Policy, Max Ack Pending and the client's ability to consume those messages. If the policy is Original, the messages in the stream will be pushed to the client at the same rate that they were originally received, simulating the original timing of messages.
+                                enum:
+                                  - Instant
+                                  - Original
+                                type: string
+                              sampleFreq:
+                                description: SampleFrequency sets the percentage of acknowledgements that should be sampled for observability.
+                                pattern: ^([1-9][0-9]?|100)%?$
+                                type: string
+                            required:
+                              - ackPolicy
+                              - ackWait
+                              - deliverPolicy
+                              - numReplicas
+                              - replayPolicy
+                            type: object
+
+                    streams:
+                      type: array
+                      description: List of streams
+                      items:
+                        type: object
+                        description: Configuration for a stream
+                        properties:
+                          name:
+                            description: The name of the stream
+                            type: string
+                          subNetworkRef:
+                            description: The name of the sub network the stream is created for
+                            default: main
+                            type: string
+                          type:
+                            description: The type of the stream
+                            type: string
+                            default: Standard
+                            enum:
+                              - Standard
+                              - Aggregate
+                              - Mirror
+                          reference:
+                            description: When type is mirror, the name of the stream to mirror
+                            type: string
+                          references:
+                            description: When type is aggregate, the names of the streams to aggregate
+                            type: array
+                            items:
+                              type: string
+                          config:
+                            description: Config is the stream configuration.
+                            properties:
+                              allowDirect:
+                                description: AllowDirect is a flag that if true and the stream has more than one replica, each replica will respond to direct get requests for individual messages, not only the leader.
+                                type: boolean
+                              allowRollup:
+                                description: AllowRollup is a flag to allow the use of the Nats-Rollup header to replace all contents of a stream, or subject in a stream, with a single new message.
+                                type: boolean
+                              denyDelete:
+                                description: DenyDelete is a flag to restrict the ability to delete messages from a stream via the API.
+                                type: boolean
+                              denyPurge:
+                                description: DenyPurge is a flag to restrict the ability to purge messages from a stream via the API.
+                                type: boolean
+                              description:
+                                description: Description is a human readable description of the stream.
+                                type: string
+                              discard:
+                                default: Old
+                                description: "Discard defines the behavior of discarding messages when any streams' limits have been reached. Old (default): This policy will delete the oldest messages in order to maintain the limit. For example, if MaxAge is set to one minute, the server will automatically delete messages older than one minute with this policy. New: This policy will reject new messages from being appended to the stream if it would exceed one of the limits. An extension to this policy is DiscardNewPerSubject which will apply this policy on a per-subject basis within the stream."
+                                enum:
+                                  - Old
+                                  - New
+                                type: string
+                              discardNewPerSubject:
+                                default: false
+                                description: DiscardNewPerSubject will apply the New discard policy on a per-subject basis within the stream.
+                                type: boolean
+                              duplicates:
+                                default: 2m0s
+                                description: Duplicates defines the time window within which to track duplicate messages.
+                                pattern: ^(([0-9]+[smh]){1,3})$
+                                type: string
+                              maxAge:
+                                default: 0s
+                                description: MaxAge is the maximum age of a message in the stream. Format is a string duration, e.g. 1h, 1m, 1s, 1h30m or 2h3m4s.
+                                pattern: ([0-9]+h)?([0-9]+m)?([0-9]+s)?
+                                type: string
+                              maxBytes:
+                                default: -1
+                                description: MaxBytes defines how many bytes the Stream may contain. Adheres to Discard Policy, removing oldest or refusing new messages if the Stream exceeds this size.
+                                format: int64
+                                type: integer
+                              maxConsumers:
+                                default: -1
+                                description: MaxConsumers defines how many Consumers can be defined for a given Stream. Define -1 for unlimited.
+                                type: integer
+                              maxMsgSize:
+                                default: -1
+                                description: MaxMsgSize defines the largest message that will be accepted by the Stream.
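+                                # Illustrative note (assuming the NATS convention of bytes):
+                                # e.g. maxMsgSize: 1048576 would reject messages larger
+                                # than 1 MiB; -1 leaves the size unlimited.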
+                                format: int32
+                                minimum: -1
+                                type: integer
+                              maxMsgs:
+                                default: -1
+                                description: MaxMsgs defines how many messages may be in a Stream. Adheres to Discard Policy, removing oldest or refusing new messages if the Stream exceeds this number of messages.
+                                format: int64
+                                type: integer
+                              maxMsgsPerSubject:
+                                default: -1
+                                description: MaxMsgsPerSubject defines how many messages in the stream to retain per subject.
+                                format: int64
+                                minimum: -1
+                                type: integer
+                              mirror:
+                                description: Mirror is the mirror configuration for the stream.
+                                properties:
+                                  domain:
+                                    description: Domain is the JetStream domain of where the origin stream exists. This is commonly used between a cluster/supercluster and a leaf node/cluster.
+                                    type: string
+                                  external:
+                                    description: External is the external stream configuration.
+                                    properties:
+                                      apiPrefix:
+                                        description: APIPrefix is the prefix for the API of the external stream.
+                                        type: string
+                                      deliverPrefix:
+                                        description: DeliverPrefix is the prefix for the deliver subject of the external stream.
+                                        type: string
+                                    required:
+                                      - apiPrefix
+                                    type: object
+                                  filterSubject:
+                                    description: FilterSubject is an optional filter subject which will include only messages that match the subject, typically including a wildcard.
+                                    type: string
+                                  name:
+                                    description: Name of the origin stream to source messages from.
+                                    type: string
+                                  startSeq:
+                                    description: StartSeq is an optional start sequence of the origin stream to start mirroring from.
+                                    format: int64
+                                    type: integer
+                                  startTime:
+                                    description: StartTime is an optional message start time to start mirroring from. Any messages that are equal to or greater than the start time will be included. The time format is RFC 3339, e.g. 2023-01-09T14:48:32Z
+                                    pattern: ^((?:(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2}(?:\.\d+)?))(Z|[\+-]\d{2}:\d{2})?)$
+                                    type: string
+                                required:
+                                  - name
+                                type: object
+                              mirrorDirect:
+                                description: MirrorDirect is a flag that if true, and the stream is a mirror, the mirror will participate in serving direct get requests for individual messages from the origin stream.
+                                type: boolean
+                              noAck:
+                                default: false
+                                description: NoAck is a flag to disable acknowledging messages that are received by the Stream.
+                                type: boolean
+                              placement:
+                                description: Placement is the placement policy for the stream.
+                                properties:
+                                  cluster:
+                                    description: Cluster is the name of the Jetstream cluster.
+                                    type: string
+                                  tags:
+                                    description: Tags defines a list of server tags.
+                                    items:
+                                      type: string
+                                    type: array
+                                required:
+                                  - cluster
+                                type: object
+                              rePublish:
+                                description: Allow republish of the message after being sequenced and stored.
+                                properties:
+                                  destination:
+                                    description: Destination is the destination subject messages will be re-published to. The source and destination must be a valid subject mapping. For information on subject mapping see https://docs.nats.io/jetstream/concepts/subjects#subject-mapping
+                                    type: string
+                                  headersOnly:
+                                    description: HeadersOnly defines if true, that the message data will not be included in the re-published message, only an additional header Nats-Msg-Size indicating the size of the message in bytes.
+                                    type: boolean
+                                  source:
+                                    default: ">"
+                                    description: Source is an optional subject pattern which is a subset of the subjects bound to the stream. It defaults to all messages in the stream, e.g. >.
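+                                    # Hypothetical illustration: source: "events.>" together
+                                    # with destination: "archive.events.>" re-publishes every
+                                    # stored event under the archive prefix (made-up subjects).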
+                                    type: string
+                                required:
+                                  - destination
+                                  - source
+                                type: object
+                              replicas:
+                                default: 1
+                                description: Replicas defines how many replicas to keep for each message in a clustered JetStream.
+                                maximum: 5
+                                minimum: 1
+                                type: integer
+                              retention:
+                                default: Limits
+                                description: Retention defines the retention policy for the stream.
+                                enum:
+                                  - Limits
+                                  - Interest
+                                  - WorkQueue
+                                type: string
+                              sealed:
+                                description: Sealed is a flag to prevent message deletion from the stream via limits or API.
+                                type: boolean
+                              sources:
+                                description: Sources is the list of one or more sources configurations for the stream.
+                                items:
+                                  description: StreamSource dictates how streams can source from other streams.
+                                  properties:
+                                    domain:
+                                      description: Domain is the JetStream domain of where the origin stream exists. This is commonly used between a cluster/supercluster and a leaf node/cluster.
+                                      type: string
+                                    external:
+                                      description: External is the external stream configuration.
+                                      properties:
+                                        apiPrefix:
+                                          description: APIPrefix is the prefix for the API of the external stream.
+                                          type: string
+                                        deliverPrefix:
+                                          description: DeliverPrefix is the prefix for the deliver subject of the external stream.
+                                          type: string
+                                      required:
+                                        - apiPrefix
+                                      type: object
+                                    filterSubject:
+                                      description: FilterSubject is an optional filter subject which will include only messages that match the subject, typically including a wildcard.
+                                      type: string
+                                    name:
+                                      description: Name of the origin stream to source messages from.
+                                      type: string
+                                    startSeq:
+                                      description: StartSeq is an optional start sequence of the origin stream to start mirroring from.
+                                      format: int64
+                                      type: integer
+                                    startTime:
+                                      description: StartTime is an optional message start time to start mirroring from. Any messages that are equal to or greater than the start time will be included. The time format is RFC 3339, e.g. 2023-01-09T14:48:32Z
+                                      pattern: ^((?:(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2}(?:\.\d+)?))(Z|[\+-]\d{2}:\d{2})?)$
+                                      type: string
+                                  required:
+                                    - name
+                                  type: object
+                                type: array
+                              storage:
+                                default: File
+                                description: Storage defines the storage type for stream data.
+                                enum:
+                                  - File
+                                  - Memory
+                                type: string
+                              subjects:
+                                description: Subjects is a list of subjects to consume, supports wildcards.
+                                items:
+                                  type: string
+                                type: array
+                              template:
+                                description: Template is the owner of the template associated with this stream.
+                                type: string
+                            required:
+                              - discard
+                              - maxBytes
+                              - maxConsumers
+                              - maxMsgs
+                              - retention
+                              - storage
+                            type: object
+                        required:
+                          - config
+
+                    resourceConfig:
+                      type: object
+                      description: Defines general properties for this resource.
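+                      # Hypothetical illustration of a filled-in resourceConfig:
+                      #   resourceConfig:
+                      #     accountSecret: my-network-account
+                      #     userSecret: my-network-user
+                      #     natssecrets:
+                      #       providerConfigName: vault-natssecrets
+                      #     kubernetes:
+                      #       providerConfigName: kubernetes-provider
+                      # (all names above are made up for this sketch).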
+                      properties:
+                        accountSecret:
+                          description: Name of secret containing the account information
+                          type: string
+                        userSecret:
+                          description: Name of secret containing the user information
+                          type: string
+                        natssecrets:
+                          description: Config for provider natssecrets
+                          type: object
+                          properties:
+                            providerConfigName:
+                              description: Name of provider config to use for natssecrets
+                              type: string
+                        kubernetes:
+                          description: Config for provider kubernetes
+                          type: object
+                          properties:
+                            providerConfigName:
+                              description: Name of provider config to use for kubernetes
+                              type: string
+              required:
+                - parameters
diff --git a/charts/network-compositions-helm/templates/definition-streams.yaml b/charts/network-compositions-helm/templates/definition-streams.yaml
new file mode 100644
index 00000000..78880afc
--- /dev/null
+++ b/charts/network-compositions-helm/templates/definition-streams.yaml
+## WARNING: This file was autogenerated!
+## Manual modifications will be overwritten
+## unless ignore: true is set in generate.yaml!
+## Last Modification: 07:33:00 on 03-30-2023.
+
+apiVersion: apiextensions.crossplane.io/v1
+kind: CompositeResourceDefinition
+metadata:
+  name: xstreams.streams.network.edgefarm.io
+  labels:
+    {{- include "network-compositions.labels" . | nindent 4 }}
+spec:
+  claimNames:
+    kind: Stream
+    plural: streams
+  group: streams.network.edgefarm.io
+  names:
+    categories:
+      - crossplane
+      - composition
+      - streams
+    kind: XStream
+    plural: xstreams
+  versions:
+    - additionalPrinterColumns:
+        - jsonPath: .metadata.annotations.crossplane\.io/external-name
+          name: EXTERNAL-NAME
+          type: string
+        - jsonPath: .spec.forProvider.domain
+          name: DOMAIN
+          type: string
+        - jsonPath: .metadata.creationTimestamp
+          name: AGE
+          type: date
+        - jsonPath: .status.atProvider.connection.address
+          name: ADDRESS
+          priority: 1
+          type: string
+        - jsonPath: .status.atProvider.connection.accountPublicKey
+          name: ACCOUNT PUB KEY
+          priority: 1
+          type: string
+        - jsonPath: .status.atProvider.state.messages
+          name: MESSAGES
+          priority: 1
+          type: string
+        - jsonPath: .status.atProvider.state.bytes
+          name: BYTES
+          priority: 1
+          type: string
+        - jsonPath: .status.atProvider.state.consumerCount
+          name: CONSUMERS
+          priority: 1
+          type: string
+      name: v1alpha1
+      referenceable: true
+      schema:
+        openAPIV3Schema:
+          properties:
+            spec:
+              description: A StreamSpec defines the desired state of a Stream.
+              properties:
+                deletionPolicy:
+                  default: Delete
+                  description: DeletionPolicy specifies what will happen to the underlying external when this managed resource is deleted - either "Delete" or "Orphan" the external resource.
+                  enum:
+                    - Orphan
+                    - Delete
+                  type: string
+                forProvider:
+                  description: StreamParameters are the configurable fields of a Stream.
+                  properties:
+                    config:
+                      description: Config is the stream configuration.
+                      properties:
+                        allowDirect:
+                          description: AllowDirect is a flag that if true and the stream has more than one replica, each replica will respond to direct get requests for individual messages, not only the leader.
+                          type: boolean
+                        allowRollup:
+                          description: AllowRollup is a flag to allow the use of the Nats-Rollup header to replace all contents of a stream, or subject in a stream, with a single new message.
+                          type: boolean
+                        denyDelete:
+                          description: DenyDelete is a flag to restrict the ability to delete messages from a stream via the API.
+                          type: boolean
+                        denyPurge:
+                          description: DenyPurge is a flag to restrict the ability to purge messages from a stream via the API.
+                          type: boolean
+                        description:
+                          description: Description is a human readable description of the stream.
+                          type: string
+                        discard:
+                          default: Old
+                          description: 'Discard defines the behavior of discarding messages when any streams'' limits have been reached. Old (default): This policy will delete the oldest messages in order to maintain the limit. For example, if MaxAge is set to one minute, the server will automatically delete messages older than one minute with this policy. New: This policy will reject new messages from being appended to the stream if it would exceed one of the limits. An extension to this policy is DiscardNewPerSubject which will apply this policy on a per-subject basis within the stream.'
+                          enum:
+                            - Old
+                            - New
+                          type: string
+                        discardNewPerSubject:
+                          default: false
+                          description: DiscardNewPerSubject will apply the New discard policy on a per-subject basis within the stream.
+                          type: boolean
+                        duplicates:
+                          default: 2m0s
+                          description: Duplicates defines the time window within which to track duplicate messages.
+                          pattern: ^(([0-9]+[smh]){1,3})$
+                          type: string
+                        maxAge:
+                          default: 0s
+                          description: MaxAge is the maximum age of a message in the stream. Format is a string duration, e.g. 1h, 1m, 1s, 1h30m or 2h3m4s.
+                          pattern: ([0-9]+h)?([0-9]+m)?([0-9]+s)?
+                          type: string
+                        maxBytes:
+                          default: -1
+                          description: MaxBytes defines how many bytes the Stream may contain. Adheres to Discard Policy, removing oldest or refusing new messages if the Stream exceeds this size.
+                          format: int64
+                          type: integer
+                        maxConsumers:
+                          default: -1
+                          description: MaxConsumers defines how many Consumers can be defined for a given Stream. Define -1 for unlimited.
+                          type: integer
+                        maxMsgSize:
+                          default: -1
+                          description: MaxMsgSize defines the largest message that will be accepted by the Stream.
+                          format: int32
+                          minimum: -1
+                          type: integer
+                        maxMsgs:
+                          default: -1
+                          description: MaxMsgs defines how many messages may be in a Stream. Adheres to Discard Policy, removing oldest or refusing new messages if the Stream exceeds this number of messages.
+                          format: int64
+                          type: integer
+                        maxMsgsPerSubject:
+                          default: -1
+                          description: MaxMsgsPerSubject defines how many messages in the stream to retain per subject.
+                          format: int64
+                          minimum: -1
+                          type: integer
+                        mirror:
+                          description: Mirror is the mirror configuration for the stream.
+                          properties:
+                            domain:
+                              description: Domain is the JetStream domain of where the origin stream exists. This is commonly used between a cluster/supercluster and a leaf node/cluster.
+                              type: string
+                            external:
+                              description: External is the external stream configuration.
+                              properties:
+                                apiPrefix:
+                                  description: APIPrefix is the prefix for the API of the external stream.
+                                  type: string
+                                deliverPrefix:
+                                  description: DeliverPrefix is the prefix for the deliver subject of the external stream.
+                                  type: string
+                              required:
+                                - apiPrefix
+                              type: object
+                            filterSubject:
+                              description: FilterSubject is an optional filter subject which will include only messages that match the subject, typically including a wildcard.
+                              type: string
+                            name:
+                              description: Name of the origin stream to source messages from.
+                              type: string
+                            startSeq:
+                              description: StartSeq is an optional start sequence of the origin stream to start mirroring from.
+                              format: int64
+                              type: integer
+                            startTime:
+                              description: StartTime is an optional message start time to start mirroring from. Any messages that are equal to or greater than the start time will be included. The time format is RFC 3339, e.g. 2023-01-09T14:48:32Z
+                              pattern: ^((?:(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2}(?:\.\d+)?))(Z|[\+-]\d{2}:\d{2})?)$
+                              type: string
+                          required:
+                            - name
+                          type: object
+                        mirrorDirect:
+                          description: MirrorDirect is a flag that if true, and the stream is a mirror, the mirror will participate in serving direct get requests for individual messages from the origin stream.
+                          type: boolean
+                        noAck:
+                          default: false
+                          description: NoAck is a flag to disable acknowledging messages that are received by the Stream.
+                          type: boolean
+                        placement:
+                          description: Placement is the placement policy for the stream.
+                          properties:
+                            cluster:
+                              description: Cluster is the name of the Jetstream cluster.
+                              type: string
+                            tags:
+                              description: Tags defines a list of server tags.
+                              items:
+                                type: string
+                              type: array
+                          required:
+                            - cluster
+                          type: object
+                        rePublish:
+                          description: Allow republish of the message after being sequenced and stored.
+                          properties:
+                            destination:
+                              description: Destination is the destination subject messages will be re-published to. The source and destination must be a valid subject mapping. For information on subject mapping see https://docs.nats.io/jetstream/concepts/subjects#subject-mapping
+                              type: string
+                            headersOnly:
+                              description: HeadersOnly defines if true, that the message data will not be included in the re-published message, only an additional header Nats-Msg-Size indicating the size of the message in bytes.
+                              type: boolean
+                            source:
+                              default: '>'
+                              description: Source is an optional subject pattern which is a subset of the subjects bound to the stream. It defaults to all messages in the stream, e.g. >.
+                              type: string
+                          required:
+                            - destination
+                            - source
+                          type: object
+                        replicas:
+                          default: 1
+                          description: Replicas defines how many replicas to keep for each message in a clustered JetStream.
+                          maximum: 5
+                          minimum: 1
+                          type: integer
+                        retention:
+                          default: Limits
+                          description: Retention defines the retention policy for the stream.
+                          enum:
+                            - Limits
+                            - Interest
+                            - WorkQueue
+                          type: string
+                        sealed:
+                          description: Sealed is a flag to prevent message deletion from the stream via limits or API.
+                          type: boolean
+                        sources:
+                          description: Sources is the list of one or more sources configurations for the stream.
+                          items:
+                            description: StreamSource dictates how streams can source from other streams.
+                            properties:
+                              domain:
+                                description: Domain is the JetStream domain of where the origin stream exists. This is commonly used between a cluster/supercluster and a leaf node/cluster.
+                                type: string
+                              external:
+                                description: External is the external stream configuration.
+                                properties:
+                                  apiPrefix:
+                                    description: APIPrefix is the prefix for the API of the external stream.
+                                    type: string
+                                  deliverPrefix:
+                                    description: DeliverPrefix is the prefix for the deliver subject of the external stream.
+                                    type: string
+                                required:
+                                  - apiPrefix
+                                type: object
+                              filterSubject:
+                                description: FilterSubject is an optional filter subject which will include only messages that match the subject, typically including a wildcard.
+                                type: string
+                              name:
+                                description: Name of the origin stream to source messages from.
+                                type: string
+                              startSeq:
+                                description: StartSeq is an optional start sequence of the origin stream to start mirroring from.
+                                format: int64
+                                type: integer
+                              startTime:
+                                description: StartTime is an optional message start time to start mirroring from. Any messages that are equal to or greater than the start time will be included. The time format is RFC 3339, e.g. 2023-01-09T14:48:32Z
+                                pattern: ^((?:(\d{4}-\d{2}-\d{2})T(\d{2}:\d{2}:\d{2}(?:\.\d+)?))(Z|[\+-]\d{2}:\d{2})?)$
+                                type: string
+                            required:
+                              - name
+                            type: object
+                          type: array
+                        storage:
+                          default: File
+                          description: Storage defines the storage type for stream data.
+                          enum:
+                            - File
+                            - Memory
+                          type: string
+                        subjects:
+                          description: Subjects is a list of subjects to consume, supports wildcards.
+                          items:
+                            type: string
+                          type: array
+                        template:
+                          description: Template is the owner of the template associated with this stream.
+                          type: string
+                      required:
+                        - discard
+                        - maxBytes
+                        - maxConsumers
+                        - maxMsgs
+                        - retention
+                        - storage
+                      type: object
+                    domain:
+                      description: Domain is the Jetstream domain in which the stream is created.
+                      type: string
+                  required:
+                    - config
+                  type: object
+                providerConfigRef:
+                  description: ProviderConfigReference specifies how the provider that will be used to create, observe, update, and delete this managed resource should be configured.
+                  properties:
+                    name:
+                      description: Name of the referenced object.
+                      type: string
+                    policy:
+                      description: Policies for referencing.
+                      properties:
+                        resolution:
+                          default: Required
+                          description: Resolution specifies whether resolution of this reference is required. The default is 'Required', which means the reconcile will fail if the reference cannot be resolved. 'Optional' means this reference will be a no-op if it cannot be resolved.
+                          enum:
+                            - Required
+                            - Optional
+                          type: string
+                        resolve:
+                          description: Resolve specifies when this reference should be resolved. The default is 'IfNotPresent', which will attempt to resolve the reference only when the corresponding field is not present. Use 'Always' to resolve the reference on every reconcile.
+                          enum:
+                            - Always
+                            - IfNotPresent
+                          type: string
+                      type: object
+                  required:
+                    - name
+                  type: object
+              required:
+                - forProvider
+              type: object
+            status:
+              description: A StreamStatus represents the observed state of a Stream.
+              properties:
+                atProvider:
+                  description: StreamObservation are the observable fields of a Stream.
+                  properties:
+                    clusterInfo:
+                      description: ClusterInfo shows information about the underlying set of servers that make up the stream.
+                      properties:
+                        leader:
+                          description: Leader is the leader of the cluster.
+                          type: string
+                        name:
+                          description: Name is the name of the cluster.
+                          type: string
+                        replicas:
+                          description: Replicas are the replicas of the cluster.
+                          items:
+                            description: PeerInfo shows information about all the peers in the cluster that are supporting the stream or consumer.
+                            properties:
+                              active:
+                                type: string
+                              current:
+                                type: boolean
+                              lag:
+                                format: int64
+                                type: integer
+                              name:
+                                type: string
+                              offline:
+                                type: boolean
+                            required:
+                              - active
+                              - current
+                              - name
+                            type: object
+                          type: array
+                      type: object
+                    connection:
+                      description: Connection shows information about the connection to the stream.
+                      properties:
+                        accountPublicKey:
+                          description: AccountPublicKey is the public key of the used account.
+                          type: string
+                        address:
+                          description: Address is the address of the connection.
+                          type: string
+                        userPublicKey:
+                          description: UserPublicKey is the public key of the used user.
+                          type: string
+                      required:
+                        - accountPublicKey
+                        - address
+                        - userPublicKey
+                      type: object
+                    domain:
+                      description: Domain is the Jetstream domain in which the stream is created.
+                      type: string
+                    state:
+                      description: State is the current state of the stream
+                      properties:
+                        bytes:
+                          description: Bytes is the number of bytes in the stream.
+                          type: string
+                        consumerCount:
+                          description: ConsumerCount is the number of consumers in the stream.
+                          type: integer
+                        deleted:
+                          description: Deleted TBD
+                          items:
+                            format: int64
+                            type: integer
+                          type: array
+                        firstSequence:
+                          description: FirstSequence is the first sequence number in the stream.
+                          format: int64
+                          type: integer
+                        firstTimestamp:
+                          description: FirstTimestamp is the first timestamp in the stream.
+                          type: string
+                        lastSequence:
+                          description: LastSequence is the last sequence number in the stream.
+                          format: int64
+                          type: integer
+                        lastTimestamp:
+                          description: LastTimestamp is the last timestamp in the stream.
+                          type: string
+                        messages:
+                          description: Messages is the number of messages in the stream.
+                          format: int64
+                          type: integer
+                        numDeleted:
+                          description: NumDeleted TBD
+                          type: integer
+                        numSubjects:
+                          description: NumSubjects is the number of subjects in the stream.
+                          format: int64
+                          type: integer
+                        subjects:
+                          additionalProperties:
+                            format: int64
+                            type: integer
+                          description: Subjects is a map of subjects to their number of messages.
+                          type: object
+                      required:
+                        - bytes
+                        - consumerCount
+                        - firstSequence
+                        - lastSequence
+                        - messages
+                      type: object
+                  type: object
+                observed:
+                  description: Freeform field containing information about the observed status.
+                  type: object
+                  x-kubernetes-preserve-unknown-fields: true
+                uid:
+                  description: The unique ID of this Stream resource reported by the provider
+                  type: string
+              type: object
+      served: true
diff --git a/charts/network-compositions-helm/values.yaml b/charts/network-compositions-helm/values.yaml
new file mode 100644
index 00000000..5016efbe
--- /dev/null
+++ b/charts/network-compositions-helm/values.yaml
+networkComposition:
+  # The order of function execution is fixed and cannot be changed.
+  # The order is:
+  # 1. addStreams
+  # 2. addEdgeNetworks
+  # 3. addUsers
+  # 4. addConsumers
+  functions:
+    addStreams:
+      image: ghcr.io/edgefarm/edgefarm.network/xfn-streams
+      tag: 1.0.0-beta.23
+      imagePullPolicy: IfNotPresent
+      timeout: 60s
+      network:
+        policy: Runner
+
+    addEdgeNetworks:
+      image: ghcr.io/edgefarm/edgefarm.network/xfn-edgenetworks
+      tag: 1.0.0-beta.23
+      imagePullPolicy: IfNotPresent
+      timeout: 60s
+      network:
+        policy: Isolated
+
+    addUsers:
+      image: ghcr.io/edgefarm/edgefarm.network/xfn-users
+      tag: 1.0.0-beta.23
+      imagePullPolicy: IfNotPresent
+      timeout: 60s
+      network:
+        policy: Isolated
+
+    addConsumers:
+      image: ghcr.io/edgefarm/edgefarm.network/xfn-consumers
+      tag: 1.0.0-beta.23
+      imagePullPolicy: IfNotPresent
+      timeout: 60s
+      network:
+        policy: Isolated
+
+  # Log the output of the composition functions to a webhook.
+  log2webhook:
+    # Define the stages where the log2webhook function should be used.
+    # You can define several stages.
+    enabled:
+      initial: false
+      afterAddStreams: false
+      afterAddEdgeNetworks: false
+      afterAddUsers: false
+      afterAddConsumers: false
+    # Use a webhook service like https://webhook.site/ to test the log2webhook function.
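+    # Hypothetical usage sketch (not a shipped default): to inspect the
+    # composition output after the addStreams step, point webhookURL at your
+    # own webhook.site URL and flip the matching stage to true, e.g.:
+    #   enabled:
+    #     afterAddStreams: true
+    #   webhookURL: https://webhook.site/<your-unique-id>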
+    webhookURL: https://webhook.site/
+    image: ghcr.io/edgefarm/edgefarm.network/xfn-log2webhook
+    tag: latest
+    imagePullPolicy: IfNotPresent
diff --git a/charts/network-dependencies-webhook-helm/Chart.yaml b/charts/network-dependencies-webhook-helm/Chart.yaml
new file mode 100644
index 00000000..fefcde30
--- /dev/null
+++ b/charts/network-dependencies-webhook-helm/Chart.yaml
+apiVersion: v2
+name: network-dependencies-webhook-helm
+description: A Helm chart for network-dependencies-webhook. Note that you need to have crossplane and provider-nats installed to be able to install this chart.
+
+type: application
+version: 0.1.0
+appVersion: "1.0.0-beta.23"
+
+keywords:
+  - edgefarm.network
+  - crossplane
+  - provider
+  - crd
+  - nats
+
+sources:
+  - https://github.com/edgefarm/edgefarm.network/tree/beta/cmd/network-dependencies-webhook
+
+maintainers:
+  - name: Armin Schlegel
+    email: armin.schlegel@gmx.de
diff --git a/charts/network-dependencies-webhook-helm/README.md b/charts/network-dependencies-webhook-helm/README.md
new file mode 100644
index 00000000..415d6756
--- /dev/null
+++ b/charts/network-dependencies-webhook-helm/README.md
+# network-dependencies-webhook
+
+This helm chart is part of edgefarm.network and contains network-dependencies-webhook, a Kubernetes webhook that prevents edgenetwork pods from being deleted while NATS stream resources still exist.
+Note that for this to work you need to have Crossplane (>=v1.11.3) installed and configured.
+
+## Prerequisites
+
+  Kubernetes 1.22+
+  Helm 3.2.0+
+  Crossplane 1.11.3+
\ No newline at end of file
diff --git a/charts/network-dependencies-webhook-helm/templates/_helpers.tpl b/charts/network-dependencies-webhook-helm/templates/_helpers.tpl
new file mode 100644
index 00000000..fd87c212
--- /dev/null
+++ b/charts/network-dependencies-webhook-helm/templates/_helpers.tpl
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "network-dependencies-webhook.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "network-dependencies-webhook.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "network-dependencies-webhook.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "network-dependencies-webhook.labels" -}}
+helm.sh/chart: {{ include "network-dependencies-webhook.chart" . }}
+{{ include "network-dependencies-webhook.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "network-dependencies-webhook.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "network-dependencies-webhook.name" . }}
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + diff --git a/charts/network-dependencies-webhook-helm/templates/certificate.yaml b/charts/network-dependencies-webhook-helm/templates/certificate.yaml new file mode 100644 index 00000000..42586db2 --- /dev/null +++ b/charts/network-dependencies-webhook-helm/templates/certificate.yaml @@ -0,0 +1,22 @@ +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ include "network-dependencies-webhook.name" . }}-selfsigned-issuer + namespace: {{ .Release.Namespace }} +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ include "network-dependencies-webhook.name" . }} + namespace: {{ .Release.Namespace }} +spec: + commonName: {{ include "network-dependencies-webhook.name" . }}.{{ .Release.Namespace }}.svc + dnsNames: + - {{ include "network-dependencies-webhook.name" . }}.{{ .Release.Namespace }}.svc.cluster.local + - {{ include "network-dependencies-webhook.name" . }}.{{ .Release.Namespace }}.svc + issuerRef: + kind: Issuer + name: {{ include "network-dependencies-webhook.name" . }}-selfsigned-issuer + secretName: {{ include "network-dependencies-webhook.name" . }}-server-cert diff --git a/charts/network-dependencies-webhook-helm/templates/deployment.yaml b/charts/network-dependencies-webhook-helm/templates/deployment.yaml new file mode 100644 index 00000000..21e3c016 --- /dev/null +++ b/charts/network-dependencies-webhook-helm/templates/deployment.yaml @@ -0,0 +1,62 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "network-dependencies-webhook.name" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "network-dependencies-webhook.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "network-dependencies-webhook.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "network-dependencies-webhook.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "network-dependencies-webhook.name" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: tls + containerPort: {{ .Values.service.targetPort }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: webhook-tls-certs + mountPath: /etc/webhook/certs/ + readOnly: true + volumes: + - name: webhook-tls-certs + secret: + secretName: {{ include "network-dependencies-webhook.name" . }}-server-cert + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/charts/network-dependencies-webhook-helm/templates/rbac.yaml b/charts/network-dependencies-webhook-helm/templates/rbac.yaml new file mode 100644 index 00000000..e316545e --- /dev/null +++ b/charts/network-dependencies-webhook-helm/templates/rbac.yaml @@ -0,0 +1,64 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "network-dependencies-webhook.name" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "network-dependencies-webhook.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "network-dependencies-webhook.name" . }} +rules: + - apiGroups: [""] + resources: + - "pods" + verbs: + - "update" + - "get" + - "watch" + - "list" + - apiGroups: + - "streams.network.edgefarm.io" + resources: + - "xstreams" + - "xconsumers" + verbs: + - "get" + - "list" + - "watch" + - apiGroups: + - "issue.natssecrets.crossplane.io" + resources: + - "users" + - "accounts" + verbs: + - "get" + - "list" + - "watch" + - apiGroups: + - "nats.crossplane.io" + resources: + - "providerconfigs" + verbs: + - "get" + - "list" + - "watch" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "network-dependencies-webhook.name" . }}-binding +subjects: + - kind: ServiceAccount + name: {{ include "network-dependencies-webhook.name" . }} + namespace: {{ .Release.Namespace }} +roleRef: + kind: ClusterRole + name: {{ include "network-dependencies-webhook.name" . }} + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/charts/network-dependencies-webhook-helm/templates/service.yaml b/charts/network-dependencies-webhook-helm/templates/service.yaml new file mode 100644 index 00000000..96eeacd5 --- /dev/null +++ b/charts/network-dependencies-webhook-helm/templates/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "network-dependencies-webhook.name" . }} + namespace: {{ .Release.Namespace }} + labels: + {{- include "network-dependencies-webhook.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: {{ .Values.service.targetPort }} + protocol: TCP + name: tls + selector: + {{- include "network-dependencies-webhook.selectorLabels" . | nindent 4 }} diff --git a/charts/network-dependencies-webhook-helm/templates/validation-webhook.yaml b/charts/network-dependencies-webhook-helm/templates/validation-webhook.yaml new file mode 100644 index 00000000..b590b649 --- /dev/null +++ b/charts/network-dependencies-webhook-helm/templates/validation-webhook.yaml @@ -0,0 +1,101 @@ +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: {{ include "network-dependencies-webhook.name" . }} + annotations: + cert-manager.io/inject-ca-from: {{ .Release.Namespace }}/{{ include "network-dependencies-webhook.name" . }} +webhooks: + - name: validating.network.edgefarm.io.v1alpha1.pods + sideEffects: "None" + objectSelector: + matchLabels: + network.edgefarm.io/type: leaf + admissionReviewVersions: + - "v1beta1" + clientConfig: + service: + name: {{ include "network-dependencies-webhook.name" . 
}} + namespace: {{ .Release.Namespace }} + path: /validating-network-edgefarm-io-v1alpha1-pods + caBundle: Cg== + rules: + - apiGroups: [""] + apiVersions: ["v1"] + resources: ["pods"] + operations: ["DELETE"] + + - name: validating.network.edgefarm.io.v1alpha1.users + sideEffects: "None" + objectSelector: + matchExpressions: + - key: dependsOnUid + operator: Exists + admissionReviewVersions: + - "v1beta1" + clientConfig: + service: + name: {{ include "network-dependencies-webhook.name" . }} + namespace: {{ .Release.Namespace }} + path: /validating-network-edgefarm-io-v1alpha1-users + caBundle: Cg== + rules: + - apiGroups: ["issue.natssecrets.crossplane.io"] + apiVersions: ["v1alpha1"] + resources: ["users"] + operations: ["DELETE"] + + - name: validating.network.edgefarm.io.v1alpha1.streams + sideEffects: "None" + objectSelector: + matchExpressions: + - key: dependsOnUid + operator: Exists + admissionReviewVersions: + - "v1beta1" + clientConfig: + service: + name: {{ include "network-dependencies-webhook.name" . }} + namespace: {{ .Release.Namespace }} + path: /validating-network-edgefarm-io-v1alpha1-streams + caBundle: Cg== + rules: + - apiGroups: ["streams.network.edgefarm.io"] + apiVersions: ["v1alpha1"] + resources: ["xstreams"] + operations: ["DELETE"] + + - name: validating.network.edgefarm.io.v1alpha1.providerconfigs + sideEffects: "None" + objectSelector: + matchExpressions: + - key: dependsOnUid + operator: Exists + admissionReviewVersions: + - "v1beta1" + clientConfig: + service: + name: {{ include "network-dependencies-webhook.name" . }} + namespace: {{ .Release.Namespace }} + path: /validating-network-edgefarm-io-v1alpha1-providerconfigs + caBundle: Cg== + rules: + - apiGroups: ["nats.crossplane.io"] + apiVersions: ["v1alpha1"] + resources: ["providerconfigs"] + operations: ["DELETE"] + + - name: validating.network.edgefarm.io.v1alpha1.accounts + sideEffects: "None" + admissionReviewVersions: + - "v1beta1" + clientConfig: + service: + name: {{ include "network-dependencies-webhook.name" . 
}}
+          namespace: {{ .Release.Namespace }}
+          path: /validating-network-edgefarm-io-v1alpha1-accounts
+      caBundle: Cg==
+    rules:
+      - apiGroups: ["issue.natssecrets.crossplane.io"]
+        apiVersions: ["v1alpha1"]
+        resources: ["accounts"]
+        operations: ["DELETE"]
\ No newline at end of file
diff --git a/charts/network-dependencies-webhook-helm/values.yaml b/charts/network-dependencies-webhook-helm/values.yaml
new file mode 100644
index 00000000..4b784991
--- /dev/null
+++ b/charts/network-dependencies-webhook-helm/values.yaml
@@ -0,0 +1,55 @@
+replicaCount: 1
+image:
+  repository: ghcr.io/edgefarm/edgefarm.network/network-dependencies-webhook
+  pullPolicy: IfNotPresent
+  # If empty, the chart appVersion is used as the image tag.
+  tag: ""
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+
+serviceAccount:
+  # Annotations to add to the service account
+  annotations: {}
+
+podAnnotations: {}
+
+podSecurityContext:
+  {}
+  # fsGroup: 2000
+
+securityContext:
+  {}
+  # capabilities:
+  #   drop:
+  #     - ALL
+  # readOnlyRootFilesystem: true
+  # runAsNonRoot: true
+  # runAsUser: 1000
+
+resources:
+  limits:
+    cpu: 200m
+    memory: 128M
+  requests:
+    cpu: 100m
+    memory: 64M
+
+service:
+  type: ClusterIP
+  port: 443
+  targetPort: 8443
+
+autoscaling:
+  enabled: false
+  # minReplicas: 1
+  # maxReplicas: 100
+  # targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/charts/network-resource-info-helm/Chart.yaml b/charts/network-resource-info-helm/Chart.yaml
new file mode 100644
index 00000000..b1480957
--- /dev/null
+++ b/charts/network-resource-info-helm/Chart.yaml
@@ -0,0 +1,21 @@
+apiVersion: v2
+name: network-resource-info-helm
+description: A Helm chart for network-resource-info. Note that you need to have crossplane and provider-nats installed to be able to install this chart.
+
+type: application
+version: 0.1.0
+appVersion: "1.0.0-beta.23"
+
+keywords:
+  - edgefarm.network
+  - crossplane
+  - provider
+  - crd
+  - nats
+
+sources:
+  - https://github.com/edgefarm/edgefarm.network/tree/beta/cmd/network-resource-info
+
+maintainers:
+  - name: Armin Schlegel
+    email: armin.schlegel@gmx.de
diff --git a/charts/network-resource-info-helm/README.md b/charts/network-resource-info-helm/README.md
new file mode 100644
index 00000000..81db8176
--- /dev/null
+++ b/charts/network-resource-info-helm/README.md
@@ -0,0 +1,10 @@
+# network-resource-info
+
+This helm chart is part of edgefarm.network and contains network-resource-info, a service that interacts closely with the EdgeFarm network-compositions.
+Note that for this to work you need to have Crossplane (>=v1.11.3) installed and configured.
+
+## Prerequisites
+
+  Kubernetes 1.22+
+  Helm 3.2.0+
+  Crossplane 1.11.3+
diff --git a/charts/network-resource-info-helm/templates/_helpers.tpl b/charts/network-resource-info-helm/templates/_helpers.tpl
new file mode 100644
index 00000000..835b94af
--- /dev/null
+++ b/charts/network-resource-info-helm/templates/_helpers.tpl
@@ -0,0 +1,55 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "network-resource-info.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
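+Illustrative example (hypothetical release names): a release named "edgefarm"
+renders "edgefarm-network-resource-info", while a release whose name already
+contains the chart name keeps the release name unchanged.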
+*/}} +{{- define "network-resource-info.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "network-resource-info.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "network-resource-info.labels" -}} +helm.sh/chart: {{ include "network-resource-info.chart" . }} +{{ include "network-resource-info.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "network-resource-info.selectorLabels" -}} +app.kubernetes.io/name: {{ include "network-resource-info.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + diff --git a/charts/network-resource-info-helm/templates/deployment.yaml b/charts/network-resource-info-helm/templates/deployment.yaml new file mode 100644 index 00000000..de862048 --- /dev/null +++ b/charts/network-resource-info-helm/templates/deployment.yaml @@ -0,0 +1,54 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "network-resource-info.name" . }} + labels: + {{- include "network-resource-info.labels" . | nindent 4 }} + namespace: {{ .Values.namespaceOverride | default .Release.Namespace }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "network-resource-info.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "network-resource-info.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "network-resource-info.name" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/charts/network-resource-info-helm/templates/rbac.yaml b/charts/network-resource-info-helm/templates/rbac.yaml new file mode 100644 index 00000000..b8527728 --- /dev/null +++ b/charts/network-resource-info-helm/templates/rbac.yaml @@ -0,0 +1,41 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "network-resource-info.name" . 
}}
+  namespace: {{ .Values.namespaceOverride | default .Release.Namespace }}
+  labels:
+    {{- include "network-resource-info.labels" . | nindent 4 }}
+  {{- with .Values.serviceAccount.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ include "network-resource-info.name" . }}
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - pods
+    verbs: ["get", "list"]
+  - apiGroups:
+      - "streams.network.edgefarm.io"
+    resources:
+      - users
+      - xstreams
+    verbs: ["get", "list"]
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "network-resource-info.name" . }}-binding
+subjects:
+  - kind: ServiceAccount
+    name: {{ include "network-resource-info.name" . }}
+    namespace: {{ .Values.namespaceOverride | default .Release.Namespace }}
+roleRef:
+  kind: ClusterRole
+  name: {{ include "network-resource-info.name" . }}
+  apiGroup: rbac.authorization.k8s.io
\ No newline at end of file
diff --git a/charts/network-resource-info-helm/templates/service.yaml b/charts/network-resource-info-helm/templates/service.yaml
new file mode 100644
index 00000000..dd60affc
--- /dev/null
+++ b/charts/network-resource-info-helm/templates/service.yaml
@@ -0,0 +1,16 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "network-resource-info.name" . }}
+  namespace: {{ .Values.namespaceOverride | default .Release.Namespace }}
+  labels:
+    {{- include "network-resource-info.labels" . | nindent 4 }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    - port: {{ .Values.service.port }}
+      targetPort: http
+      protocol: TCP
+      name: http
+  selector:
+    {{- include "network-resource-info.selectorLabels" . | nindent 4 }}
diff --git a/charts/network-resource-info-helm/values.yaml b/charts/network-resource-info-helm/values.yaml
new file mode 100644
index 00000000..301c6323
--- /dev/null
+++ b/charts/network-resource-info-helm/values.yaml
@@ -0,0 +1,59 @@
+replicaCount: 1
+image:
+  repository: ghcr.io/edgefarm/edgefarm.network/network-resource-info
+  pullPolicy: IfNotPresent
+  tag: ""
+
+imagePullSecrets: []
+nameOverride: ""
+fullnameOverride: ""
+namespaceOverride: ""
+
+serviceAccount:
+  # Specifies whether a service account should be created
+  create: true
+  # Annotations to add to the service account
+  annotations: {}
+  # The name of the service account to use.
+  # If not set and create is true, a name is generated using the fullname template
+  name: ""
+
+podAnnotations: {}
+
+podSecurityContext:
+  {}
+  # fsGroup: 2000
+
+securityContext:
+  {}
+  # capabilities:
+  #   drop:
+  #     - ALL
+  # readOnlyRootFilesystem: true
+  # runAsNonRoot: true
+  # runAsUser: 1000
+
+resources:
+  limits:
+    cpu: 200m
+    memory: 128Mi
+  requests:
+    cpu: 100m
+    memory: 128Mi
+
+service:
+  type: ClusterIP
+  port: 9090
+
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 100
+  targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/charts/provider-kubernetes-helm/Chart.yaml b/charts/provider-kubernetes-helm/Chart.yaml
new file mode 100644
index 00000000..1a48210f
--- /dev/null
+++ b/charts/provider-kubernetes-helm/Chart.yaml
@@ -0,0 +1,20 @@
+apiVersion: v2
+name: provider-kubernetes-helm
+description: A Helm chart for provider-kubernetes. Note that you need to have crossplane installed to be able to install this chart.
+ +type: application +version: 0.1.0 +appVersion: "v0.7.0" + +keywords: + - crossplane + - provider + - crd + - kubernetes + +sources: + - https://github.com/crossplane-contrib/provider-kubernetes + +maintainers: + - name: Armin Schlegel + email: armin.schlegel@gmx.de diff --git a/charts/provider-kubernetes-helm/crds/kubernetes.crossplane.io_objects.yaml b/charts/provider-kubernetes-helm/crds/kubernetes.crossplane.io_objects.yaml new file mode 100644 index 00000000..755adcf2 --- /dev/null +++ b/charts/provider-kubernetes-helm/crds/kubernetes.crossplane.io_objects.yaml @@ -0,0 +1,447 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: objects.kubernetes.crossplane.io +spec: + group: kubernetes.crossplane.io + names: + categories: + - crossplane + - managed + - kubernetes + kind: Object + listKind: ObjectList + plural: objects + singular: object + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .spec.forProvider.manifest.kind + name: KIND + type: string + - jsonPath: .spec.forProvider.manifest.apiVersion + name: APIVERSION + priority: 1 + type: string + - jsonPath: .spec.forProvider.manifest.metadata.name + name: METANAME + priority: 1 + type: string + - jsonPath: .spec.forProvider.manifest.metadata.namespace + name: METANAMESPACE + priority: 1 + type: string + - jsonPath: .spec.providerConfigRef.name + name: PROVIDERCONFIG + type: string + - jsonPath: .status.conditions[?(@.type=='Synced')].status + name: SYNCED + type: string + - jsonPath: .status.conditions[?(@.type=='Ready')].status + name: READY + type: string + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + name: v1alpha1 + schema: + openAPIV3Schema: + description: A Object is an provider Kubernetes API type + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: A ObjectSpec defines the desired state of a Object. + properties: + connectionDetails: + items: + description: ConnectionDetail represents an entry in the connection + secret for an Object + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of + an entire object, this string should contain a valid JSON/Go + field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen + only to have some well-defined way of referencing a part of + an object. 
TODO: this design is not final and this field is + subject to change in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference + is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency' + type: string + toConnectionSecretKey: + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + x-kubernetes-map-type: atomic + type: array + deletionPolicy: + default: Delete + description: 'DeletionPolicy specifies what will happen to the underlying + external when this managed resource is deleted - either "Delete" + or "Orphan" the external resource. This field is planned to be deprecated + in favor of the ManagementPolicy field in a future release. Currently, + both could be set independently and non-default values would be + honored if the feature flag is enabled. See the design doc for more + information: https://github.com/crossplane/crossplane/blob/499895a25d1a1a0ba1604944ef98ac7a1a71f197/design/design-doc-observe-only-resources.md?plain=1#L223' + enum: + - Orphan + - Delete + type: string + forProvider: + description: ObjectParameters are the configurable fields of a Object. + properties: + manifest: + description: Raw JSON representation of the kubernetes object + to be created. + type: object + x-kubernetes-embedded-resource: true + x-kubernetes-preserve-unknown-fields: true + required: + - manifest + type: object + managementPolicy: + default: Default + description: A ManagementPolicy determines what should happen to the + underlying external resource when a managed resource is created, + updated, deleted, or observed. + enum: + - Default + - ObserveCreateUpdate + - ObserveDelete + - Observe + type: string + providerConfigRef: + default: + name: default + description: ProviderConfigReference specifies how the provider that + will be used to create, observe, update, and delete this managed + resource should be configured. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. 
+ enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + providerRef: + description: 'ProviderReference specifies the provider that will be + used to create, observe, update, and delete this managed resource. + Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`' + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this + reference is required. The default is 'Required', which + means the reconcile will fail if the reference cannot be + resolved. 'Optional' means this reference will be a no-op + if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will attempt + to resolve the reference only when the corresponding field + is not present. Use 'Always' to resolve the reference on + every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + publishConnectionDetailsTo: + description: PublishConnectionDetailsTo specifies the connection secret + config which contains a name, metadata and a reference to secret + store config to which any connection details for this managed resource + should be written. Connection details frequently include the endpoint, + username, and password required to connect to the managed resource. + properties: + configRef: + default: + name: default + description: SecretStoreConfigRef specifies which secret store + config should be used for this ConnectionSecret. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of + this reference is required. The default is 'Required', + which means the reconcile will fail if the reference + cannot be resolved. 'Optional' means this reference + will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should + be resolved. The default is 'IfNotPresent', which will + attempt to resolve the reference only when the corresponding + field is not present. Use 'Always' to resolve the reference + on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + metadata: + description: Metadata is the metadata for connection secret. + properties: + annotations: + additionalProperties: + type: string + description: Annotations are the annotations to be added to + connection secret. - For Kubernetes secrets, this will be + used as "metadata.annotations". - It is up to Secret Store + implementation for others store types. + type: object + labels: + additionalProperties: + type: string + description: Labels are the labels/tags to be added to connection + secret. - For Kubernetes secrets, this will be used as "metadata.labels". + - It is up to Secret Store implementation for others store + types. + type: object + type: + description: Type is the SecretType for the connection secret. + - Only valid for Kubernetes Secret Stores. + type: string + type: object + name: + description: Name is the name of the connection secret. 
+ type: string + required: + - name + type: object + readiness: + description: Readiness defines how the object's readiness condition + should be computed, if not specified it will be considered ready + as soon as the underlying external resource is considered up-to-date. + properties: + policy: + default: SuccessfulCreate + description: Policy defines how the Object's readiness condition + should be computed. + enum: + - SuccessfulCreate + - DeriveFromObject + type: string + type: object + references: + items: + description: Reference refers to an Object or arbitrary Kubernetes + resource and optionally patch values from that resource to the + current Object. + properties: + dependsOn: + description: DependsOn is used to declare dependency on other + Object or arbitrary Kubernetes resource. + properties: + apiVersion: + default: kubernetes.crossplane.io/v1alpha1 + description: APIVersion of the referenced object. + type: string + kind: + default: Object + description: Kind of the referenced object. + type: string + name: + description: Name of the referenced object. + type: string + namespace: + description: Namespace of the referenced object. + type: string + required: + - name + type: object + patchesFrom: + description: PatchesFrom is used to declare dependency on other + Object or arbitrary Kubernetes resource, and also patch fields + from this object. + properties: + apiVersion: + default: kubernetes.crossplane.io/v1alpha1 + description: APIVersion of the referenced object. + type: string + fieldPath: + description: FieldPath is the path of the field on the resource + whose value is to be used as input. + type: string + kind: + default: Object + description: Kind of the referenced object. + type: string + name: + description: Name of the referenced object. + type: string + namespace: + description: Namespace of the referenced object. + type: string + required: + - fieldPath + - name + type: object + toFieldPath: + description: ToFieldPath is the path of the field on the resource + whose value will be changed with the result of transforms. + Leave empty if you'd like to propagate to the same path as + patchesFrom.fieldPath. + type: string + type: object + type: array + writeConnectionSecretToRef: + description: WriteConnectionSecretToReference specifies the namespace + and name of a Secret to which any connection details for this managed + resource should be written. Connection details frequently include + the endpoint, username, and password required to connect to the + managed resource. This field is planned to be replaced in a future + release in favor of PublishConnectionDetailsTo. Currently, both + could be set independently and connection details would be published + to both without affecting each other. + properties: + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - name + - namespace + type: object + required: + - forProvider + type: object + status: + description: A ObjectStatus represents the observed state of a Object. + properties: + atProvider: + description: ObjectObservation are the observable fields of a Object. + properties: + manifest: + description: Raw JSON representation of the remote object. + type: object + x-kubernetes-embedded-resource: true + x-kubernetes-preserve-unknown-fields: true + type: object + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. 
+ properties: + lastTransitionTime: + description: LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: A Reason for this condition's last transition from + one status to another. + type: string + status: + description: Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/provider-kubernetes-helm/crds/kubernetes.crossplane.io_providerconfigs.yaml b/charts/provider-kubernetes-helm/crds/kubernetes.crossplane.io_providerconfigs.yaml new file mode 100644 index 00000000..e7ac72b6 --- /dev/null +++ b/charts/provider-kubernetes-helm/crds/kubernetes.crossplane.io_providerconfigs.yaml @@ -0,0 +1,230 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: providerconfigs.kubernetes.crossplane.io +spec: + group: kubernetes.crossplane.io + names: + categories: + - crossplane + - provider + - kubernetes + kind: ProviderConfig + listKind: ProviderConfigList + plural: providerconfigs + singular: providerconfig + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .spec.credentials.secretRef.name + name: SECRET-NAME + priority: 1 + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: A ProviderConfig configures a Template provider. + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: A ProviderConfigSpec defines the desired state of a ProviderConfig. + properties: + credentials: + description: + Credentials used to connect to the Kubernetes API. Typically + a kubeconfig file. Use InjectedIdentity for in-cluster config. + properties: + env: + description: + Env is a reference to an environment variable that + contains credentials that must be used to connect to the provider. + properties: + name: + description: Name is the name of an environment variable. + type: string + required: + - name + type: object + fs: + description: + Fs is a reference to a filesystem location that contains + credentials that must be used to connect to the provider. + properties: + path: + description: Path is a filesystem path. 
+ type: string + required: + - path + type: object + secretRef: + description: + A SecretRef is a reference to a secret key that contains + the credentials that must be used to connect to the provider. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + source: + description: Source of the provider credentials. + enum: + - None + - Secret + - InjectedIdentity + - Environment + - Filesystem + type: string + required: + - source + type: object + identity: + description: + Identity used to authenticate to the Kubernetes API. + The identity credentials can be used to supplement kubeconfig 'credentials', + for example by configuring a bearer token source such as OAuth. + properties: + env: + description: + Env is a reference to an environment variable that + contains credentials that must be used to connect to the provider. + properties: + name: + description: Name is the name of an environment variable. + type: string + required: + - name + type: object + fs: + description: + Fs is a reference to a filesystem location that contains + credentials that must be used to connect to the provider. + properties: + path: + description: Path is a filesystem path. + type: string + required: + - path + type: object + secretRef: + description: + A SecretRef is a reference to a secret key that contains + the credentials that must be used to connect to the provider. + properties: + key: + description: The key to select. + type: string + name: + description: Name of the secret. + type: string + namespace: + description: Namespace of the secret. + type: string + required: + - key + - name + - namespace + type: object + source: + description: Source of the provider credentials. + enum: + - None + - Secret + - InjectedIdentity + - Environment + - Filesystem + type: string + type: + description: Type of identity. + enum: + - GoogleApplicationCredentials + type: string + required: + - source + - type + type: object + required: + - credentials + type: object + status: + description: A ProviderConfigStatus reflects the observed state of a ProviderConfig. + properties: + conditions: + description: Conditions of the resource. + items: + description: A Condition that may apply to a resource. + properties: + lastTransitionTime: + description: + LastTransitionTime is the last time this condition + transitioned from one status to another. + format: date-time + type: string + message: + description: + A Message containing details about this condition's + last transition from one status to another, if any. + type: string + reason: + description: + A Reason for this condition's last transition from + one status to another. + type: string + status: + description: + Status of this condition; is it currently True, + False, or Unknown? + type: string + type: + description: + Type of this condition. At most one of each condition + type may apply to a resource at any point in time. + type: string + required: + - lastTransitionTime + - reason + - status + - type + type: object + type: array + users: + description: Users of this provider configuration. 
+ format: int64 + type: integer + type: object + required: + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/charts/provider-kubernetes-helm/crds/kubernetes.crossplane.io_providerconfigusages.yaml b/charts/provider-kubernetes-helm/crds/kubernetes.crossplane.io_providerconfigusages.yaml new file mode 100644 index 00000000..6c43b6e0 --- /dev/null +++ b/charts/provider-kubernetes-helm/crds/kubernetes.crossplane.io_providerconfigusages.yaml @@ -0,0 +1,111 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.11.3 + creationTimestamp: null + name: providerconfigusages.kubernetes.crossplane.io +spec: + group: kubernetes.crossplane.io + names: + categories: + - crossplane + - provider + - kubernetes + kind: ProviderConfigUsage + listKind: ProviderConfigUsageList + plural: providerconfigusages + singular: providerconfigusage + scope: Cluster + versions: + - additionalPrinterColumns: + - jsonPath: .metadata.creationTimestamp + name: AGE + type: date + - jsonPath: .providerConfigRef.name + name: CONFIG-NAME + type: string + - jsonPath: .resourceRef.kind + name: RESOURCE-KIND + type: string + - jsonPath: .resourceRef.name + name: RESOURCE-NAME + type: string + name: v1alpha1 + schema: + openAPIV3Schema: + description: A ProviderConfigUsage indicates that a resource is using a ProviderConfig. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + providerConfigRef: + description: ProviderConfigReference to the provider config being used. + properties: + name: + description: Name of the referenced object. + type: string + policy: + description: Policies for referencing. + properties: + resolution: + default: Required + description: Resolution specifies whether resolution of this reference + is required. The default is 'Required', which means the reconcile + will fail if the reference cannot be resolved. 'Optional' means + this reference will be a no-op if it cannot be resolved. + enum: + - Required + - Optional + type: string + resolve: + description: Resolve specifies when this reference should be resolved. + The default is 'IfNotPresent', which will attempt to resolve + the reference only when the corresponding field is not present. + Use 'Always' to resolve the reference on every reconcile. + enum: + - Always + - IfNotPresent + type: string + type: object + required: + - name + type: object + resourceRef: + description: ResourceReference to the managed resource using the provider + config. + properties: + apiVersion: + description: APIVersion of the referenced object. + type: string + kind: + description: Kind of the referenced object. + type: string + name: + description: Name of the referenced object. + type: string + uid: + description: UID of the referenced object. 
+              type: string
+            required:
+              - apiVersion
+              - kind
+              - name
+            type: object
+        required:
+          - providerConfigRef
+          - resourceRef
+        type: object
+      served: true
+      storage: true
+      subresources: {}
diff --git a/charts/provider-kubernetes-helm/templates/_helpers.tpl b/charts/provider-kubernetes-helm/templates/_helpers.tpl
new file mode 100644
index 00000000..f3e92de5
--- /dev/null
+++ b/charts/provider-kubernetes-helm/templates/_helpers.tpl
@@ -0,0 +1,51 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "provider-kubernetes.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "provider-kubernetes.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "provider-kubernetes.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "provider-kubernetes.labels" -}}
+helm.sh/chart: {{ include "provider-kubernetes.chart" . }}
+{{ include "provider-kubernetes.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "provider-kubernetes.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "provider-kubernetes.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
diff --git a/charts/provider-kubernetes-helm/templates/clusterrole.yaml b/charts/provider-kubernetes-helm/templates/clusterrole.yaml
new file mode 100644
index 00000000..0a5d266b
--- /dev/null
+++ b/charts/provider-kubernetes-helm/templates/clusterrole.yaml
@@ -0,0 +1,84 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: crossplane:provider:provider-kubernetes:system
+  labels: {{- include "provider-kubernetes.labels" .
| nindent 4 }} +rules: + - apiGroups: + - kubernetes.crossplane.io + resources: + - objects + - objects/status + - providerconfigs + - providerconfigs/status + - providerconfigusages + - providerconfigusages/status + verbs: + - get + - list + - watch + - update + - patch + - create + - apiGroups: + - "" + - coordination.k8s.io + resources: + - secrets + - configmaps + - events + - leases + verbs: + - "*" + - apiGroups: + - "*" + resources: + - "*" + verbs: + - "*" + - nonResourceURLs: + - "*" + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.crossplane.io/aggregate-to-admin: "true" + rbac.crossplane.io/aggregate-to-crossplane: "true" + rbac.crossplane.io/aggregate-to-edit: "true" + name: crossplane:provider:provider-kubernetes:aggregate-to-edit +rules: + - apiGroups: + - kubernetes.crossplane.io + resources: + - objects + - objects/status + - providerconfigs + - providerconfigs/status + - providerconfigusages + - providerconfigusages/status + verbs: + - "*" +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + rbac.crossplane.io/aggregate-to-view: "true" + name: crossplane:provider:provider-kubernetes:aggregate-to-view +rules: + - apiGroups: + - kubernetes.crossplane.io + resources: + - objects + - objects/status + - providerconfigs + - providerconfigs/status + - providerconfigusages + - providerconfigusages/status + verbs: + - get + - list + - watch diff --git a/charts/provider-kubernetes-helm/templates/clusterrolebinding.yaml b/charts/provider-kubernetes-helm/templates/clusterrolebinding.yaml new file mode 100644 index 00000000..94ab0154 --- /dev/null +++ b/charts/provider-kubernetes-helm/templates/clusterrolebinding.yaml @@ -0,0 +1,13 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: crossplane:provider:provider-kubernetes:system + labels: {{- include "provider-kubernetes.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: crossplane:provider:provider-kubernetes:system +subjects: + - kind: ServiceAccount + name: provider-kubernetes + namespace: crossplane-system diff --git a/charts/provider-kubernetes-helm/templates/controllerconfig.yaml b/charts/provider-kubernetes-helm/templates/controllerconfig.yaml new file mode 100644 index 00000000..cbd06c65 --- /dev/null +++ b/charts/provider-kubernetes-helm/templates/controllerconfig.yaml @@ -0,0 +1,12 @@ +apiVersion: pkg.crossplane.io/v1alpha1 +kind: ControllerConfig +metadata: + name: provider-kubernetes + labels: {{- include "provider-kubernetes.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.provider.replicas }} + args: + {{- range $i, $v := $.Values.provider.args }} + - {{ $v }} + {{- end }} + serviceAccountName: provider-kubernetes diff --git a/charts/provider-kubernetes-helm/templates/provider.yaml b/charts/provider-kubernetes-helm/templates/provider.yaml new file mode 100644 index 00000000..030b2274 --- /dev/null +++ b/charts/provider-kubernetes-helm/templates/provider.yaml @@ -0,0 +1,12 @@ +apiVersion: pkg.crossplane.io/v1 +kind: Provider +metadata: + name: provider-kubernetes + labels: {{- include "provider-kubernetes.labels" . 
| nindent 4 }} +spec: + package: {{ .Values.provider.package }}:{{ .Values.provider.tag | default .Chart.AppVersion }} + packagePullPolicy: {{ .Values.provider.packagePullPolicy }} + revisionActivationPolicy: Automatic + revisionHistoryLimit: 1 + controllerConfigRef: + name: provider-kubernetes diff --git a/charts/provider-kubernetes-helm/templates/providerconfig.yaml b/charts/provider-kubernetes-helm/templates/providerconfig.yaml new file mode 100644 index 00000000..cea274cb --- /dev/null +++ b/charts/provider-kubernetes-helm/templates/providerconfig.yaml @@ -0,0 +1,8 @@ +apiVersion: kubernetes.crossplane.io/v1alpha1 +kind: ProviderConfig +metadata: + name: provider-kubernetes + labels: {{- include "provider-kubernetes.labels" . | nindent 4 }} +spec: + credentials: + source: InjectedIdentity diff --git a/charts/provider-kubernetes-helm/templates/serviceaccount.yaml b/charts/provider-kubernetes-helm/templates/serviceaccount.yaml new file mode 100644 index 00000000..14f35a87 --- /dev/null +++ b/charts/provider-kubernetes-helm/templates/serviceaccount.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: provider-kubernetes + namespace: crossplane-system + labels: {{- include "provider-kubernetes.labels" . | nindent 4 }} diff --git a/charts/provider-kubernetes-helm/values.yaml b/charts/provider-kubernetes-helm/values.yaml new file mode 100644 index 00000000..21f72d54 --- /dev/null +++ b/charts/provider-kubernetes-helm/values.yaml @@ -0,0 +1,7 @@ +provider: + package: crossplanecontrib/provider-kubernetes + # Overrides the image tag whose default is the chart appVersion. + tag: "" + packagePullPolicy: IfNotPresent + replicas: 1 + args: [] diff --git a/charts/vault/Chart.yaml b/charts/vault/Chart.yaml new file mode 100644 index 00000000..e43509c9 --- /dev/null +++ b/charts/vault/Chart.yaml @@ -0,0 +1,20 @@ +apiVersion: v2 +name: vault +description: A Helm chart for vault using bank-vaults vault operator. Note that you need to have bank-vaults operator (https://bank-vaults.dev/docs/operator/) installed to be able to install this chart. + +type: application +version: 0.1.0 +appVersion: "0.1.0" + +keywords: + - edgefarm.network + - bank-vaults + - vault + - nats + +sources: + - https://github.com/banzaicloud/bank-vaults/ + +maintainers: + - name: Armin Schlegel + email: armin.schlegel@gmx.de diff --git a/charts/vault/templates/_helpers.tpl b/charts/vault/templates/_helpers.tpl new file mode 100644 index 00000000..94657d0d --- /dev/null +++ b/charts/vault/templates/_helpers.tpl @@ -0,0 +1,64 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "vault.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "vault.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
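+Illustrative example: chart "vault" at version "0.1.0" yields the label value
+"vault-0.1.0"; a "+" in a version such as "0.1.0+build1" would become "_".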
+*/}} +{{- define "vault.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "vault.labels" -}} +helm.sh/chart: {{ include "vault.chart" . }} +{{ include "vault.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "vault.selectorLabels" -}} +app.kubernetes.io/name: {{ include "vault.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +nats-auth-config policy +*/}} +{{- define "vault.nats-auth-config-policy" -}} +vault: + externalConfig: + policies: + - name: nats-auth-config + rules: path "nats-secrets/jwt/operator/{{ .Values.nats.operatorName }}" { + capabilities = ["read"] + } +{{- end }} \ No newline at end of file diff --git a/charts/vault/templates/certificate.yaml b/charts/vault/templates/certificate.yaml new file mode 100644 index 00000000..f1b64f5a --- /dev/null +++ b/charts/vault/templates/certificate.yaml @@ -0,0 +1,18 @@ +{{- if .Values.certificate.enabled }} +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ .Values.certificate.name }} + namespace: {{ .Values.namespace }} + labels: + {{- include "vault.labels" . | nindent 4 }} +spec: + secretName: {{ .Values.certificate.name }} + dnsNames: + {{ toYaml .Values.certificate.config.dnsNames | nindent 4 }} + ipAddresses: + {{ toYaml .Values.certificate.config.ipAddresses | nindent 4 }} + issuerRef: + name: {{ .Values.certificate.config.issuerRef.name }} + kind: {{ .Values.certificate.config.issuerRef.kind }} +{{- end }} \ No newline at end of file diff --git a/charts/vault/templates/cluster-issuer.yaml b/charts/vault/templates/cluster-issuer.yaml new file mode 100644 index 00000000..ce8f1fc1 --- /dev/null +++ b/charts/vault/templates/cluster-issuer.yaml @@ -0,0 +1,11 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: {{ .Values.certificate.config.issuerRef.name }} +spec: +{{- if .Values.certificate.config.issuerRef.secret }} + ca: + secretName: {{ .Values.certificate.config.issuerRef.secret }} +{{- else }} + selfSigned: {} +{{- end }} diff --git a/charts/vault/templates/persistentvolume.yaml b/charts/vault/templates/persistentvolume.yaml new file mode 100644 index 00000000..99f40425 --- /dev/null +++ b/charts/vault/templates/persistentvolume.yaml @@ -0,0 +1,13 @@ +{{- if .Values.storage.enabled }} +{{- if .Values.storage.persistentVolume.enabled }} +apiVersion: v1 +kind: PersistentVolume +metadata: + name: vault-file + namespace: {{ .Values.namespace }} + labels: + {{- include "vault.labels" . | nindent 4 }} +spec: + {{ toYaml .Values.storage.persistentVolume.spec | nindent 2}} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/charts/vault/templates/persistentvolumeclaim.yaml b/charts/vault/templates/persistentvolumeclaim.yaml new file mode 100644 index 00000000..b324724e --- /dev/null +++ b/charts/vault/templates/persistentvolumeclaim.yaml @@ -0,0 +1,13 @@ +{{- if .Values.storage.enabled }} +{{- if .Values.storage.persistentVolumeClaim.enabled }} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: vault-file + namespace: {{ .Values.namespace }} + labels: + {{- include "vault.labels" . 
| nindent 4 }}
+spec:
+  {{ toYaml .Values.storage.persistentVolumeClaim.spec | nindent 2 }}
+{{- end }}
+{{- end }}
diff --git a/charts/vault/templates/rbac.yaml b/charts/vault/templates/rbac.yaml
new file mode 100644
index 00000000..7de3d684
--- /dev/null
+++ b/charts/vault/templates/rbac.yaml
@@ -0,0 +1,64 @@
+kind: ServiceAccount
+apiVersion: v1
+metadata:
+  name: {{ .Values.vault.serviceAccount }}
+  namespace: {{ .Values.namespace }}
+  labels:
+    {{- include "vault.labels" . | nindent 4 }}
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ .Values.vault.serviceAccount }}
+  namespace: {{ .Values.namespace }}
+  labels:
+    {{- include "vault.labels" . | nindent 4 }}
+rules:
+  - apiGroups: [""]
+    resources: ["secrets"]
+    resourceNames: ["bank-vaults"]
+    verbs: ["*"]
+  - apiGroups: [""]
+    resources: ["secrets"]
+    resourceNames: ["vault-unseal-keys"]
+    verbs: ["*"]
+  - apiGroups: [""]
+    resources: ["secrets"]
+    resourceNames: [""]
+    verbs: ["create"]
+  - apiGroups: [""]
+    resources: ["pods"]
+    verbs: ["get", "update", "patch"]
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: {{ .Values.vault.serviceAccount }}
+  namespace: {{ .Values.namespace }}
+  labels:
+    {{- include "vault.labels" . | nindent 4 }}
+roleRef:
+  kind: Role
+  name: {{ .Values.vault.serviceAccount }}
+  apiGroup: rbac.authorization.k8s.io
+subjects:
+  - kind: ServiceAccount
+    name: {{ .Values.vault.serviceAccount }}
+    namespace: {{ .Values.namespace }}
+---
+# This binding allows the deployed Vault instance to authenticate clients
+# through Kubernetes ServiceAccounts (if configured so).
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ .Values.vault.serviceAccount }}-auth-delegator
+  labels:
+    {{- include "vault.labels" . | nindent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:auth-delegator
+subjects:
+  - kind: ServiceAccount
+    name: {{ .Values.vault.serviceAccount }}
+    namespace: {{ .Values.namespace }}
diff --git a/charts/vault/templates/vault.yaml b/charts/vault/templates/vault.yaml
new file mode 100644
index 00000000..568c788a
--- /dev/null
+++ b/charts/vault/templates/vault.yaml
@@ -0,0 +1,39 @@
+apiVersion: "vault.banzaicloud.com/v1alpha1"
+kind: "Vault"
+metadata:
+  name: {{ include "vault.name" . }}
+  namespace: {{ .Values.namespace }}
+  labels:
+    {{- include "vault.labels" .
| nindent 4 }}
+spec:
+{{- if .Values.nats.enabled }}
+{{- $operator := .Values.nats.operatorName }}
+{{- $rules := printf "path \"nats-secrets/jwt/operator/%s\" { capabilities = [\"read\"] }\npath \"nats-secrets/nkey/operator/%s/account/sys\" { capabilities = [\"read\"] }\npath \"nats-secrets/jwt/operator/%s/account/sys\" { capabilities = [\"read\"] }" $operator $operator $operator }}
+{{- $additionalPolicy := list (dict "name" "nats-auth-config" "rules" $rules) }}
+{{- if .Values.vault.externalConfig.policies }}
+  {{- $mergedPolicies := concat .Values.vault.externalConfig.policies $additionalPolicy }}
+  {{- $_ := set .Values.vault.externalConfig "policies" $mergedPolicies }}
+{{- else }}
+  {{- $_ := set .Values.vault.externalConfig "policies" $additionalPolicy }}
+{{- end }}
+{{- end }}
+
+{{- range $index, $auth := .Values.vault.externalConfig.auth }}
+  {{- if eq $auth.type "kubernetes" }}
+    {{- if $auth.config.kubernetes_ca_cert }}
+      {{- $_ := set $auth.config "kubernetes_ca_cert" $auth.config.kubernetes_ca_cert }}
+    {{- else }}
+      {{- $kubeServiceAccount := lookup "v1" "ServiceAccount" "default" "default" }}
+      {{- if $kubeServiceAccount }}
+        {{- $kubeSecretName := index $kubeServiceAccount.secrets 0 "name" }}
+        {{- $kubeSecret := lookup "v1" "Secret" "default" $kubeSecretName }}
+        {{- if and $kubeSecret (hasKey $kubeSecret.data "ca.crt") }}
+          {{- $kubeCaCert := (index $kubeSecret.data "ca.crt") | b64dec }}
+          {{- $_ := set $auth.config "kubernetes_ca_cert" $kubeCaCert }}
+        {{- end }}
+      {{- end }}
+    {{- end }}
+  {{- end }}
+{{- end }}
+
+{{- toYaml .Values.vault | nindent 2 }}
\ No newline at end of file
diff --git a/charts/vault/values.yaml b/charts/vault/values.yaml
new file mode 100644
index 00000000..737f94f6
--- /dev/null
+++ b/charts/vault/values.yaml
@@ -0,0 +1,260 @@
+namespace: vault
+
+vault:
+  # globalConfig:
+  #   configure:
+  #     auth:
+  #       kubernetes:
+  #         passClusterCACert: true
+
+  # spec:
+  size: 1
+  # Use the custom vault image containing the NATS secrets plugin
+  image: ghcr.io/edgefarm/vault-plugin-secrets-nats/vault-with-nats-secrets:1.3.3
+
+  # Common annotations for all created resources
+  annotations:
+    common/annotation: "true"
+
+  # Vault Pods, Services and TLS Secret annotations
+  vaultAnnotations:
+    type/instance: "vault"
+
+  # Vault Configurer Pods and Services annotations
+  vaultConfigurerAnnotations:
+    type/instance: "vaultconfigurer"
+
+  # Vault Pods, Services and TLS Secret labels
+  vaultLabels:
+    example.com/log-format: "json"
+
+  # Vault Configurer Pods and Services labels
+  vaultConfigurerLabels:
+    example.com/log-format: "string"
+
+  # Support for affinity Rules, same as in PodSpec
+  # affinity:
+  #   nodeAffinity:
+  #     requiredDuringSchedulingIgnoredDuringExecution:
+  #       nodeSelectorTerms:
+  #       - matchExpressions:
+  #         - key : "node-role.kubernetes.io/your_role"
+  #           operator: In
+  #           values: ["true"]
+
+  # Support for pod nodeSelector rules to control which nodes can be chosen to run
+  # the given pods
+  # nodeSelector:
+  #   "node-role.kubernetes.io/your_role": "true"
+
+  # Support for node tolerations that work together with node taints to control
+  # the pods that can live on a node
+  # tolerations:
+  # - effect: NoSchedule
+  #   key: node-role.kubernetes.io/your_role
+  #   operator: Equal
+  #   value: "true"
+
+  # Specify existing secret contains TLS certificate. 
Can be generated using tls.enabled=true + existingTlsSecretName: vault-server-tls + + # Specify the ServiceAccount where the Vault Pod and the Bank-Vaults configurer/unsealer is running + serviceAccount: vault + + # Specify the Service's type where the Vault Service is exposed + # Please note that some Ingress controllers like https://github.com/kubernetes/ingress-gce + # forces you to expose your Service on a NodePort + serviceType: ClusterIP + + # Specify existing secret contains TLS certificate (accepted secret type: kubernetes.io/tls) + # If it is set, generating certificate will be disabled + # existingTlsSecretName: selfsigned-cert-tls + + # Specify threshold for renewing certificates. Valid time units are "ns", "us", "ms", "s", "m", "h". + # tlsExpiryThreshold: 168h + + # Request an Ingress controller with the default configuration + ingress: + # Specify Ingress object annotations here, if TLS is enabled (which is by default) + # the operator will add NGINX, Traefik and HAProxy Ingress compatible annotations + # to support TLS backends + annotations: {} + # Override the default Ingress specification here + # This follows the same format as the standard Kubernetes Ingress + # See: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.13/#ingressspec-v1beta1-extensions + spec: {} + + # Use local disk to store Vault file data, see config section. + volumes: + - name: vault-file + persistentVolumeClaim: + claimName: vault-file + + volumeMounts: + - name: vault-file + mountPath: /vault/file + + # Support for distributing the generated CA certificate Secret to other namespaces. + # Define a list of namespaces or use ["*"] for all namespaces. + caNamespaces: + - "*" # All namespaces + + # Describe where you would like to store the Vault unseal keys and root token. + unsealConfig: + options: + # The preFlightChecks flag enables unseal and root token storage tests + # This is true by default + preFlightChecks: true + # The storeRootToken flag enables storing of root token in chosen storage + # This is true by default + storeRootToken: true + kubernetes: + secretNamespace: vault + secretName: bank-vaults + + # A YAML representation of a final vault config file. + # See https://www.vaultproject.io/docs/configuration/ for more information. + config: + disable_mlock: true + plugin_directory: "/etc/vault/vault_plugins" + storage: + file: + path: "${ .Env.VAULT_STORAGE_FILE }" # An example how Vault config environment interpolation can be used + listener: + tcp: + address: "0.0.0.0:8200" + tls_cert_file: /vault/tls/server.crt + tls_key_file: /vault/tls/server.key + telemetry: + statsd_address: localhost:9125 + ui: true + api_addr: "https://0.0.0.0:8200" + + externalConfig: + plugins: + - plugin_name: vault-plugin-secrets-nats + command: vault-plugin-secrets-nats --tls-skip-verify --ca-cert=/vault/tls/ca.crt + sha256: efd2c118d26c9bd08205294912535bb7f8557b87e1fcdf16ad46487f3afcb58d + type: secret + + # add additional policies here + policies: + [] + # - name: mypolicy + # rules: path "mypath/*" { + # capabilities = ["create", "read", "update", "delete", "list"] + # } + # groups: + # - name: nats-secret-admin + # policies: + # - allow_nats_secrets + # metadata: + # privileged: true + # type: external + + # group-aliases: + # - name: nats-secret-admin + # mountpath: token + # group: nats-secret-admin + + auth: + - type: kubernetes + config: + kubernetes_host: https://10.96.0.1 + disable_local_ca_jwt: true + # kubernetes_ca_cert: | + # -----BEGIN CERTIFICATE----- + # ... 
+ # -----END CERTIFICATE----- + roles: + # Allow nats provider pods in the crossplane-system namespace to use the secret nats-secrets backend + - name: nats-auth-config + bound_service_account_names: ["nats-auth-config"] + bound_service_account_namespaces: ["*"] + policies: ["nats-auth-config"] + ttl: 24h + + secrets: + - path: nats-secrets + type: plugin + plugin_name: vault-plugin-secrets-nats + description: NATS secrets backend + + vaultEnvsConfig: + - name: VAULT_LOG_LEVEL + value: trace + - name: VAULT_STORAGE_FILE + value: "/vault/file" + + # If you are using a custom certificate and are setting the hostname in a custom way + sidecarEnvsConfig: + - name: VAULT_ADDR + value: https://vault.vault:8200 + + # https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + vaultPodSpec: + hostAliases: + - ip: "127.0.0.1" + hostnames: + - "vault.vault" + + # Marks presence of Istio, which influences things like port namings + resources: + vault: + limits: + cpu: "0.5" + memory: 1024Mi + requests: + cpu: "0.25" + memory: 1024Mi + istioEnabled: false + +certificate: + # if enabled, generate a new certificate for the Vault server to use + enabled: true + name: vault-server-tls + config: + # use selfSigned to generate a self-signed issuer. + selfSigned: true + issuerRef: + name: ca-issuer + kind: ClusterIssuer + # Use secret to provide a custom CA certificate instead of generating one + # secret: root-ca + dnsNames: + - vault + - "*.nip.io" + - vault.vault + ipAddresses: + - 127.0.0.1 + +storage: + enabled: true + persistentVolumeClaim: + enabled: true + spec: + # storageClassName: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + + persistentVolume: + enabled: true + spec: + capacity: + storage: 1Gi + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Recycle + hostPath: + path: /vault/file + +nats: + enabled: true + operatorName: operator + +config: + auth: + kubernetes: diff --git a/ci/collect.sh b/ci/collect.sh new file mode 100755 index 00000000..ee23d389 --- /dev/null +++ b/ci/collect.sh @@ -0,0 +1,13 @@ +#!/bin/sh + +cp deploy/compositions/consumer.streams.network.edgefarm.io/definition.yaml charts/compositions/templates/definition-consumer.yaml +cp deploy/compositions/consumer.streams.network.edgefarm.io/composition-consumer.yaml charts/compositions/templates/composition-consumer.yaml + +cp deploy/compositions/edgenetwork.streams.network.edgefarm.io/composition-edgenetworks.yaml charts/compositions/templates/composition-edgenetworks.yaml +cp deploy/compositions/edgenetwork.streams.network.edgefarm.io/definition.yaml charts/compositions/templates/definition-edgenetwork.yaml + +cp deploy/compositions/network.streams.network.edgefarm.io/composition-networks.yaml charts/compositions/templates/composition-networks.yaml +cp deploy/compositions/network.streams.network.edgefarm.io/definition.yaml charts/compositions/templates/definition-network.yaml + +cp deploy/compositions/stream.streams.network.edgefarm.io/composition-streams.yaml charts/compositions/templates/composition-streams.yaml +cp deploy/compositions/stream.streams.network.edgefarm.io/definition.yaml charts/compositions/templates/definition-streams.yaml diff --git a/cmd/network-dependencies-webhook/README.md b/cmd/network-dependencies-webhook/README.md new file mode 100644 index 00000000..dd9c4d33 --- /dev/null +++ b/cmd/network-dependencies-webhook/README.md @@ -0,0 +1,28 @@ +# network-dependencies-webhook + +This validation webhook takes care of the 
order of deletion of the network components that have dependencies between them.
+
+## How it works
+
+There are a number of resources that take care of the network components in the cluster. These resources are:
+
+1. account (accounts.issue.natssecrets.crossplane.io/v1alpha1)
+2. system-user (users.issue.natssecrets.crossplane.io/v1alpha1)
+3. sys-account-user (users.issue.natssecrets.crossplane.io/v1alpha1)
+4. custom users (users.issue.natssecrets.crossplane.io/v1alpha1)
+5. providerConfig for nats (providerconfigs.nats.crossplane.io/v1alpha1)
+6. edgeNetwork creating leaf-nats pods (xedgenetworks.streams.network.edgefarm.io/v1alpha1)
+7. streams (xstreams.streams.network.edgefarm.io/v1alpha1)
+8. consumers (xconsumers.streams.network.edgefarm.io/v1alpha1)
+
+The webhook intercepts delete requests for each of these resources and allows or blocks the deletion based on the dependencies between them. The dependency rules are as follows:
+
+- always allow deleting custom users
+- always allow deleting consumers
+- no consumers left for a stream? allow deleting the stream
+- no streams left? allow deleting the pods
+- no streams left? allow deleting the providerConfig
+- no providerConfig left? allow deleting the system-user
+- no system-user and no custom users left? allow deleting the account
+- no account left? allow deleting the sys-account-user
+
diff --git a/cmd/network-dependencies-webhook/devspace.yaml b/cmd/network-dependencies-webhook/devspace.yaml
index de79637b..a26233b6 100644
--- a/cmd/network-dependencies-webhook/devspace.yaml
+++ b/cmd/network-dependencies-webhook/devspace.yaml
@@ -24,9 +24,9 @@ pipelines:
 dev:
   network-dependencies-webhook:
     labelSelector:
-      app: network-dependencies-webhook
+      app.kubernetes.io/name: network-dependencies-webhook
     devImage: ghcr.io/loft-sh/devspace-containers/go:1.20-alpine
-    namespace: crossplane-system
+    namespace: edgefarm-network
     terminal: {}
     ports:
       - port: "2346"
diff --git a/cmd/network-dependencies-webhook/main.go b/cmd/network-dependencies-webhook/main.go
index db454293..a7691818 100644
--- a/cmd/network-dependencies-webhook/main.go
+++ b/cmd/network-dependencies-webhook/main.go
@@ -12,7 +12,7 @@ import (
 	"k8s.io/client-go/rest"
 	"k8s.io/client-go/tools/clientcmd"
 
-	v "github.com/edgefarm/edgefarm.network/cmd/network-dependencies-webhook/pkg/validate"
+	"github.com/edgefarm/edgefarm.network/cmd/network-dependencies-webhook/pkg/validate"
 )
 
 type ServerParameters struct {
@@ -57,8 +57,20 @@ func main() {
 		}
 		config = c
 	}
-	http.HandleFunc("/validate", func(w http.ResponseWriter, r *http.Request) {
-		v.Validate(config, w, r)
+	http.HandleFunc("/validating-network-edgefarm-io-v1alpha1-pods", func(w http.ResponseWriter, r *http.Request) {
+		validate.Pods(config, w, r)
+	})
+	http.HandleFunc("/validating-network-edgefarm-io-v1alpha1-accounts", func(w http.ResponseWriter, r *http.Request) {
+		validate.Accounts(config, w, r)
+	})
+	http.HandleFunc("/validating-network-edgefarm-io-v1alpha1-users", func(w http.ResponseWriter, r *http.Request) {
+		validate.Users(config, w, r)
+	})
+	http.HandleFunc("/validating-network-edgefarm-io-v1alpha1-providerconfigs", func(w http.ResponseWriter, r *http.Request) {
+		validate.ProviderConfigs(config, w, r)
+	})
+	http.HandleFunc("/validating-network-edgefarm-io-v1alpha1-streams", func(w http.ResponseWriter, r *http.Request) {
+		validate.Streams(config, w, r)
 	})
 	err := http.ListenAndServeTLS(":"+strconv.Itoa(parameters.port), parameters.certFile, parameters.keyFile, nil)
 	if err != nil {
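Each of the `validate.*` handlers registered above enforces one of the dependency rules from the README. They all share the same shape; a condensed sketch of that shape (illustrative only: `handleDelete` and its `hasDependents` callback are not part of this commit, the real handlers follow below):

```go
package validate

import (
	"net/http"

	"k8s.io/api/admission/v1beta1"
)

// handleDelete gates a DELETE request: anything other than a deletion is
// allowed outright, and the deletion itself is denied for as long as
// hasDependents still reports dependent resources.
func handleDelete(w http.ResponseWriter, review *v1beta1.AdmissionReview,
	hasDependents func(name string) (bool, error)) {
	if review.Request.Operation != v1beta1.Delete {
		writeAllowedResponse(w, review)
		return
	}
	stillUsed, err := hasDependents(review.Request.Name)
	if err != nil {
		writeErrorResponse(w, err)
		return
	}
	if stillUsed {
		writeDeniedResponse(w, review, "cannot delete %s: dependents still present", review.Request.Name)
		return
	}
	writeAllowedResponse(w, review)
}
```

diff --git a/cmd/network-dependencies-webhook/pkg/validate/accounts.go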
b/cmd/network-dependencies-webhook/pkg/validate/accounts.go new file mode 100644 index 00000000..15d90873 --- /dev/null +++ b/cmd/network-dependencies-webhook/pkg/validate/accounts.go @@ -0,0 +1,58 @@ +package validate + +import ( + "context" + "fmt" + "net/http" + + "k8s.io/api/admission/v1beta1" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// Accounts handles the deletion of accounts +// accounts can only be deleted if no user depends on it +func Accounts(config *rest.Config, w http.ResponseWriter, r *http.Request) { + scheme := runtime.NewScheme() + codecFactory := serializer.NewCodecFactory(scheme) + deserializer := codecFactory.UniversalDeserializer() + admissionReview, err := admissionReviewFromRequest(r, deserializer) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't retrieve admission review from request: %v", err)) + return + } + + if admissionReview.Request.Operation != v1beta1.Delete { + writeAllowedResponse(w, admissionReview) + return + } + + name := admissionReview.Request.Name + dynamic, err := dynamic.NewForConfig(config) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't create dynamic client: %v", err)) + return + } + + account, err := GetAccountByName(context.Background(), dynamic, name) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't get account: %v", err)) + return + } + + uid := account.ObjectMeta.UID + + users, err := GetUsersByDependsOnUidLabel(context.Background(), dynamic, string(uid)) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't get user: %v", err)) + return + } + if len(users.Items) > 0 { + writeDeniedResponse(w, admissionReview, "cannot delete account %s: users still present", name) + return + } + writeAllowedResponse(w, admissionReview) +} diff --git a/cmd/network-dependencies-webhook/pkg/validate/admission.go b/cmd/network-dependencies-webhook/pkg/validate/admission.go new file mode 100644 index 00000000..52ba8827 --- /dev/null +++ b/cmd/network-dependencies-webhook/pkg/validate/admission.go @@ -0,0 +1,74 @@ +package validate + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + + "k8s.io/api/admission/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +func admissionReviewFromRequest(r *http.Request, deserializer runtime.Decoder) (*v1beta1.AdmissionReview, error) { + var body []byte + if r.Body != nil { + defer r.Body.Close() + body = readAll(r.Body) + } + admissionReview := &v1beta1.AdmissionReview{} + if _, _, err := deserializer.Decode(body, nil, admissionReview); err != nil { + return nil, err + } + return admissionReview, nil +} + +func writeAllowedResponse(w http.ResponseWriter, admissionReview *v1beta1.AdmissionReview) { + admissionReview.Response = &v1beta1.AdmissionResponse{ + UID: admissionReview.Request.UID, + Allowed: true, + } + writeResponse(w, admissionReview) +} + +func writeDeniedResponse(w http.ResponseWriter, admissionReview *v1beta1.AdmissionReview, reason string, args ...interface{}) { + admissionReview.Response = &v1beta1.AdmissionResponse{ + UID: admissionReview.Request.UID, + Allowed: false, + Result: &metav1.Status{ + Message: fmt.Sprintf(reason, args...), + }, + } + writeResponse(w, admissionReview) +} + +func writeErrorResponse(w http.ResponseWriter, err error) { + w.WriteHeader(http.StatusBadRequest) + w.Write([]byte(err.Error())) +} + +func writeResponse(w http.ResponseWriter, admissionReview *v1beta1.AdmissionReview) { + 
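// Echo the complete review back; the API server matches this response to the
+	// original request via Response.UID, which the write* helpers above set.
+	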
w.Header().Set("Content-Type", "application/json") + responseBytes, err := json.Marshal(admissionReview) + if err != nil { + panic(fmt.Sprintf("can't encode response: %v", err)) + } + w.Write(responseBytes) +} + +func readAll(reader io.Reader) []byte { + var buf []byte + for { + chunk := make([]byte, 1024) + n, err := reader.Read(chunk) + if err != nil && err != io.EOF { + panic(fmt.Sprintf("can't read request body: %v", err)) + } + if n == 0 { + break + } + buf = append(buf, chunk[:n]...) + } + return buf +} diff --git a/cmd/network-dependencies-webhook/pkg/validate/clients.go b/cmd/network-dependencies-webhook/pkg/validate/clients.go new file mode 100644 index 00000000..b5200f8b --- /dev/null +++ b/cmd/network-dependencies-webhook/pkg/validate/clients.go @@ -0,0 +1,339 @@ +package validate + +import ( + "context" + "encoding/json" + "fmt" + + consumerv1 "github.com/edgefarm/provider-nats/apis/consumer/v1alpha1" + streamsv1 "github.com/edgefarm/provider-nats/apis/stream/v1alpha1" + natsv1 "github.com/edgefarm/provider-nats/apis/v1alpha1" + + accountv1 "github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1" + userv1 "github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/dynamic" +) + +// GENERAL + +func GetUidFromDependsOnUidLabel(labels map[string]string) (string, error) { + if val, ok := labels[dependsOnUidLabel]; ok { + return val, nil + } + return "", fmt.Errorf("label %s not found", dependsOnUidLabel) +} + +// CONSUMERS + +func GetConsumers(ctx context.Context, client dynamic.Interface, options metav1.ListOptions) (*consumerv1.ConsumerList, error) { + listRaw, err := client.Resource(consumerResource).List(ctx, options) + if err != nil { + return nil, err + } + list := &consumerv1.ConsumerList{} + for _, e := range listRaw.Items { + j, err := e.MarshalJSON() + if err != nil { + return nil, err + } + s := &consumerv1.Consumer{} + err = json.Unmarshal(j, s) + if err != nil { + return nil, err + } + list.Items = append(list.Items, *s) + } + + return list, nil +} + +func GetConsumerByName(ctx context.Context, client dynamic.Interface, name string) (*consumerv1.Consumer, error) { + list, err := GetConsumers(ctx, client, metav1.ListOptions{ + FieldSelector: "metadata.name=" + name, + }) + if err != nil { + return nil, err + } + if len(list.Items) == 0 { + return nil, ErrNotFound + } + if len(list.Items) > 1 { + return nil, ErrMultipleFound + } + return &list.Items[0], nil +} + +func GetConsumersByDependsOnUidLabel(ctx context.Context, client dynamic.Interface, uid string) (*consumerv1.ConsumerList, error) { + return GetConsumers(ctx, client, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", dependsOnUidLabel, uid), + }) +} + +func GetConsumerByUid(ctx context.Context, client dynamic.Interface, uid string) (*consumerv1.Consumer, error) { + list, err := GetConsumers(ctx, client, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.uid=%s", uid), + }) + if err != nil { + return nil, err + } + if len(list.Items) == 0 { + return nil, ErrNotFound + } + if len(list.Items) > 1 { + return nil, ErrMultipleFound + } + return &list.Items[0], nil +} + +// STREAMS + +func GetStreams(ctx context.Context, client dynamic.Interface, options metav1.ListOptions) (*streamsv1.StreamList, error) { + listRaw, err := client.Resource(streamResource).List(ctx, options) + if err != nil { + return nil, err + } + list := &streamsv1.StreamList{} + for _, e := range listRaw.Items { + j, err := e.MarshalJSON() + if err != nil 
{ + return nil, err + } + s := &streamsv1.Stream{} + err = json.Unmarshal(j, s) + if err != nil { + return nil, err + } + list.Items = append(list.Items, *s) + } + + return list, nil +} + +func GetStreamByName(ctx context.Context, client dynamic.Interface, name string) (*streamsv1.Stream, error) { + list, err := GetStreams(ctx, client, metav1.ListOptions{ + FieldSelector: "metadata.name=" + name, + }) + if err != nil { + return nil, err + } + if len(list.Items) == 0 { + return nil, ErrNotFound + } + if len(list.Items) > 1 { + return nil, ErrMultipleFound + } + return &list.Items[0], nil +} + +func GetStreamsByDependsOnUidLabel(ctx context.Context, client dynamic.Interface, uid string) (*streamsv1.StreamList, error) { + return GetStreams(ctx, client, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", dependsOnUidLabel, uid), + }) +} + +func GetStreamByUid(ctx context.Context, client dynamic.Interface, uid string) (*streamsv1.Stream, error) { + list, err := GetStreams(ctx, client, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.uid=%s", uid), + }) + if err != nil { + return nil, err + } + if len(list.Items) == 0 { + return nil, ErrNotFound + } + if len(list.Items) > 1 { + return nil, ErrMultipleFound + } + return &list.Items[0], nil +} + +// USERS + +func GetUsers(ctx context.Context, client dynamic.Interface, options metav1.ListOptions) (*userv1.UserList, error) { + usersRaw, err := client.Resource(userResource).List(ctx, options) + if err != nil { + return nil, err + } + list := &userv1.UserList{} + for _, userRaw := range usersRaw.Items { + userJSON, err := userRaw.MarshalJSON() + if err != nil { + return nil, err + } + user := &userv1.User{} + err = json.Unmarshal(userJSON, user) + if err != nil { + return nil, err + } + list.Items = append(list.Items, *user) + } + + return list, nil +} + +func GetUserByName(ctx context.Context, client dynamic.Interface, name string) (*userv1.User, error) { + users, err := GetUsers(ctx, client, metav1.ListOptions{ + FieldSelector: "metadata.name=" + name, + }) + if err != nil { + return nil, err + } + if len(users.Items) == 0 { + return nil, ErrNotFound + } + if len(users.Items) > 1 { + return nil, ErrMultipleFound + } + return &users.Items[0], nil +} + +func GetUsersByDependsOnUidLabel(ctx context.Context, client dynamic.Interface, uid string) (*userv1.UserList, error) { + return GetUsers(ctx, client, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", dependsOnUidLabel, uid), + }) +} + +func GetUserByUid(ctx context.Context, client dynamic.Interface, uid string) (*userv1.User, error) { + users, err := GetUsers(ctx, client, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.uid=%s", uid), + }) + if err != nil { + return nil, err + } + if len(users.Items) == 0 { + return nil, ErrNotFound + } + if len(users.Items) > 1 { + return nil, ErrMultipleFound + } + return &users.Items[0], nil +} + +// ACCOUNTS + +func GetAccounts(ctx context.Context, client dynamic.Interface, options metav1.ListOptions) (*accountv1.AccountList, error) { + listRaw, err := client.Resource(accountResource).List(ctx, options) + if err != nil { + return nil, err + } + list := &accountv1.AccountList{} + for _, e := range listRaw.Items { + j, err := e.MarshalJSON() + if err != nil { + return nil, err + } + s := &accountv1.Account{} + err = json.Unmarshal(j, s) + if err != nil { + return nil, err + } + list.Items = append(list.Items, *s) + } + + return list, nil +} + +func GetAccountByName(ctx context.Context, client dynamic.Interface, name string) 
(*accountv1.Account, error) {
+	accounts, err := GetAccounts(ctx, client, metav1.ListOptions{
+		FieldSelector: "metadata.name=" + name,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if accounts.Items == nil {
+		return nil, ErrNotFound
+	}
+	if len(accounts.Items) == 0 {
+		return nil, ErrNotFound
+	}
+	if len(accounts.Items) > 1 {
+		return nil, ErrMultipleFound
+	}
+	return &accounts.Items[0], nil
+}
+
+func GetAccountsByDependsOnUidLabel(ctx context.Context, client dynamic.Interface, uid string) (*accountv1.AccountList, error) {
+	return GetAccounts(ctx, client, metav1.ListOptions{
+		LabelSelector: fmt.Sprintf("%s=%s", dependsOnUidLabel, uid),
+	})
+}
+
+func GetAccountByUid(ctx context.Context, client dynamic.Interface, uid string) (*accountv1.Account, error) {
+	accounts, err := GetAccounts(ctx, client, metav1.ListOptions{
+		FieldSelector: fmt.Sprintf("metadata.uid=%s", uid),
+	})
+	if err != nil {
+		return nil, err
+	}
+	if len(accounts.Items) == 0 {
+		return nil, ErrNotFound
+	}
+	if len(accounts.Items) > 1 {
+		return nil, ErrMultipleFound
+	}
+	return &accounts.Items[0], nil
+}
+
+// PROVIDERCONFIG
+
+func GetProviderConfigs(ctx context.Context, client dynamic.Interface, options metav1.ListOptions) (*natsv1.ProviderConfigList, error) {
+	listRaw, err := client.Resource(providerConfigResource).List(ctx, options)
+	if err != nil {
+		return nil, err
+	}
+	list := &natsv1.ProviderConfigList{}
+	for _, e := range listRaw.Items {
+		j, err := e.MarshalJSON()
+		if err != nil {
+			return nil, err
+		}
+		s := &natsv1.ProviderConfig{}
+		err = json.Unmarshal(j, s)
+		if err != nil {
+			return nil, err
+		}
+		list.Items = append(list.Items, *s)
+	}
+
+	return list, nil
+}
+
+func GetProviderConfigByName(ctx context.Context, client dynamic.Interface, name string) (*natsv1.ProviderConfig, error) {
+	providerConfigs, err := GetProviderConfigs(ctx, client, metav1.ListOptions{
+		FieldSelector: "metadata.name=" + name,
+	})
+	if err != nil {
+		return nil, err
+	}
+	if len(providerConfigs.Items) == 0 {
+		return nil, ErrNotFound
+	}
+	if len(providerConfigs.Items) > 1 {
+		return nil, ErrMultipleFound
+	}
+	return &providerConfigs.Items[0], nil
+}
+
+func GetProviderConfigsByDependsOnUidLabel(ctx context.Context, client dynamic.Interface, uid string) (*natsv1.ProviderConfigList, error) {
+	return GetProviderConfigs(ctx, client, metav1.ListOptions{
+		LabelSelector: fmt.Sprintf("%s=%s", dependsOnUidLabel, uid),
+	})
+}
+
+func GetProviderConfigByUid(ctx context.Context, client dynamic.Interface, uid string) (*natsv1.ProviderConfig, error) {
+	providerConfigs, err := GetProviderConfigs(ctx, client, metav1.ListOptions{
+		FieldSelector: fmt.Sprintf("metadata.uid=%s", uid),
+	})
+	if err != nil {
+		return nil, err
+	}
+	if len(providerConfigs.Items) == 0 {
+		return nil, ErrNotFound
+	}
+	if len(providerConfigs.Items) > 1 {
+		return nil, ErrMultipleFound
+	}
+	return &providerConfigs.Items[0], nil
+}
diff --git a/cmd/network-dependencies-webhook/pkg/validate/constants.go b/cmd/network-dependencies-webhook/pkg/validate/constants.go
new file mode 100644
index 00000000..0f6a2383
--- /dev/null
+++ b/cmd/network-dependencies-webhook/pkg/validate/constants.go
@@ -0,0 +1,15 @@
+package validate
+
+var (
+	dependsOnUidLabel        = "dependsOnUid"
+	systemUserSuffix         = "system"
+	systemAccountUserSuffix  = "sys-account-user"
+	streamNetworkLabelKey    = "streams.network.edgefarm.io/network"
+	streamSubnetworkLabelKey = "streams.network.edgefarm.io/subnetwork"
+	podNetworkLabelKeyPrefix = "name.network.edgefarm.io/"
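+	// Example (hypothetical names): a leaf pod of network "mynet" and
+	// subnetwork "sub1" carries the labels "name.network.edgefarm.io/mynet"
+	// and "subnetwork.network.edgefarm.io/sub1". The identity is encoded in
+	// the key suffix; the label value stays empty (see pods_test.go).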
+	podSubnetworkLabelKeyPrefix = "subnetwork.network.edgefarm.io/"
+)
diff --git a/cmd/network-dependencies-webhook/pkg/validate/errors.go b/cmd/network-dependencies-webhook/pkg/validate/errors.go
new file mode 100644
index 00000000..fdbf4fdb
--- /dev/null
+++ b/cmd/network-dependencies-webhook/pkg/validate/errors.go
@@ -0,0 +1,7 @@
+package validate
+
+import "errors"
+
+var ErrNotFound = errors.New("not found")
+var ErrMultipleFound = errors.New("multiple found")
+var ErrInvalid = errors.New("invalid")
diff --git a/cmd/network-dependencies-webhook/pkg/validate/gvr.go b/cmd/network-dependencies-webhook/pkg/validate/gvr.go
new file mode 100644
index 00000000..ad0fe6d8
--- /dev/null
+++ b/cmd/network-dependencies-webhook/pkg/validate/gvr.go
@@ -0,0 +1,35 @@
+package validate
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+var (
+	consumerResource = schema.GroupVersionResource{
+		Group:    "streams.network.edgefarm.io",
+		Version:  "v1alpha1",
+		Resource: "xconsumers",
+	}
+
+	streamResource = schema.GroupVersionResource{
+		Group:    "streams.network.edgefarm.io",
+		Version:  "v1alpha1",
+		Resource: "xstreams",
+	}
+
+	userResource = schema.GroupVersionResource{
+		Group:    "issue.natssecrets.crossplane.io",
+		Version:  "v1alpha1",
+		Resource: "users",
+	}
+
+	accountResource = schema.GroupVersionResource{
+		Group:    "issue.natssecrets.crossplane.io",
+		Version:  "v1alpha1",
+		Resource: "accounts",
+	}
+
+	providerConfigResource = schema.GroupVersionResource{
+		Group:    "nats.crossplane.io",
+		Version:  "v1alpha1",
+		Resource: "providerconfigs",
+	}
+)
diff --git a/cmd/network-dependencies-webhook/pkg/validate/pods.go b/cmd/network-dependencies-webhook/pkg/validate/pods.go
new file mode 100644
index 00000000..9f922a79
--- /dev/null
+++ b/cmd/network-dependencies-webhook/pkg/validate/pods.go
@@ -0,0 +1,103 @@
+package validate
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/edgefarm/edgefarm.network/internal/common"
+
+	"k8s.io/api/admission/v1beta1"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/kubernetes"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/rest"
+)
+
+// GetFromSuffixLabelKey returns the suffix of the first label key that starts with the given prefix
+// Note that the prefix must be unique; otherwise the first match is returned
+func GetFromSuffixLabelKey(labels map[string]string, prefix string) (string, error) {
+	for k := range labels {
+		if strings.Contains(k, prefix) {
+			network := ""
+			format := prefix + "%s"
+			n, err := fmt.Sscanf(k, format, &network)
+			if err != nil || n != 1 {
+				return "", fmt.Errorf("can't parse label %s", k)
+			}
+			return network, nil
+		}
+	}
+	return "", fmt.Errorf("label %s not found", prefix)
+}
+
+func Pods(config *rest.Config, w http.ResponseWriter, r *http.Request) {
+	scheme := runtime.NewScheme()
+	codecFactory := serializer.NewCodecFactory(scheme)
+	deserializer := codecFactory.UniversalDeserializer()
+	admissionReview, err := admissionReviewFromRequest(r, deserializer)
+	if err != nil {
+		writeErrorResponse(w, fmt.Errorf("can't retrieve admission review from request: %v", err))
+		return
+	}
+
+	if admissionReview.Request.Operation != v1beta1.Delete {
+		writeAllowedResponse(w, admissionReview)
+		return
+	}
+
+	name := admissionReview.Request.Name
+	namespace := admissionReview.Request.Namespace
+	clientset, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		writeErrorResponse(w, fmt.Errorf("can't create client: %v", err))
+		return
+	}
+
+	pod, 
err := clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't get pods: %v", err)) + return + } + pod.Labels[common.MarkedForDeletionLabelKey] = common.MarkedForDeletionLabelValue + pod, err = clientset.CoreV1().Pods(namespace).Update(context.Background(), pod, metav1.UpdateOptions{}) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't update pod: %v", err)) + return + } + + dynamic, err := dynamic.NewForConfig(config) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't create dynamic client: %v", err)) + return + } + networkName, err := GetFromSuffixLabelKey(pod.Labels, podNetworkLabelKeyPrefix) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't get network name: %v", err)) + return + } + subnetworkname, err := GetFromSuffixLabelKey(pod.Labels, podSubnetworkLabelKeyPrefix) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't get subnetwork name: %v", err)) + return + } + + streams, err := GetStreams(context.Background(), dynamic, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", streamNetworkLabelKey, networkName, streamSubnetworkLabelKey, subnetworkname), + }) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't list streams: %v", err)) + return + } + + if len(streams.Items) > 0 { + writeDeniedResponse(w, admissionReview, "cannot delete pod %s: streams still present", pod.Name) + return + } + + writeAllowedResponse(w, admissionReview) +} diff --git a/cmd/network-dependencies-webhook/pkg/validate/pods_test.go b/cmd/network-dependencies-webhook/pkg/validate/pods_test.go new file mode 100644 index 00000000..902ae33e --- /dev/null +++ b/cmd/network-dependencies-webhook/pkg/validate/pods_test.go @@ -0,0 +1,27 @@ +package validate + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestGetFromSuffixLabelKey(t *testing.T) { + assert := assert.New(t) + labels := map[string]string{ + "apps.openyurt.io/controller-revision-hash": "acceleration-network-demo-colibri-colibri-ffc5d5dc7", + "apps.openyurt.io/pool-name": "sparrow", + "name.network.edgefarm.io/acceleration-network-demo-colibri": "", + "network.edgefarm.io/type": "leaf", + "pod-template-hash": "dbf46c65", + "subnetwork.network.edgefarm.io/colibri": "", + } + l, err := GetFromSuffixLabelKey(labels, "name.network.edgefarm.io/") + assert.Nil(err) + assert.Equal("acceleration-network-demo-colibri", l) + l, err = GetFromSuffixLabelKey(labels, "subnetwork.network.edgefarm.io/") + assert.Nil(err) + assert.Equal("colibri", l) + _, err = GetFromSuffixLabelKey(labels, "missing/") + assert.NotNil(err) +} diff --git a/cmd/network-dependencies-webhook/pkg/validate/providerConfigs.go b/cmd/network-dependencies-webhook/pkg/validate/providerConfigs.go new file mode 100644 index 00000000..7735bdb6 --- /dev/null +++ b/cmd/network-dependencies-webhook/pkg/validate/providerConfigs.go @@ -0,0 +1,55 @@ +package validate + +import ( + "context" + "fmt" + "net/http" + + "k8s.io/api/admission/v1beta1" + "k8s.io/client-go/dynamic" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/rest" +) + +func ProviderConfigs(config *rest.Config, w http.ResponseWriter, r *http.Request) { + scheme := runtime.NewScheme() + codecFactory := serializer.NewCodecFactory(scheme) + deserializer := codecFactory.UniversalDeserializer() + admissionReview, err := admissionReviewFromRequest(r, deserializer) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't retrieve 
admission review from request: %v", err)) + return + } + + if admissionReview.Request.Operation != v1beta1.Delete { + writeAllowedResponse(w, admissionReview) + return + } + + name := admissionReview.Request.Name + + dynamic, err := dynamic.NewForConfig(config) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't create dynamic client: %v", err)) + return + } + providerConfig, err := GetProviderConfigByName(context.Background(), dynamic, name) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't get providerConfig: %v", err)) + return + } + uid := string(providerConfig.ObjectMeta.UID) + + streams, err := GetStreamsByDependsOnUidLabel(context.Background(), dynamic, uid) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't get stream: %v", err)) + return + } + if len(streams.Items) > 0 { + writeDeniedResponse(w, admissionReview, "cannot delete providerConfig %s: streams still present", name) + return + } + writeAllowedResponse(w, admissionReview) +} diff --git a/cmd/network-dependencies-webhook/pkg/validate/streams.go b/cmd/network-dependencies-webhook/pkg/validate/streams.go new file mode 100644 index 00000000..73d54302 --- /dev/null +++ b/cmd/network-dependencies-webhook/pkg/validate/streams.go @@ -0,0 +1,54 @@ +package validate + +import ( + "context" + "fmt" + "net/http" + + "k8s.io/api/admission/v1beta1" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +func Streams(config *rest.Config, w http.ResponseWriter, r *http.Request) { + scheme := runtime.NewScheme() + codecFactory := serializer.NewCodecFactory(scheme) + deserializer := codecFactory.UniversalDeserializer() + admissionReview, err := admissionReviewFromRequest(r, deserializer) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't retrieve admission review from request: %v", err)) + return + } + + if admissionReview.Request.Operation != v1beta1.Delete { + writeAllowedResponse(w, admissionReview) + return + } + + name := admissionReview.Request.Name + + dynamic, err := dynamic.NewForConfig(config) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't create dynamic client: %v", err)) + return + } + stream, err := GetStreamByName(context.Background(), dynamic, name) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't get stream: %v", err)) + return + } + uid := string(stream.ObjectMeta.UID) + consumers, err := GetConsumersByDependsOnUidLabel(context.Background(), dynamic, uid) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't get consumer: %v", err)) + return + } + if len(consumers.Items) > 0 { + writeDeniedResponse(w, admissionReview, "cannot delete stream %s: consumers still present", name) + return + } + writeAllowedResponse(w, admissionReview) +} diff --git a/cmd/network-dependencies-webhook/pkg/validate/users.go b/cmd/network-dependencies-webhook/pkg/validate/users.go new file mode 100644 index 00000000..7524c8bf --- /dev/null +++ b/cmd/network-dependencies-webhook/pkg/validate/users.go @@ -0,0 +1,100 @@ +package validate + +import ( + "context" + "errors" + "fmt" + "net/http" + "strings" + + userv1 "github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1" + "k8s.io/api/admission/v1beta1" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +func Users(config *rest.Config, w http.ResponseWriter, r *http.Request) { + scheme := runtime.NewScheme() + codecFactory := 
serializer.NewCodecFactory(scheme) + deserializer := codecFactory.UniversalDeserializer() + admissionReview, err := admissionReviewFromRequest(r, deserializer) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't retrieve admission review from request: %v", err)) + return + } + + if admissionReview.Request.Operation != v1beta1.Delete { + writeAllowedResponse(w, admissionReview) + return + } + + name := admissionReview.Request.Name + + dynamic, err := dynamic.NewForConfig(config) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't create dynamic client: %v", err)) + return + } + + user, err := GetUserByName(context.Background(), dynamic, name) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't get user: %v", err)) + return + } + if val, ok := user.ObjectMeta.Annotations["crossplane.io/composition-resource-name"]; ok { + if val == systemUserSuffix { + SystemUser(w, admissionReview, dynamic, user) + return + } else if val == systemAccountUserSuffix { + SysAccountUser(w, admissionReview, dynamic, user) + return + } + } + // normal user always allowed + writeAllowedResponse(w, admissionReview) +} + +// SystemUser handles the deletion of system users +// system users are allowed to be deleted if there is no ProviderConfig with the UID referenced in the DependsOnUid label +func SystemUser(w http.ResponseWriter, admissionReview *v1beta1.AdmissionReview, dynamic *dynamic.DynamicClient, user *userv1.User) { + dependsOnUid, err := GetUidFromDependsOnUidLabel(user.ObjectMeta.Labels) + if err != nil { + writeErrorResponse(w, fmt.Errorf("can't get dependsOnUid from user: %v", err)) + return + } + providerConfig, err := GetProviderConfigByUid(context.Background(), dynamic, dependsOnUid) + if err != nil { + if errors.Is(err, ErrNotFound) { + writeAllowedResponse(w, admissionReview) + } else { + writeErrorResponse(w, err) + } + return + } + if providerConfig != nil { + writeDeniedResponse(w, admissionReview, "cannot delete system user %s: provider config %s still present", user.ObjectMeta.Name, providerConfig.ObjectMeta.Name) + return + } +} + +// SysAccountUser handles the deletion of sys-account users +// sys-account users are allowed to be deleted if there is no Account that belongs to the user +func SysAccountUser(w http.ResponseWriter, admissionReview *v1beta1.AdmissionReview, dynamic *dynamic.DynamicClient, user *userv1.User) { + name := user.ObjectMeta.Name + accountName := strings.TrimSuffix(name, "-"+systemAccountUserSuffix) + account, err := GetAccountByName(context.Background(), dynamic, accountName) + if err != nil { + if errors.Is(err, ErrNotFound) { + writeAllowedResponse(w, admissionReview) + } else { + writeErrorResponse(w, err) + } + } + if account != nil { + writeDeniedResponse(w, admissionReview, "cannot delete sys-account-user %s: account %s still present", user.ObjectMeta.Name, accountName) + return + } +} diff --git a/cmd/network-dependencies-webhook/pkg/validate/validate.go b/cmd/network-dependencies-webhook/pkg/validate/validate.go deleted file mode 100644 index 58c07265..00000000 --- a/cmd/network-dependencies-webhook/pkg/validate/validate.go +++ /dev/null @@ -1,156 +0,0 @@ -package validate - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - - "github.com/edgefarm/edgefarm.network/internal/common" - - "k8s.io/api/admission/v1beta1" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - 
"k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/rest" -) - -var streamResource = schema.GroupVersionResource{ - Group: "streams.network.edgefarm.io", - Version: "v1alpha1", - Resource: "xstreams", -} - -func ListStreams(ctx context.Context, client dynamic.Interface, options metav1.ListOptions) ([]unstructured.Unstructured, error) { - list, err := client.Resource(streamResource).List(ctx, options) - if err != nil { - return nil, err - } - - return list.Items, nil -} - -func Validate(config *rest.Config, w http.ResponseWriter, r *http.Request) { - scheme := runtime.NewScheme() - codecFactory := serializer.NewCodecFactory(scheme) - deserializer := codecFactory.UniversalDeserializer() - admissionReview, err := admissionReviewFromRequest(r, deserializer) - if err != nil { - writeErrorResponse(w, fmt.Errorf("can't retrieve admission review from request: %v", err)) - return - } - - if admissionReview.Request.Operation != v1beta1.Delete { - writeAllowedResponse(w, admissionReview) - return - } - - name := admissionReview.Request.Name - namespace := admissionReview.Request.Namespace - clientset, err := kubernetes.NewForConfig(config) - if err != nil { - writeErrorResponse(w, fmt.Errorf("can't create client: %v", err)) - return - } - - pod, err := clientset.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - writeErrorResponse(w, fmt.Errorf("can't get pods: %v", err)) - return - } - pod.Labels[common.MarkedForDeletionLabelKey] = common.MarkedForDeletionLabelValue - pod, err = clientset.CoreV1().Pods(namespace).Update(context.Background(), pod, metav1.UpdateOptions{}) - if err != nil { - writeErrorResponse(w, fmt.Errorf("can't update pod: %v", err)) - return - } - hostname := pod.Spec.NodeName - listOptions := metav1.ListOptions{ - LabelSelector: fmt.Sprintf("streams.network.edgefarm.io/node=%s", hostname), - } - dynamic, err := dynamic.NewForConfig(config) - if err != nil { - writeErrorResponse(w, fmt.Errorf("can't create dynamic client: %v", err)) - return - } - items, err := ListStreams(context.Background(), dynamic, listOptions) - if err != nil { - writeErrorResponse(w, fmt.Errorf("can't list streams: %v", err)) - return - } - - if len(items) > 0 { - writeDeniedResponse(w, admissionReview, "cannot delete pod %s: streams still present", pod.Name) - return - } - - writeAllowedResponse(w, admissionReview) -} - -func admissionReviewFromRequest(r *http.Request, deserializer runtime.Decoder) (*v1beta1.AdmissionReview, error) { - var body []byte - if r.Body != nil { - defer r.Body.Close() - body = readAll(r.Body) - } - admissionReview := &v1beta1.AdmissionReview{} - if _, _, err := deserializer.Decode(body, nil, admissionReview); err != nil { - return nil, err - } - return admissionReview, nil -} - -func writeAllowedResponse(w http.ResponseWriter, admissionReview *v1beta1.AdmissionReview) { - admissionReview.Response = &v1beta1.AdmissionResponse{ - UID: admissionReview.Request.UID, - Allowed: true, - } - writeResponse(w, admissionReview) -} - -func writeDeniedResponse(w http.ResponseWriter, admissionReview *v1beta1.AdmissionReview, reason string, args ...interface{}) { - admissionReview.Response = &v1beta1.AdmissionResponse{ - UID: admissionReview.Request.UID, - Allowed: false, - Result: &metav1.Status{ - Message: fmt.Sprintf(reason, args...), - }, - } - writeResponse(w, admissionReview) -} - -func writeErrorResponse(w http.ResponseWriter, err error) { - w.WriteHeader(http.StatusBadRequest) - 
w.Write([]byte(err.Error())) -} - -func writeResponse(w http.ResponseWriter, admissionReview *v1beta1.AdmissionReview) { - w.Header().Set("Content-Type", "application/json") - responseBytes, err := json.Marshal(admissionReview) - if err != nil { - panic(fmt.Sprintf("can't encode response: %v", err)) - } - w.Write(responseBytes) -} - -func readAll(reader io.Reader) []byte { - var buf []byte - for { - chunk := make([]byte, 1024) - n, err := reader.Read(chunk) - if err != nil && err != io.EOF { - panic(fmt.Sprintf("can't read request body: %v", err)) - } - if n == 0 { - break - } - buf = append(buf, chunk[:n]...) - } - return buf -} diff --git a/deploy/compositions/network.streams.network.edgefarm.io/composition-networks.yaml b/deploy/compositions/network.streams.network.edgefarm.io/composition-networks.yaml index df05b749..f20abf65 100644 --- a/deploy/compositions/network.streams.network.edgefarm.io/composition-networks.yaml +++ b/deploy/compositions/network.streams.network.edgefarm.io/composition-networks.yaml @@ -78,6 +78,12 @@ spec: toFieldPath: status.operator policy: fromFieldPath: Required + + - type: ToCompositeFieldPath + fromFieldPath: status.atProvider.manifest.data[sysAccount] + toFieldPath: status.sysAccount + policy: + fromFieldPath: Required readinessChecks: - type: None @@ -192,7 +198,7 @@ spec: name: #patched forProvider: operator: #patched - account: sys + account: #patched claims: user: data: -1 @@ -223,6 +229,10 @@ spec: toFieldPath: spec.forProvider.operator type: FromCompositeFieldPath + - fromFieldPath: status.sysAccount + toFieldPath: spec.forProvider.account + type: FromCompositeFieldPath + - fromFieldPath: metadata.labels["crossplane.io/claim-namespace"] toFieldPath: spec.writeConnectionSecretToRef.namespace type: FromCompositeFieldPath diff --git a/deploy/compositions/network.streams.network.edgefarm.io/definition.yaml b/deploy/compositions/network.streams.network.edgefarm.io/definition.yaml index e210a33f..d0242b63 100644 --- a/deploy/compositions/network.streams.network.edgefarm.io/definition.yaml +++ b/deploy/compositions/network.streams.network.edgefarm.io/definition.yaml @@ -29,6 +29,10 @@ spec: description: > The operator for the NATS server type: string + sysAccount: + description: > + The sys account name for the NATS server + type: string system: description: > The UID of the secret user resource diff --git a/deploy/kyverno/add-dependency-policy.yaml b/deploy/kyverno/add-dependency-policy.yaml index 9a11527d..eb1c5073 100644 --- a/deploy/kyverno/add-dependency-policy.yaml +++ b/deploy/kyverno/add-dependency-policy.yaml @@ -11,11 +11,14 @@ metadata: gets deleted automatically when the dependent resource is deleted. 
name: add-dependency spec: + schemaValidation: false background: false failurePolicy: Fail rules: - generate: apiVersion: dummy.crds.com/v1 + kind: Dependency + namespace: "{{request.object.metadata.name}}" data: metadata: labels: @@ -26,13 +29,17 @@ spec: kind: "{{request.kind.kind}}" name: "{{request.name}}" uid: "{{request.object.metadata.uid}}" - kind: Dependency name: "{{request.name}}-{{request.object.metadata.labels.dependsOnUid}}" match: any: - resources: kinds: - "*" + # - User + # - ProviderConfig + # - XEdgeNetwork + # - XStream + # - XConsumer selector: matchLabels: dependsOnUid: ?* diff --git a/deploy/kyverno/add-second-dependency-policy.yaml b/deploy/kyverno/add-second-dependency-policy.yaml index 271e5127..6f46ca33 100644 --- a/deploy/kyverno/add-second-dependency-policy.yaml +++ b/deploy/kyverno/add-second-dependency-policy.yaml @@ -32,7 +32,7 @@ spec: any: - resources: kinds: - - "*" + - "XEdgeNetwork" selector: matchLabels: dependsOnSecondUid: ?* diff --git a/deploy/kyverno/kustomization.yaml b/deploy/kyverno/kustomization.yaml index 7795772f..73d0195d 100644 --- a/deploy/kyverno/kustomization.yaml +++ b/deploy/kyverno/kustomization.yaml @@ -1,6 +1,6 @@ resources: - dependency_crd.yaml - - clusterrole.yaml + # - clusterrole.yaml - add-dependency-policy.yaml - add-second-dependency-policy.yaml - block-delete-policy.yaml diff --git a/deploy/kyverno/wip/prevent-label.yaml b/deploy/kyverno/wip/prevent-label.yaml deleted file mode 100644 index 66d79d76..00000000 --- a/deploy/kyverno/wip/prevent-label.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: kyverno.io/v1 -kind: ClusterPolicy -metadata: - annotations: - policies.kyverno.io/title: Removes the label that is used to track dependencies for XStream resources - policies.kyverno.io/category: Dependency Management - policies.kyverno.io/subject: Lifecycle Management - policies.kyverno.io/description: >- - Removes the label that is used to track dependencies for XStream resources. 
- name: prevent-label
-spec:
-  background: false
-  failurePolicy: Fail
-  rules:
-    - mutate:
-        patchesJson6902: |-
-          - path: "/metadata/labels/dependsOnUid"
-            op: remove
-      match:
-        any:
-          - resources:
-              kinds:
-                - "streams.network.edgefarm.io/v1alpha1/XStream"
-              selector:
-                matchLabels:
-                  dependsOnUid: ?*
-      name: check-xstream-for-dependency-annotation
-      preconditions:
-        any:
-          - key: "{{ request.operation }}"
-            operator: Equals
-            value: CREATE
-          - key: "{{ request.operation }}"
-            operator: Equals
-            value: UPDATE
-  validationFailureAction: enforce
diff --git a/deploy/network-dependencies-webhook/validation-webhook.yaml b/deploy/network-dependencies-webhook/validation-webhook.yaml
index 3ed5a516..472470d8 100644
--- a/deploy/network-dependencies-webhook/validation-webhook.yaml
+++ b/deploy/network-dependencies-webhook/validation-webhook.yaml
@@ -5,7 +5,7 @@ metadata:
   annotations:
     cert-manager.io/inject-ca-from: crossplane-system/network-dependencies-webhook
 webhooks:
-  - name: network-dependencies-webhook.crossplane-system.svc
+  - name: validating.network.edgefarm.io.v1alpha1.pods
     sideEffects: "None"
     objectSelector:
       matchLabels:
@@ -16,10 +16,86 @@ webhooks:
       service:
         name: network-dependencies-webhook
         namespace: crossplane-system
-        path: /validate
+        path: /validating-network-edgefarm-io-v1alpha1-pods
       caBundle: Cg==
     rules:
       - apiGroups: [""]
         apiVersions: ["v1"]
-        operations: ["DELETE"]
         resources: ["pods"]
+        operations: ["DELETE"]
+
+  - name: validating.network.edgefarm.io.v1alpha1.users
+    sideEffects: "None"
+    objectSelector:
+      matchExpressions:
+        - key: dependsOnUid
+          operator: Exists
+    admissionReviewVersions:
+      - "v1beta1"
+    clientConfig:
+      service:
+        name: network-dependencies-webhook
+        namespace: crossplane-system
+        path: /validating-network-edgefarm-io-v1alpha1-users
+      caBundle: Cg==
+    rules:
+      - apiGroups: ["issue.natssecrets.crossplane.io"]
+        apiVersions: ["v1alpha1"]
+        resources: ["users"]
+        operations: ["DELETE"]
+
+  - name: validating.network.edgefarm.io.v1alpha1.streams
+    sideEffects: "None"
+    objectSelector:
+      matchExpressions:
+        - key: dependsOnUid
+          operator: Exists
+    admissionReviewVersions:
+      - "v1beta1"
+    clientConfig:
+      service:
+        name: network-dependencies-webhook
+        namespace: crossplane-system
+        path: /validating-network-edgefarm-io-v1alpha1-streams
+      caBundle: Cg==
+    rules:
+      - apiGroups: ["streams.network.edgefarm.io"]
+        apiVersions: ["v1alpha1"]
+        resources: ["xstreams"]
+        operations: ["DELETE"]
+
+  - name: validating.network.edgefarm.io.v1alpha1.providerconfigs
+    sideEffects: "None"
+    objectSelector:
+      matchExpressions:
+        - key: dependsOnUid
+          operator: Exists
+    admissionReviewVersions:
+      - "v1beta1"
+    clientConfig:
+      service:
+        name: network-dependencies-webhook
+        namespace: crossplane-system
+        path: /validating-network-edgefarm-io-v1alpha1-providerconfigs
+      caBundle: Cg==
+    rules:
+      - apiGroups: ["nats.crossplane.io"]
+        apiVersions: ["v1alpha1"]
+        resources: ["providerconfigs"]
+        operations: ["DELETE"]
+
+  - name: validating.network.edgefarm.io.v1alpha1.accounts
+    sideEffects: "None"
+    admissionReviewVersions:
+      - "v1beta1"
+    clientConfig:
+      service:
+        name: network-dependencies-webhook
+        namespace: crossplane-system
+        path: /validating-network-edgefarm-io-v1alpha1-accounts
+      caBundle: Cg==
+    rules:
+      - apiGroups: ["issue.natssecrets.crossplane.io"]
+        apiVersions: ["v1alpha1"]
+        resources: ["accounts"]
+        operations: ["DELETE"]
diff --git a/go.mod b/go.mod
index c8faf3c5..16913c17 100644
--- a/go.mod
+++ b/go.mod
@@ -3,6 +3,8 @@ module 
github.com/edgefarm/edgefarm.network go 1.20 require ( + github.com/edgefarm/provider-nats v0.2.0 + github.com/edgefarm/provider-natssecrets v0.2.2 github.com/stretchr/testify v1.8.2 k8s.io/api v0.26.3 k8s.io/apimachinery v0.26.3 @@ -10,41 +12,53 @@ require ( ) require ( + github.com/crossplane/crossplane-runtime v0.19.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/edgefarm/vault-plugin-secrets-nats v1.1.0 // indirect + github.com/emicklei/go-restful/v3 v3.10.1 // indirect + github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/go-logr/logr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/swag v0.22.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.2 // indirect - github.com/google/gnostic v0.5.7-v3refs // indirect + github.com/google/gnostic v0.6.9 // indirect github.com/google/go-cmp v0.5.9 // indirect - github.com/google/gofuzz v1.1.0 // indirect - github.com/imdario/mergo v0.3.6 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/imdario/mergo v0.3.13 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/nats-io/jwt/v2 v2.3.0 // indirect + github.com/nats-io/nats.go v1.24.0 // indirect + github.com/nats-io/nkeys v0.3.0 // indirect + github.com/nats-io/nuid v1.0.1 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/spf13/afero v1.9.4 // indirect github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/crypto v0.6.0 // indirect + golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 // indirect golang.org/x/net v0.7.0 // indirect - golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 // indirect + golang.org/x/oauth2 v0.5.0 // indirect golang.org/x/sys v0.5.0 // indirect golang.org/x/term v0.5.0 // indirect golang.org/x/text v0.7.0 // indirect - golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect + golang.org/x/time v0.3.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect - k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + k8s.io/klog/v2 v2.90.0 // indirect + k8s.io/kube-openapi v0.0.0-20230224204730-66828de6f33b // indirect + k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect + sigs.k8s.io/controller-runtime v0.14.4 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index 2f95d183..c1701644 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,7 @@ 
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -13,6 +14,9 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -30,46 +34,74 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod 
h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/crossplane/crossplane-runtime v0.19.1 h1:KpDwO5XlKeDfPpms0P57W77YcRHDM/nE3A82ZlYGsyA= +github.com/crossplane/crossplane-runtime v0.19.1/go.mod h1:OJQ1NxtQK2ZTRmvtnQPoy8LsXsARTnVydRVDQEgIuz4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/edgefarm/provider-nats v0.2.0 h1:xPaBGy/qEZ5jzPdFc1xURO4PYgqbTpN2cpQJB9nFJ0E= +github.com/edgefarm/provider-nats v0.2.0/go.mod h1:W7ed8PcRPaWByIqsdLMLzC4YVRFCPQSPXa6QrnE+tL4= +github.com/edgefarm/provider-natssecrets v0.2.2 h1:w1F6lZodX6OpqwfFrerW3PsBkWbh4YeH2aqcfOV+EbQ= +github.com/edgefarm/provider-natssecrets v0.2.2/go.mod h1:t78YuX0d20JAGB85bzho1YPMYqQkS8msu8zSu/SytJE= +github.com/edgefarm/vault-plugin-secrets-nats v1.1.0 h1:wn3NvPQQSWGYLuqUIpbRMPmf4WTDJnQYDPEwe6fJ0k4= +github.com/edgefarm/vault-plugin-secrets-nats v1.1.0/go.mod h1:SV2ICfgfhHstIpb82kID5+Fr8/LAclX/r/Nfs8uMpoE= +github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= +github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= +github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw 
v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -90,13 +122,14 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod 
h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= +github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= +github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -104,14 +137,17 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= -github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -119,14 +155,23 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod 
h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -135,16 +180,20 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= 
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -152,14 +201,33 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= +github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI= +github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= +github.com/nats-io/nats-server/v2 v2.9.10 h1:LMC46Oi9E6BUx/xBsaCVZgofliAqKQzRPU6eKWkN8jE= +github.com/nats-io/nats.go v1.24.0 h1:CRiD8L5GOQu/DcfkmgBcTTIQORMwizF+rPk6T0RaHVQ= +github.com/nats-io/nats.go v1.24.0/go.mod h1:dVQF+BK3SzUZpwyzHedXsvH3EO38aVKuOPkkHlv5hXA= +github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/onsi/ginkgo/v2 v2.6.0 h1:9t9b9vRUbFq3C4qKFCGkVuq/fIHji802N1nrtkh1mNc= +github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/common v0.40.0 h1:Afz7EVRqGg2Mqqf4JuF9vdvp1pi220m55Pi9T2JnO4Q= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.9.4 h1:Sd43wM1IWz/s1aVXdOBkjJvuP8UdyqioeE4AmM0QsBs= +github.com/spf13/afero v1.9.4/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= @@ -169,11 +237,15 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -183,11 +255,21 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod 
h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -198,6 +280,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 h1:Jvc7gsqn21cJHCmAWx0LiimpP18LZmUxkT5Mp7EZ1mI= +golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -210,6 +294,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -218,6 +303,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -245,7 +332,12 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net 
v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -253,8 +345,12 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2 h1:+jnHzr9VPj32ykQVai5DNahi9+NSp7yYuCsl5eAQtL0= -golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.5.0 h1:HuArIo48skDwlrvM3sEdHXElYslAMsf3KwRkkW4MC4s= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -264,6 +360,7 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -289,12 +386,21 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -302,14 +408,16 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220411224347-583f2d630306 h1:+gHMid33q6pen7kv9xvT+JRinntgeXO2AeZVd0AWD3w= -golang.org/x/time v0.0.0-20220411224347-583f2d630306/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -351,11 +459,19 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -372,6 +488,9 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod 
h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -403,13 +522,21 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -422,6 +549,13 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= 
+google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -434,22 +568,25 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -461,21 +598,25 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.26.3 h1:emf74GIQMTik01Aum9dPP0gAypL8JTLl/lHa4V9RFSU= k8s.io/api v0.26.3/go.mod 
h1:PXsqwPMXBSBcL1lJ9CYDKy7kIReUydukS5JiRlxC3qE= +k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI= k8s.io/apimachinery v0.26.3 h1:dQx6PNETJ7nODU3XPtrwkfuubs6w7sX0M8n61zHIV/k= k8s.io/apimachinery v0.26.3/go.mod h1:ats7nN1LExKHvJ9TmwootT00Yz05MuYqPXEXaVeOy5I= k8s.io/client-go v0.26.3 h1:k1UY+KXfkxV2ScEL3gilKcF7761xkYsSD6BC9szIu8s= k8s.io/client-go v0.26.3/go.mod h1:ZPNu9lm8/dbRIPAgteN30RSXea6vrCpFvq+MateTUuQ= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/component-base v0.26.1 h1:4ahudpeQXHZL5kko+iDHqLj/FSGAEUnSVO0EBbgDd+4= +k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M= +k8s.io/klog/v2 v2.90.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20230224204730-66828de6f33b h1:4dkmFEDQj0ZBLKCxJ0R+qzhvZmEvRdRaaZAE06tR/Lg= +k8s.io/kube-openapi v0.0.0-20230224204730-66828de6f33b/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= +k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk= +k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/controller-runtime v0.14.4 h1:Kd/Qgx5pd2XUL08eOV2vwIq3L9GhIbJ5Nxengbd4/0M= +sigs.k8s.io/controller-runtime v0.14.4/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/vendor/github.com/crossplane/crossplane-runtime/LICENSE b/vendor/github.com/crossplane/crossplane-runtime/LICENSE new file mode 100644 index 00000000..ef10385c --- /dev/null +++ b/vendor/github.com/crossplane/crossplane-runtime/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The Crossplane Authors. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
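The vendored `condition.go` that follows defines the Ready/Synced condition API that the components above build on. As a quick orientation, here is a minimal usage sketch; this is an editorial illustration, not part of the patch, and it uses only the identifiers defined in the vendored file below, imported under its module path:

```go
package main

import (
	"fmt"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
)

func main() {
	// Start with a status that reports the resource as being created.
	s := xpv1.NewConditionedStatus(xpv1.Creating())

	// SetConditions replaces the existing Ready condition, because
	// Creating() and Available() share the condition type "Ready".
	s.SetConditions(xpv1.Available().WithMessage("resource is serving"))

	ready := s.GetCondition(xpv1.TypeReady)
	fmt.Println(ready.Status, ready.Reason) // True Available

	// GetCondition never returns nil: asking for an unset type yields a
	// condition of that type with status Unknown.
	fmt.Println(s.GetCondition(xpv1.TypeSynced).Status) // Unknown
}
```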
diff --git a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/condition.go b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/condition.go new file mode 100644 index 00000000..9c5d29f7 --- /dev/null +++ b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/condition.go @@ -0,0 +1,264 @@ +/* +Copyright 2019 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "sort" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// A ConditionType represents a condition a resource could be in. +type ConditionType string + +// Condition types. +const ( + // TypeReady resources are believed to be ready to handle work. + TypeReady ConditionType = "Ready" + + // TypeSynced resources are believed to be in sync with the + // Kubernetes resources that manage their lifecycle. + TypeSynced ConditionType = "Synced" +) + +// A ConditionReason represents the reason a resource is in a condition. +type ConditionReason string + +// Reasons a resource is or is not ready. +const ( + ReasonAvailable ConditionReason = "Available" + ReasonUnavailable ConditionReason = "Unavailable" + ReasonCreating ConditionReason = "Creating" + ReasonDeleting ConditionReason = "Deleting" +) + +// Reasons a resource is or is not synced. +const ( + ReasonReconcileSuccess ConditionReason = "ReconcileSuccess" + ReasonReconcileError ConditionReason = "ReconcileError" + ReasonReconcilePaused ConditionReason = "ReconcilePaused" +) + +// A Condition that may apply to a resource. +type Condition struct { + // Type of this condition. At most one of each condition type may apply to + // a resource at any point in time. + Type ConditionType `json:"type"` + + // Status of this condition; is it currently True, False, or Unknown? + Status corev1.ConditionStatus `json:"status"` + + // LastTransitionTime is the last time this condition transitioned from one + // status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime"` + + // A Reason for this condition's last transition from one status to another. + Reason ConditionReason `json:"reason"` + + // A Message containing details about this condition's last transition from + // one status to another, if any. + // +optional + Message string `json:"message,omitempty"` +} + +// Equal returns true if the condition is identical to the supplied condition, +// ignoring the LastTransitionTime. +func (c Condition) Equal(other Condition) bool { + return c.Type == other.Type && + c.Status == other.Status && + c.Reason == other.Reason && + c.Message == other.Message +} + +// WithMessage returns a condition by adding the provided message to existing +// condition. +func (c Condition) WithMessage(msg string) Condition { + c.Message = msg + return c +} + +// NOTE(negz): Conditions are implemented as a slice rather than a map to comply +// with Kubernetes API conventions. Ideally we'd comply by using a map that +// marshalled to a JSON array, but doing so confuses the CRD schema generator. 
+// https://github.com/kubernetes/community/blob/9bf8cd/contributors/devel/sig-architecture/api-conventions.md#lists-of-named-subobjects-preferred-over-maps
+
+// NOTE(negz): Do not manipulate Conditions directly. Use the Set method.
+
+// A ConditionedStatus reflects the observed status of a resource. Only
+// one condition of each type may exist.
+type ConditionedStatus struct {
+	// Conditions of the resource.
+	// +optional
+	Conditions []Condition `json:"conditions,omitempty"`
+}
+
+// NewConditionedStatus returns a status with the supplied conditions set.
+func NewConditionedStatus(c ...Condition) *ConditionedStatus {
+	s := &ConditionedStatus{}
+	s.SetConditions(c...)
+	return s
+}
+
+// GetCondition returns the condition for the given ConditionType if it
+// exists; otherwise it returns an unknown condition of that type.
+func (s *ConditionedStatus) GetCondition(ct ConditionType) Condition {
+	for _, c := range s.Conditions {
+		if c.Type == ct {
+			return c
+		}
+	}
+
+	return Condition{Type: ct, Status: corev1.ConditionUnknown}
+}
+
+// SetConditions sets the supplied conditions, replacing any existing conditions
+// of the same type. This is a no-op if all supplied conditions are identical,
+// ignoring the last transition time, to those already set.
+func (s *ConditionedStatus) SetConditions(c ...Condition) {
+	for _, new := range c {
+		exists := false
+		for i, existing := range s.Conditions {
+			if existing.Type != new.Type {
+				continue
+			}
+
+			if existing.Equal(new) {
+				exists = true
+				continue
+			}
+
+			s.Conditions[i] = new
+			exists = true
+		}
+		if !exists {
+			s.Conditions = append(s.Conditions, new)
+		}
+	}
+}
+
+// Equal returns true if the status is identical to the supplied status,
+// ignoring the LastTransitionTimes and order of statuses.
+func (s *ConditionedStatus) Equal(other *ConditionedStatus) bool {
+	if s == nil || other == nil {
+		return s == nil && other == nil
+	}
+
+	if len(other.Conditions) != len(s.Conditions) {
+		return false
+	}
+
+	sc := make([]Condition, len(s.Conditions))
+	copy(sc, s.Conditions)
+
+	oc := make([]Condition, len(other.Conditions))
+	copy(oc, other.Conditions)
+
+	// We should not have more than one condition of each type.
+	sort.Slice(sc, func(i, j int) bool { return sc[i].Type < sc[j].Type })
+	sort.Slice(oc, func(i, j int) bool { return oc[i].Type < oc[j].Type })
+
+	for i := range sc {
+		if !sc[i].Equal(oc[i]) {
+			return false
+		}
+	}
+
+	return true
+}
+
+// Creating returns a condition that indicates the resource is currently
+// being created.
+func Creating() Condition {
+	return Condition{
+		Type:               TypeReady,
+		Status:             corev1.ConditionFalse,
+		LastTransitionTime: metav1.Now(),
+		Reason:             ReasonCreating,
+	}
+}
+
+// Deleting returns a condition that indicates the resource is currently
+// being deleted.
+func Deleting() Condition {
+	return Condition{
+		Type:               TypeReady,
+		Status:             corev1.ConditionFalse,
+		LastTransitionTime: metav1.Now(),
+		Reason:             ReasonDeleting,
+	}
+}
+
+// Available returns a condition that indicates the resource is
+// currently observed to be available for use.
+func Available() Condition {
+	return Condition{
+		Type:               TypeReady,
+		Status:             corev1.ConditionTrue,
+		LastTransitionTime: metav1.Now(),
+		Reason:             ReasonAvailable,
+	}
+}
+
+// Unavailable returns a condition that indicates the resource is not
+// currently available for use. Unavailable should be set only when Crossplane
+// expects the resource to be available but knows it is not, for example
+// because its API reports it is unhealthy.
+func Unavailable() Condition { + return Condition{ + Type: TypeReady, + Status: corev1.ConditionFalse, + LastTransitionTime: metav1.Now(), + Reason: ReasonUnavailable, + } +} + +// ReconcileSuccess returns a condition indicating that Crossplane successfully +// completed the most recent reconciliation of the resource. +func ReconcileSuccess() Condition { + return Condition{ + Type: TypeSynced, + Status: corev1.ConditionTrue, + LastTransitionTime: metav1.Now(), + Reason: ReasonReconcileSuccess, + } +} + +// ReconcileError returns a condition indicating that Crossplane encountered an +// error while reconciling the resource. This could mean Crossplane was +// unable to update the resource to reflect its desired state, or that +// Crossplane was unable to determine the current actual state of the resource. +func ReconcileError(err error) Condition { + return Condition{ + Type: TypeSynced, + Status: corev1.ConditionFalse, + LastTransitionTime: metav1.Now(), + Reason: ReasonReconcileError, + Message: err.Error(), + } +} + +// ReconcilePaused returns a condition that indicates reconciliation on +// the managed resource is paused via the pause annotation. +func ReconcilePaused() Condition { + return Condition{ + Type: TypeSynced, + Status: corev1.ConditionFalse, + LastTransitionTime: metav1.Now(), + Reason: ReasonReconcilePaused, + } +} diff --git a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/connection_details.go b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/connection_details.go new file mode 100644 index 00000000..9b5e871f --- /dev/null +++ b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/connection_details.go @@ -0,0 +1,226 @@ +/* +Copyright 2019 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ( + // LabelKeyOwnerUID is the UID of the owner resource of a connection secret. + // Kubernetes provides owner/controller references to track ownership of + // resources including secrets, however, this would only work for in cluster + // k8s secrets. We opted to use a label for this purpose to be consistent + // across Secret Store implementations and expect all to support + // setting/getting labels. + LabelKeyOwnerUID = "secret.crossplane.io/owner-uid" +) + +// PublishConnectionDetailsTo represents configuration of a connection secret. +type PublishConnectionDetailsTo struct { + // Name is the name of the connection secret. + Name string `json:"name"` + + // Metadata is the metadata for connection secret. + // +optional + Metadata *ConnectionSecretMetadata `json:"metadata,omitempty"` + + // SecretStoreConfigRef specifies which secret store config should be used + // for this ConnectionSecret. + // +optional + // +kubebuilder:default={"name": "default"} + SecretStoreConfigRef *Reference `json:"configRef,omitempty"` +} + +// ConnectionSecretMetadata represents metadata of a connection secret. 
+// Labels are used to track ownership of connection secrets and have to be
+// supported by any secret store implementation.
+type ConnectionSecretMetadata struct {
+	// Labels are the labels/tags to be added to connection secret.
+	// - For Kubernetes secrets, this will be used as "metadata.labels".
+	// - It is up to Secret Store implementation for other store types.
+	// +optional
+	Labels map[string]string `json:"labels,omitempty"`
+	// Annotations are the annotations to be added to connection secret.
+	// - For Kubernetes secrets, this will be used as "metadata.annotations".
+	// - It is up to Secret Store implementation for other store types.
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty"`
+	// Type is the SecretType for the connection secret.
+	// - Only valid for Kubernetes Secret Stores.
+	// +optional
+	Type *corev1.SecretType `json:"type,omitempty"`
+}
+
+// SetOwnerUID sets the owner object UID label.
+func (in *ConnectionSecretMetadata) SetOwnerUID(uid types.UID) {
+	if in.Labels == nil {
+		in.Labels = map[string]string{}
+	}
+	in.Labels[LabelKeyOwnerUID] = string(uid)
+}
+
+// GetOwnerUID gets the owner object UID.
+func (in *ConnectionSecretMetadata) GetOwnerUID() string {
+	if u, ok := in.Labels[LabelKeyOwnerUID]; ok {
+		return u
+	}
+	return ""
+}
+
+// SecretStoreType represents a secret store type.
+type SecretStoreType string
+
+const (
+	// SecretStoreKubernetes indicates that secret store type is
+	// Kubernetes. In other words, connection secrets will be stored as K8s
+	// Secrets.
+	SecretStoreKubernetes SecretStoreType = "Kubernetes"
+
+	// SecretStoreVault indicates that secret store type is Vault.
+	SecretStoreVault SecretStoreType = "Vault"
+)
+
+// SecretStoreConfig represents configuration of a Secret Store.
+type SecretStoreConfig struct {
+	// Type configures which secret store to be used. Only the configuration
+	// block for this store will be used and others will be ignored if provided.
+	// Default is Kubernetes.
+	// +optional
+	// +kubebuilder:default=Kubernetes
+	Type *SecretStoreType `json:"type,omitempty"`
+
+	// DefaultScope used for scoping secrets for "cluster-scoped" resources.
+	// If store type is "Kubernetes", this would mean the default namespace to
+	// store connection secrets for cluster scoped resources.
+	// In case of "Vault", this would be used as the default parent path.
+	// Typically, should be set as Crossplane installation namespace.
+	DefaultScope string `json:"defaultScope"`
+
+	// Kubernetes configures a Kubernetes secret store.
+	// If the "type" is "Kubernetes" but no config provided, in-cluster config
+	// will be used.
+	// +optional
+	Kubernetes *KubernetesSecretStoreConfig `json:"kubernetes,omitempty"`
+
+	// Vault configures a Vault secret store.
+	// +optional
+	Vault *VaultSecretStoreConfig `json:"vault,omitempty"`
+}
+
+// KubernetesAuthConfig required to authenticate to a K8s API. It expects
+// a "kubeconfig" file to be provided.
+type KubernetesAuthConfig struct {
+	// Source of the credentials.
+	// +kubebuilder:validation:Enum=None;Secret;Environment;Filesystem
+	Source CredentialsSource `json:"source"`
+
+	// CommonCredentialSelectors provides common selectors for extracting
+	// credentials.
+	CommonCredentialSelectors `json:",inline"`
+}
+
+// KubernetesSecretStoreConfig represents the required configuration
+// for a Kubernetes secret store.
+type KubernetesSecretStoreConfig struct {
+	// Credentials used to connect to the Kubernetes API.
+ Auth KubernetesAuthConfig `json:"auth"` + + // TODO(turkenh): Support additional identities like + // https://github.com/crossplane-contrib/provider-kubernetes/blob/4d722ef914e6964e80e190317daca9872ae98738/apis/v1alpha1/types.go#L34 +} + +// VaultAuthMethod represent a Vault authentication method. +// https://www.vaultproject.io/docs/auth +type VaultAuthMethod string + +const ( + // VaultAuthToken indicates that "Token Auth" will be used to + // authenticate to Vault. + // https://www.vaultproject.io/docs/auth/token + VaultAuthToken VaultAuthMethod = "Token" +) + +// VaultAuthTokenConfig represents configuration for Vault Token Auth Method. +// https://www.vaultproject.io/docs/auth/token +type VaultAuthTokenConfig struct { + // Source of the credentials. + // +kubebuilder:validation:Enum=None;Secret;Environment;Filesystem + Source CredentialsSource `json:"source"` + + // CommonCredentialSelectors provides common selectors for extracting + // credentials. + CommonCredentialSelectors `json:",inline"` +} + +// VaultAuthConfig required to authenticate to a Vault API. +type VaultAuthConfig struct { + // Method configures which auth method will be used. + Method VaultAuthMethod `json:"method"` + // Token configures Token Auth for Vault. + // +optional + Token *VaultAuthTokenConfig `json:"token,omitempty"` +} + +// VaultCABundleConfig represents configuration for configuring a CA bundle. +type VaultCABundleConfig struct { + // Source of the credentials. + // +kubebuilder:validation:Enum=None;Secret;Environment;Filesystem + Source CredentialsSource `json:"source"` + + // CommonCredentialSelectors provides common selectors for extracting + // credentials. + CommonCredentialSelectors `json:",inline"` +} + +// VaultKVVersion represent API version of the Vault KV engine +// https://www.vaultproject.io/docs/secrets/kv +type VaultKVVersion string + +const ( + // VaultKVVersionV1 indicates that Secret API is KV Secrets Engine Version 1 + // https://www.vaultproject.io/docs/secrets/kv/kv-v1 + VaultKVVersionV1 VaultKVVersion = "v1" + + // VaultKVVersionV2 indicates that Secret API is KV Secrets Engine Version 2 + // https://www.vaultproject.io/docs/secrets/kv/kv-v2 + VaultKVVersionV2 VaultKVVersion = "v2" +) + +// VaultSecretStoreConfig represents the required configuration for a Vault +// secret store. +type VaultSecretStoreConfig struct { + // Server is the url of the Vault server, e.g. "https://vault.acme.org" + Server string `json:"server"` + + // MountPath is the mount path of the KV secrets engine. + MountPath string `json:"mountPath"` + + // Version of the KV Secrets engine of Vault. + // https://www.vaultproject.io/docs/secrets/kv + // +optional + // +kubebuilder:default=v2 + Version *VaultKVVersion `json:"version,omitempty"` + + // CABundle configures CA bundle for Vault Server. + // +optional + CABundle *VaultCABundleConfig `json:"caBundle,omitempty"` + + // Auth configures an authentication method for Vault. + Auth VaultAuthConfig `json:"auth"` +} diff --git a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/doc.go b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/doc.go new file mode 100644 index 00000000..9b99de7b --- /dev/null +++ b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains core API types used by most Crossplane resources. +// +kubebuilder:object:generate=true +package v1 diff --git a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/merge.go b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/merge.go new file mode 100644 index 00000000..efed85db --- /dev/null +++ b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/merge.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "github.com/imdario/mergo" +) + +// MergeOptions Specifies merge options on a field path +type MergeOptions struct { // TODO(aru): add more options that control merging behavior + // Specifies that already existing values in a merged map should be preserved + // +optional + KeepMapValues *bool `json:"keepMapValues,omitempty"` + // Specifies that already existing elements in a merged slice should be preserved + // +optional + AppendSlice *bool `json:"appendSlice,omitempty"` +} + +// MergoConfiguration the default behavior is to replace maps and slices +func (mo *MergeOptions) MergoConfiguration() []func(*mergo.Config) { + config := []func(*mergo.Config){mergo.WithOverride} + if mo == nil { + return config + } + + if mo.KeepMapValues != nil && *mo.KeepMapValues { + config = config[:0] + } + if mo.AppendSlice != nil && *mo.AppendSlice { + config = append(config, mergo.WithAppendSlice) + } + return config +} + +// IsAppendSlice returns true if mo.AppendSlice is set to true +func (mo *MergeOptions) IsAppendSlice() bool { + return mo != nil && mo.AppendSlice != nil && *mo.AppendSlice +} diff --git a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/policies.go b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/policies.go new file mode 100644 index 00000000..560b98b2 --- /dev/null +++ b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/policies.go @@ -0,0 +1,85 @@ +/* +Copyright 2019 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1
+
+// A DeletionPolicy determines what should happen to the underlying external
+// resource when a managed resource is deleted.
+// +kubebuilder:validation:Enum=Orphan;Delete
+type DeletionPolicy string
+
+const (
+	// DeletionOrphan means the external resource will be orphaned when its
+	// managed resource is deleted.
+	DeletionOrphan DeletionPolicy = "Orphan"
+
+	// DeletionDelete means the external resource will be deleted when its
+	// managed resource is deleted.
+	DeletionDelete DeletionPolicy = "Delete"
+)
+
+// A CompositeDeletePolicy determines how the composite resource should be deleted
+// when the corresponding claim is deleted.
+// +kubebuilder:validation:Enum=Background;Foreground
+type CompositeDeletePolicy string
+
+const (
+	// CompositeDeleteBackground means the composite resource will be deleted using
+	// the Background Propagation Policy when the claim is deleted.
+	CompositeDeleteBackground CompositeDeletePolicy = "Background"
+
+	// CompositeDeleteForeground means the composite resource will be deleted using
+	// the Foreground Propagation Policy when the claim is deleted.
+	CompositeDeleteForeground CompositeDeletePolicy = "Foreground"
+)
+
+// An UpdatePolicy determines how something should be updated - either
+// automatically (without human intervention) or manually.
+// +kubebuilder:validation:Enum=Automatic;Manual
+type UpdatePolicy string
+
+const (
+	// UpdateAutomatic means the resource should be updated automatically,
+	// without any human intervention.
+	UpdateAutomatic UpdatePolicy = "Automatic"
+
+	// UpdateManual means the resource requires human intervention to
+	// update.
+	UpdateManual UpdatePolicy = "Manual"
+)
+
+// ResolvePolicy is a type for resolve policy.
+type ResolvePolicy string
+
+// ResolutionPolicy is a type for resolution policy.
+type ResolutionPolicy string
+
+const (
+	// ResolvePolicyAlways is a resolve option.
+	// When the ResolvePolicy is set to ResolvePolicyAlways, the reference will
+	// be resolved on every reconcile loop.
+	ResolvePolicyAlways ResolvePolicy = "Always"
+
+	// ResolutionPolicyRequired is a resolution option.
+	// When the ResolutionPolicy is set to ResolutionPolicyRequired, execution
+	// cannot continue if the reference cannot be resolved.
+	ResolutionPolicyRequired ResolutionPolicy = "Required"
+
+	// ResolutionPolicyOptional is a resolution option.
+	// When the ResolutionPolicy is set to ResolutionPolicyOptional, execution
+	// can continue even if the reference cannot be resolved.
+	ResolutionPolicyOptional ResolutionPolicy = "Optional"
+)
diff --git a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/resource.go b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/resource.go
new file mode 100644
index 00000000..872d9a09
--- /dev/null
+++ b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/resource.go
@@ -0,0 +1,321 @@
+/*
+Copyright 2019 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +const ( + // ResourceCredentialsSecretEndpointKey is the key inside a connection secret for the connection endpoint + ResourceCredentialsSecretEndpointKey = "endpoint" + // ResourceCredentialsSecretPortKey is the key inside a connection secret for the connection port + ResourceCredentialsSecretPortKey = "port" + // ResourceCredentialsSecretUserKey is the key inside a connection secret for the connection user + ResourceCredentialsSecretUserKey = "username" + // ResourceCredentialsSecretPasswordKey is the key inside a connection secret for the connection password + ResourceCredentialsSecretPasswordKey = "password" + // ResourceCredentialsSecretCAKey is the key inside a connection secret for the server CA certificate + ResourceCredentialsSecretCAKey = "clusterCA" + // ResourceCredentialsSecretClientCertKey is the key inside a connection secret for the client certificate + ResourceCredentialsSecretClientCertKey = "clientCert" + // ResourceCredentialsSecretClientKeyKey is the key inside a connection secret for the client key + ResourceCredentialsSecretClientKeyKey = "clientKey" + // ResourceCredentialsSecretTokenKey is the key inside a connection secret for the bearer token value + ResourceCredentialsSecretTokenKey = "token" + // ResourceCredentialsSecretKubeconfigKey is the key inside a connection secret for the raw kubeconfig yaml + ResourceCredentialsSecretKubeconfigKey = "kubeconfig" +) + +// LabelKeyProviderName is added to ProviderConfigUsages to relate them to their +// ProviderConfig. +const LabelKeyProviderName = "crossplane.io/provider-config" + +// NOTE(negz): The below secret references differ from ObjectReference and +// LocalObjectReference in that they include only the fields Crossplane needs to +// reference a secret, and make those fields required. This reduces ambiguity in +// the API for resource authors. + +// A LocalSecretReference is a reference to a secret in the same namespace as +// the referencer. +type LocalSecretReference struct { + // Name of the secret. + Name string `json:"name"` +} + +// A SecretReference is a reference to a secret in an arbitrary namespace. +type SecretReference struct { + // Name of the secret. + Name string `json:"name"` + + // Namespace of the secret. + Namespace string `json:"namespace"` +} + +// A SecretKeySelector is a reference to a secret key in an arbitrary namespace. +type SecretKeySelector struct { + SecretReference `json:",inline"` + + // The key to select. + Key string `json:"key"` +} + +// Policy represents the Resolve and Resolution policies of Reference instance. +type Policy struct { + // Resolve specifies when this reference should be resolved. The default + // is 'IfNotPresent', which will attempt to resolve the reference only when + // the corresponding field is not present. Use 'Always' to resolve the + // reference on every reconcile. + // +optional + // +kubebuilder:validation:Enum=Always;IfNotPresent + Resolve *ResolvePolicy `json:"resolve,omitempty"` + + // Resolution specifies whether resolution of this reference is required. + // The default is 'Required', which means the reconcile will fail if the + // reference cannot be resolved. 'Optional' means this reference will be + // a no-op if it cannot be resolved. 
+ // +optional + // +kubebuilder:default=Required + // +kubebuilder:validation:Enum=Required;Optional + Resolution *ResolutionPolicy `json:"resolution,omitempty"` +} + +// IsResolutionPolicyOptional checks whether the resolution policy of relevant reference is Optional. +func (p *Policy) IsResolutionPolicyOptional() bool { + if p == nil || p.Resolution == nil { + return false + } + return *p.Resolution == ResolutionPolicyOptional +} + +// IsResolvePolicyAlways checks whether the resolution policy of relevant reference is Always. +func (p *Policy) IsResolvePolicyAlways() bool { + if p == nil || p.Resolve == nil { + return false + } + return *p.Resolve == ResolvePolicyAlways +} + +// A Reference to a named object. +type Reference struct { + // Name of the referenced object. + Name string `json:"name"` + + // Policies for referencing. + // +optional + Policy *Policy `json:"policy,omitempty"` +} + +// A TypedReference refers to an object by Name, Kind, and APIVersion. It is +// commonly used to reference cluster-scoped objects or objects where the +// namespace is already known. +type TypedReference struct { + // APIVersion of the referenced object. + APIVersion string `json:"apiVersion"` + + // Kind of the referenced object. + Kind string `json:"kind"` + + // Name of the referenced object. + Name string `json:"name"` + + // UID of the referenced object. + // +optional + UID types.UID `json:"uid,omitempty"` +} + +// A Selector selects an object. +type Selector struct { + // MatchLabels ensures an object with matching labels is selected. + MatchLabels map[string]string `json:"matchLabels,omitempty"` + + // MatchControllerRef ensures an object with the same controller reference + // as the selecting object is selected. + MatchControllerRef *bool `json:"matchControllerRef,omitempty"` + + // Policies for selection. + // +optional + Policy *Policy `json:"policy,omitempty"` +} + +// SetGroupVersionKind sets the Kind and APIVersion of a TypedReference. +func (obj *TypedReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +// GroupVersionKind gets the GroupVersionKind of a TypedReference. +func (obj *TypedReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +// GetObjectKind get the ObjectKind of a TypedReference. +func (obj *TypedReference) GetObjectKind() schema.ObjectKind { return obj } + +// TODO(negz): Rename Resource* to Managed* to clarify that they enable the +// resource.Managed interface. + +// A ResourceSpec defines the desired state of a managed resource. +type ResourceSpec struct { + // WriteConnectionSecretToReference specifies the namespace and name of a + // Secret to which any connection details for this managed resource should + // be written. Connection details frequently include the endpoint, username, + // and password required to connect to the managed resource. + // This field is planned to be replaced in a future release in favor of + // PublishConnectionDetailsTo. Currently, both could be set independently + // and connection details would be published to both without affecting + // each other. + // +optional + WriteConnectionSecretToReference *SecretReference `json:"writeConnectionSecretToRef,omitempty"` + + // PublishConnectionDetailsTo specifies the connection secret config which + // contains a name, metadata and a reference to secret store config to + // which any connection details for this managed resource should be written. 
+	// Connection details frequently include the endpoint, username,
+	// and password required to connect to the managed resource.
+	// +optional
+	PublishConnectionDetailsTo *PublishConnectionDetailsTo `json:"publishConnectionDetailsTo,omitempty"`
+
+	// ProviderConfigReference specifies how the provider that will be used to
+	// create, observe, update, and delete this managed resource should be
+	// configured.
+	// +kubebuilder:default={"name": "default"}
+	ProviderConfigReference *Reference `json:"providerConfigRef,omitempty"`
+
+	// ProviderReference specifies the provider that will be used to create,
+	// observe, update, and delete this managed resource.
+	// Deprecated: Please use ProviderConfigReference, i.e. `providerConfigRef`
+	ProviderReference *Reference `json:"providerRef,omitempty"`
+
+	// DeletionPolicy specifies what will happen to the underlying external
+	// resource when this managed resource is deleted - either "Delete" or
+	// "Orphan" the external resource.
+	// +optional
+	// +kubebuilder:default=Delete
+	DeletionPolicy DeletionPolicy `json:"deletionPolicy,omitempty"`
+}
+
+// ResourceStatus represents the observed state of a managed resource.
+type ResourceStatus struct {
+	ConditionedStatus `json:",inline"`
+}
+
+// A CredentialsSource is a source from which provider credentials may be
+// acquired.
+type CredentialsSource string
+
+const (
+	// CredentialsSourceNone indicates that a provider does not require
+	// credentials.
+	CredentialsSourceNone CredentialsSource = "None"
+
+	// CredentialsSourceSecret indicates that a provider should acquire
+	// credentials from a secret.
+	CredentialsSourceSecret CredentialsSource = "Secret"
+
+	// CredentialsSourceInjectedIdentity indicates that a provider should use
+	// credentials via its (pod's) identity; i.e. via IRSA for AWS,
+	// Workload Identity for GCP, Pod Identity for Azure, or in-cluster
+	// authentication for the Kubernetes API.
+	CredentialsSourceInjectedIdentity CredentialsSource = "InjectedIdentity"
+
+	// CredentialsSourceEnvironment indicates that a provider should acquire
+	// credentials from an environment variable.
+	CredentialsSourceEnvironment CredentialsSource = "Environment"
+
+	// CredentialsSourceFilesystem indicates that a provider should acquire
+	// credentials from the filesystem.
+	CredentialsSourceFilesystem CredentialsSource = "Filesystem"
+)
+
+// CommonCredentialSelectors provides common selectors for extracting
+// credentials.
+type CommonCredentialSelectors struct {
+	// Fs is a reference to a filesystem location that contains credentials that
+	// must be used to connect to the provider.
+	// +optional
+	Fs *FsSelector `json:"fs,omitempty"`
+
+	// Env is a reference to an environment variable that contains credentials
+	// that must be used to connect to the provider.
+	// +optional
+	Env *EnvSelector `json:"env,omitempty"`
+
+	// A SecretRef is a reference to a secret key that contains the credentials
+	// that must be used to connect to the provider.
+	// +optional
+	SecretRef *SecretKeySelector `json:"secretRef,omitempty"`
+}
+
+// EnvSelector selects an environment variable.
+type EnvSelector struct {
+	// Name is the name of an environment variable.
+	Name string `json:"name"`
+}
+
+// FsSelector selects a filesystem location.
+type FsSelector struct {
+	// Path is a filesystem path.
+	Path string `json:"path"`
+}
+
+// A ProviderConfigStatus defines the observed status of a ProviderConfig.
+type ProviderConfigStatus struct { + ConditionedStatus `json:",inline"` + + // Users of this provider configuration. + Users int64 `json:"users,omitempty"` +} + +// A ProviderConfigUsage is a record that a particular managed resource is using +// a particular provider configuration. +type ProviderConfigUsage struct { + // ProviderConfigReference to the provider config being used. + ProviderConfigReference Reference `json:"providerConfigRef"` + + // ResourceReference to the managed resource using the provider config. + ResourceReference TypedReference `json:"resourceRef"` +} + +// A TargetSpec defines the common fields of objects used for exposing +// infrastructure to workloads that can be scheduled to. +// +// Deprecated. +type TargetSpec struct { + // WriteConnectionSecretToReference specifies the name of a Secret, in the + // same namespace as this target, to which any connection details for this + // target should be written or already exist. Connection secrets referenced + // by a target should contain information for connecting to a resource that + // allows for scheduling of workloads. + // +optional + WriteConnectionSecretToReference *LocalSecretReference `json:"connectionSecretRef,omitempty"` + + // A ResourceReference specifies an existing managed resource, in any + // namespace, which this target should attempt to propagate a connection + // secret from. + // +optional + ResourceReference *corev1.ObjectReference `json:"clusterRef,omitempty"` +} + +// A TargetStatus defines the observed status a target. +// +// Deprecated. +type TargetStatus struct { + ConditionedStatus `json:",inline"` +} diff --git a/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/zz_generated.deepcopy.go b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..5f8a7e34 --- /dev/null +++ b/vendor/github.com/crossplane/crossplane-runtime/apis/common/v1/zz_generated.deepcopy.go @@ -0,0 +1,611 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2019 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonCredentialSelectors) DeepCopyInto(out *CommonCredentialSelectors) { + *out = *in + if in.Fs != nil { + in, out := &in.Fs, &out.Fs + *out = new(FsSelector) + **out = **in + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = new(EnvSelector) + **out = **in + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretKeySelector) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonCredentialSelectors. 
+func (in *CommonCredentialSelectors) DeepCopy() *CommonCredentialSelectors { + if in == nil { + return nil + } + out := new(CommonCredentialSelectors) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Condition) DeepCopyInto(out *Condition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Condition. +func (in *Condition) DeepCopy() *Condition { + if in == nil { + return nil + } + out := new(Condition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConditionedStatus) DeepCopyInto(out *ConditionedStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConditionedStatus. +func (in *ConditionedStatus) DeepCopy() *ConditionedStatus { + if in == nil { + return nil + } + out := new(ConditionedStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConnectionSecretMetadata) DeepCopyInto(out *ConnectionSecretMetadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(corev1.SecretType) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConnectionSecretMetadata. +func (in *ConnectionSecretMetadata) DeepCopy() *ConnectionSecretMetadata { + if in == nil { + return nil + } + out := new(ConnectionSecretMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvSelector) DeepCopyInto(out *EnvSelector) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvSelector. +func (in *EnvSelector) DeepCopy() *EnvSelector { + if in == nil { + return nil + } + out := new(EnvSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FsSelector) DeepCopyInto(out *FsSelector) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FsSelector. +func (in *FsSelector) DeepCopy() *FsSelector { + if in == nil { + return nil + } + out := new(FsSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KubernetesAuthConfig) DeepCopyInto(out *KubernetesAuthConfig) { + *out = *in + in.CommonCredentialSelectors.DeepCopyInto(&out.CommonCredentialSelectors) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesAuthConfig. +func (in *KubernetesAuthConfig) DeepCopy() *KubernetesAuthConfig { + if in == nil { + return nil + } + out := new(KubernetesAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KubernetesSecretStoreConfig) DeepCopyInto(out *KubernetesSecretStoreConfig) { + *out = *in + in.Auth.DeepCopyInto(&out.Auth) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubernetesSecretStoreConfig. +func (in *KubernetesSecretStoreConfig) DeepCopy() *KubernetesSecretStoreConfig { + if in == nil { + return nil + } + out := new(KubernetesSecretStoreConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalSecretReference) DeepCopyInto(out *LocalSecretReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSecretReference. +func (in *LocalSecretReference) DeepCopy() *LocalSecretReference { + if in == nil { + return nil + } + out := new(LocalSecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MergeOptions) DeepCopyInto(out *MergeOptions) { + *out = *in + if in.KeepMapValues != nil { + in, out := &in.KeepMapValues, &out.KeepMapValues + *out = new(bool) + **out = **in + } + if in.AppendSlice != nil { + in, out := &in.AppendSlice, &out.AppendSlice + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MergeOptions. +func (in *MergeOptions) DeepCopy() *MergeOptions { + if in == nil { + return nil + } + out := new(MergeOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + if in.Resolve != nil { + in, out := &in.Resolve, &out.Resolve + *out = new(ResolvePolicy) + **out = **in + } + if in.Resolution != nil { + in, out := &in.Resolution, &out.Resolution + *out = new(ResolutionPolicy) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigStatus) DeepCopyInto(out *ProviderConfigStatus) { + *out = *in + in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigStatus. +func (in *ProviderConfigStatus) DeepCopy() *ProviderConfigStatus { + if in == nil { + return nil + } + out := new(ProviderConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProviderConfigUsage) DeepCopyInto(out *ProviderConfigUsage) { + *out = *in + in.ProviderConfigReference.DeepCopyInto(&out.ProviderConfigReference) + out.ResourceReference = in.ResourceReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigUsage. +func (in *ProviderConfigUsage) DeepCopy() *ProviderConfigUsage { + if in == nil { + return nil + } + out := new(ProviderConfigUsage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PublishConnectionDetailsTo) DeepCopyInto(out *PublishConnectionDetailsTo) { + *out = *in + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = new(ConnectionSecretMetadata) + (*in).DeepCopyInto(*out) + } + if in.SecretStoreConfigRef != nil { + in, out := &in.SecretStoreConfigRef, &out.SecretStoreConfigRef + *out = new(Reference) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PublishConnectionDetailsTo. +func (in *PublishConnectionDetailsTo) DeepCopy() *PublishConnectionDetailsTo { + if in == nil { + return nil + } + out := new(PublishConnectionDetailsTo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Reference) DeepCopyInto(out *Reference) { + *out = *in + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(Policy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Reference. +func (in *Reference) DeepCopy() *Reference { + if in == nil { + return nil + } + out := new(Reference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { + *out = *in + if in.WriteConnectionSecretToReference != nil { + in, out := &in.WriteConnectionSecretToReference, &out.WriteConnectionSecretToReference + *out = new(SecretReference) + **out = **in + } + if in.PublishConnectionDetailsTo != nil { + in, out := &in.PublishConnectionDetailsTo, &out.PublishConnectionDetailsTo + *out = new(PublishConnectionDetailsTo) + (*in).DeepCopyInto(*out) + } + if in.ProviderConfigReference != nil { + in, out := &in.ProviderConfigReference, &out.ProviderConfigReference + *out = new(Reference) + (*in).DeepCopyInto(*out) + } + if in.ProviderReference != nil { + in, out := &in.ProviderReference, &out.ProviderReference + *out = new(Reference) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. +func (in *ResourceSpec) DeepCopy() *ResourceSpec { + if in == nil { + return nil + } + out := new(ResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceStatus) DeepCopyInto(out *ResourceStatus) { + *out = *in + in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceStatus. 
+func (in *ResourceStatus) DeepCopy() *ResourceStatus { + if in == nil { + return nil + } + out := new(ResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { + *out = *in + out.SecretReference = in.SecretReference +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. +func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { + if in == nil { + return nil + } + out := new(SecretKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretReference) DeepCopyInto(out *SecretReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. +func (in *SecretReference) DeepCopy() *SecretReference { + if in == nil { + return nil + } + out := new(SecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretStoreConfig) DeepCopyInto(out *SecretStoreConfig) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(SecretStoreType) + **out = **in + } + if in.Kubernetes != nil { + in, out := &in.Kubernetes, &out.Kubernetes + *out = new(KubernetesSecretStoreConfig) + (*in).DeepCopyInto(*out) + } + if in.Vault != nil { + in, out := &in.Vault, &out.Vault + *out = new(VaultSecretStoreConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretStoreConfig. +func (in *SecretStoreConfig) DeepCopy() *SecretStoreConfig { + if in == nil { + return nil + } + out := new(SecretStoreConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Selector) DeepCopyInto(out *Selector) { + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MatchControllerRef != nil { + in, out := &in.MatchControllerRef, &out.MatchControllerRef + *out = new(bool) + **out = **in + } + if in.Policy != nil { + in, out := &in.Policy, &out.Policy + *out = new(Policy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Selector. +func (in *Selector) DeepCopy() *Selector { + if in == nil { + return nil + } + out := new(Selector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetSpec) DeepCopyInto(out *TargetSpec) { + *out = *in + if in.WriteConnectionSecretToReference != nil { + in, out := &in.WriteConnectionSecretToReference, &out.WriteConnectionSecretToReference + *out = new(LocalSecretReference) + **out = **in + } + if in.ResourceReference != nil { + in, out := &in.ResourceReference, &out.ResourceReference + *out = new(corev1.ObjectReference) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetSpec. 
+func (in *TargetSpec) DeepCopy() *TargetSpec { + if in == nil { + return nil + } + out := new(TargetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetStatus) DeepCopyInto(out *TargetStatus) { + *out = *in + in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetStatus. +func (in *TargetStatus) DeepCopy() *TargetStatus { + if in == nil { + return nil + } + out := new(TargetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TypedReference) DeepCopyInto(out *TypedReference) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedReference. +func (in *TypedReference) DeepCopy() *TypedReference { + if in == nil { + return nil + } + out := new(TypedReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultAuthConfig) DeepCopyInto(out *VaultAuthConfig) { + *out = *in + if in.Token != nil { + in, out := &in.Token, &out.Token + *out = new(VaultAuthTokenConfig) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultAuthConfig. +func (in *VaultAuthConfig) DeepCopy() *VaultAuthConfig { + if in == nil { + return nil + } + out := new(VaultAuthConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultAuthTokenConfig) DeepCopyInto(out *VaultAuthTokenConfig) { + *out = *in + in.CommonCredentialSelectors.DeepCopyInto(&out.CommonCredentialSelectors) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultAuthTokenConfig. +func (in *VaultAuthTokenConfig) DeepCopy() *VaultAuthTokenConfig { + if in == nil { + return nil + } + out := new(VaultAuthTokenConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultCABundleConfig) DeepCopyInto(out *VaultCABundleConfig) { + *out = *in + in.CommonCredentialSelectors.DeepCopyInto(&out.CommonCredentialSelectors) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultCABundleConfig. +func (in *VaultCABundleConfig) DeepCopy() *VaultCABundleConfig { + if in == nil { + return nil + } + out := new(VaultCABundleConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VaultSecretStoreConfig) DeepCopyInto(out *VaultSecretStoreConfig) { + *out = *in + if in.Version != nil { + in, out := &in.Version, &out.Version + *out = new(VaultKVVersion) + **out = **in + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = new(VaultCABundleConfig) + (*in).DeepCopyInto(*out) + } + in.Auth.DeepCopyInto(&out.Auth) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VaultSecretStoreConfig. 
+func (in *VaultSecretStoreConfig) DeepCopy() *VaultSecretStoreConfig { + if in == nil { + return nil + } + out := new(VaultSecretStoreConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/errors/errors.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/errors/errors.go new file mode 100644 index 00000000..b4001519 --- /dev/null +++ b/vendor/github.com/crossplane/crossplane-runtime/pkg/errors/errors.go @@ -0,0 +1,126 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package errors is a github.com/pkg/errors compatible API for native errors. +// It includes only the subset of the github.com/pkg/errors API that is used by +// the Crossplane project. +package errors + +import ( + "errors" + "fmt" +) + +// New returns an error that formats as the given text. Each call to New returns +// a distinct error value even if the text is identical. +func New(text string) error { return errors.New(text) } + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained +// by repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +// +// An error type might provide an Is method so it can be treated as equivalent +// to an existing error. For example, if MyError defines +// +// func (m MyError) Is(target error) bool { return target == fs.ErrExist } +// +// then Is(MyError{}, fs.ErrExist) returns true. See syscall.Errno.Is for +// an example in the standard library. +func Is(err, target error) bool { return errors.Is(err, target) } + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. Otherwise, it returns false. +// +// The chain consists of err itself followed by the sequence of errors obtained +// by repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the +// value pointed to by target, or if the error has a method As(any) bool +// such that As(target) returns true. In the latter case, the As method is +// responsible for setting target. +// +// An error type might provide an As method so it can be treated as if it were a +// different error type. +// +// As panics if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. +func As(err error, target any) bool { return errors.As(err, target) } + +// Unwrap returns the result of calling the Unwrap method on err, if err's type +// contains an Unwrap method returning error. Otherwise, Unwrap returns nil. +func Unwrap(err error) error { return errors.Unwrap(err) } + +// Errorf formats according to a format specifier and returns the string as a +// value that satisfies error. 
+//
+// If the format specifier includes a %w verb with an error operand, the
+// returned error will implement an Unwrap method returning the operand. It is
+// invalid to include more than one %w verb or to supply it with an operand that
+// does not implement the error interface. The %w verb is otherwise a synonym
+// for %v.
+func Errorf(format string, a ...any) error { return fmt.Errorf(format, a...) }
+
+// WithMessage annotates err with a new message. If err is nil, WithMessage
+// returns nil.
+func WithMessage(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return fmt.Errorf("%s: %w", message, err)
+}
+
+// WithMessagef annotates err with the format specifier. If err is nil,
+// WithMessagef returns nil.
+func WithMessagef(err error, format string, args ...any) error {
+ if err == nil {
+ return nil
+ }
+ return fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err)
+}
+
+// Wrap is an alias for WithMessage.
+func Wrap(err error, message string) error {
+ return WithMessage(err, message)
+}
+
+// Wrapf is an alias for WithMessagef.
+func Wrapf(err error, format string, args ...any) error {
+ return WithMessagef(err, format, args...)
+}
+
+// Cause calls Unwrap on each error it finds. It returns the first error it
+// finds that does not have an Unwrap method - i.e. the first error that was not
+// the result of a Wrap call, a Wrapf call, or an Errorf call with %w wrapping.
+func Cause(err error) error {
+ type wrapped interface {
+ Unwrap() error
+ }
+
+ for err != nil {
+ //nolint:errorlint // We actually do want to check the outermost error.
+ w, ok := err.(wrapped)
+ if !ok {
+ return err
+ }
+ err = w.Unwrap()
+ }
+
+ return err
+}
diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/meta/meta.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/meta/meta.go
new file mode 100644
index 00000000..67b4a15d
--- /dev/null
+++ b/vendor/github.com/crossplane/crossplane-runtime/pkg/meta/meta.go
@@ -0,0 +1,428 @@
+/*
+Copyright 2019 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package meta contains functions for dealing with Kubernetes object metadata.
+package meta
+
+import (
+ "fmt"
+ "hash/fnv"
+ "strings"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/types"
+
+ xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+ "github.com/crossplane/crossplane-runtime/pkg/errors"
+)
+
+const (
+ // AnnotationKeyExternalName is the key in the annotations map of a
+ // resource for the name of the resource as it appears on provider's
+ // systems.
+ AnnotationKeyExternalName = "crossplane.io/external-name"
+
+ // AnnotationKeyExternalCreatePending is the key in the annotations map
+ // of a resource that indicates the last time creation of the external
+ // resource was pending (i.e. about to happen). Its value must be an
+ // RFC3339 timestamp.
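+ //
+ // The pending/succeeded/failed timestamps are typically written via the
+ // setters in this package. A minimal sketch, assuming a hypothetical
+ // managed resource mg:
+ //
+ //  meta.SetExternalCreatePending(mg, time.Now())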
+ AnnotationKeyExternalCreatePending = "crossplane.io/external-create-pending"
+
+ // AnnotationKeyExternalCreateSucceeded is the key in the annotations
+ // map of a resource that represents the last time the external resource
+ // was created successfully. Its value must be an RFC3339 timestamp,
+ // which can be used to determine how long ago a resource was created.
+ // This is useful for eventually consistent APIs that may take some time
+ // before the API called by Observe will report that a recently created
+ // external resource exists.
+ AnnotationKeyExternalCreateSucceeded = "crossplane.io/external-create-succeeded"
+
+ // AnnotationKeyExternalCreateFailed is the key in the annotations map
+ // of a resource that indicates the last time creation of the external
+ // resource failed. Its value must be an RFC3339 timestamp.
+ AnnotationKeyExternalCreateFailed = "crossplane.io/external-create-failed"
+
+ // AnnotationKeyReconciliationPaused is the key in the annotations map
+ // of a resource that indicates that further reconciliations on the
+ // resource are paused. All create/update/delete/generic events on
+ // the resource will be filtered and thus no further reconcile requests
+ // will be queued for the resource.
+ AnnotationKeyReconciliationPaused = "crossplane.io/paused"
+)
+
+// Supported resources with all of these annotations will be fully or partially
+// propagated to the named resource of the same kind, assuming it exists and
+// consents to propagation.
+const (
+ AnnotationKeyPropagateToPrefix = "to.propagate.crossplane.io/"
+
+ // Deprecated: This functionality will be removed soon.
+ AnnotationKeyPropagateFromNamespace = "from.propagate.crossplane.io/namespace"
+ AnnotationKeyPropagateFromName = "from.propagate.crossplane.io/name"
+)
+
+// ReferenceTo returns an object reference to the supplied object, presumed to
+// be of the supplied group, version, and kind.
+// Deprecated: use a more specific reference type, such as TypedReference or
+// Reference instead of the overly verbose ObjectReference.
+// See https://github.com/crossplane/crossplane-runtime/issues/49
+func ReferenceTo(o metav1.Object, of schema.GroupVersionKind) *corev1.ObjectReference {
+ v, k := of.ToAPIVersionAndKind()
+ return &corev1.ObjectReference{
+ APIVersion: v,
+ Kind: k,
+ Namespace: o.GetNamespace(),
+ Name: o.GetName(),
+ UID: o.GetUID(),
+ }
+}
+
+// TypedReferenceTo returns a typed object reference to the supplied object,
+// presumed to be of the supplied group, version, and kind.
+func TypedReferenceTo(o metav1.Object, of schema.GroupVersionKind) *xpv1.TypedReference {
+ v, k := of.ToAPIVersionAndKind()
+ return &xpv1.TypedReference{
+ APIVersion: v,
+ Kind: k,
+ Name: o.GetName(),
+ UID: o.GetUID(),
+ }
+}
+
+// AsOwner converts the supplied object reference to an owner reference.
+func AsOwner(r *xpv1.TypedReference) metav1.OwnerReference {
+ return metav1.OwnerReference{
+ APIVersion: r.APIVersion,
+ Kind: r.Kind,
+ Name: r.Name,
+ UID: r.UID,
+ }
+}
+
+// AsController converts the supplied object reference to a controller
+// reference. You may also consider using metav1.NewControllerRef.
+func AsController(r *xpv1.TypedReference) metav1.OwnerReference {
+ t := true
+ ref := AsOwner(r)
+ ref.Controller = &t
+ ref.BlockOwnerDeletion = &t
+ return ref
+}
+
+// HaveSameController returns true if both supplied objects are controlled by
+// the same object.
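+//
+// A minimal sketch, assuming two hypothetical objects a and b of type
+// metav1.Object:
+//
+//  if meta.HaveSameController(a, b) {
+//   // a and b share the same controlling owner reference
+//  }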
+func HaveSameController(a, b metav1.Object) bool {
+ ac := metav1.GetControllerOf(a)
+ bc := metav1.GetControllerOf(b)
+
+ // We do not consider two objects without any controller to have
+ // the same controller.
+ if ac == nil || bc == nil {
+ return false
+ }
+
+ return ac.UID == bc.UID
+}
+
+// NamespacedNameOf returns the referenced object's namespaced name.
+func NamespacedNameOf(r *corev1.ObjectReference) types.NamespacedName {
+ return types.NamespacedName{Namespace: r.Namespace, Name: r.Name}
+}
+
+// AddOwnerReference to the supplied object's metadata. Any existing owner with
+// the same UID as the supplied reference will be replaced.
+func AddOwnerReference(o metav1.Object, r metav1.OwnerReference) {
+ refs := o.GetOwnerReferences()
+ for i := range refs {
+ if refs[i].UID == r.UID {
+ refs[i] = r
+ o.SetOwnerReferences(refs)
+ return
+ }
+ }
+ o.SetOwnerReferences(append(refs, r))
+}
+
+// AddControllerReference to the supplied object's metadata. Any existing owner
+// with the same UID as the supplied reference will be replaced. Returns an
+// error if the supplied object is already controlled by a different owner.
+func AddControllerReference(o metav1.Object, r metav1.OwnerReference) error {
+ if c := metav1.GetControllerOf(o); c != nil && c.UID != r.UID {
+ return errors.Errorf("%s is already controlled by %s %s (UID %s)", o.GetName(), c.Kind, c.Name, c.UID)
+ }
+
+ AddOwnerReference(o, r)
+ return nil
+}
+
+// AddFinalizer to the supplied Kubernetes object's metadata.
+func AddFinalizer(o metav1.Object, finalizer string) {
+ f := o.GetFinalizers()
+ for _, e := range f {
+ if e == finalizer {
+ return
+ }
+ }
+ o.SetFinalizers(append(f, finalizer))
+}
+
+// RemoveFinalizer from the supplied Kubernetes object's metadata.
+func RemoveFinalizer(o metav1.Object, finalizer string) {
+ f := o.GetFinalizers()
+ for i, e := range f {
+ if e == finalizer {
+ f = append(f[:i], f[i+1:]...)
+ }
+ }
+ o.SetFinalizers(f)
+}
+
+// FinalizerExists checks whether given finalizer is already set.
+func FinalizerExists(o metav1.Object, finalizer string) bool {
+ f := o.GetFinalizers()
+ for _, e := range f {
+ if e == finalizer {
+ return true
+ }
+ }
+ return false
+}
+
+// AddLabels to the supplied object.
+func AddLabels(o metav1.Object, labels map[string]string) {
+ l := o.GetLabels()
+ if l == nil {
+ o.SetLabels(labels)
+ return
+ }
+ for k, v := range labels {
+ l[k] = v
+ }
+ o.SetLabels(l)
+}
+
+// RemoveLabels with the supplied keys from the supplied object.
+func RemoveLabels(o metav1.Object, labels ...string) {
+ l := o.GetLabels()
+ if l == nil {
+ return
+ }
+ for _, k := range labels {
+ delete(l, k)
+ }
+ o.SetLabels(l)
+}
+
+// AddAnnotations to the supplied object.
+func AddAnnotations(o metav1.Object, annotations map[string]string) {
+ a := o.GetAnnotations()
+ if a == nil {
+ o.SetAnnotations(annotations)
+ return
+ }
+ for k, v := range annotations {
+ a[k] = v
+ }
+ o.SetAnnotations(a)
+}
+
+// RemoveAnnotations with the supplied keys from the supplied object.
+func RemoveAnnotations(o metav1.Object, annotations ...string) {
+ a := o.GetAnnotations()
+ if a == nil {
+ return
+ }
+ for _, k := range annotations {
+ delete(a, k)
+ }
+ o.SetAnnotations(a)
+}
+
+// WasDeleted returns true if the supplied object was deleted from the API server.
+func WasDeleted(o metav1.Object) bool {
+ return !o.GetDeletionTimestamp().IsZero()
+}
+
+// WasCreated returns true if the supplied object was created in the API server.
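+//
+// A minimal sketch, assuming a hypothetical object obj:
+//
+//  if !meta.WasCreated(obj) {
+//   // obj has no creation timestamp, so it was never persisted
+//  }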
+func WasCreated(o metav1.Object) bool { + // This looks a little different from WasDeleted because DeletionTimestamp + // returns a reference while CreationTimestamp returns a value. + t := o.GetCreationTimestamp() + return !t.IsZero() +} + +// GetExternalName returns the external name annotation value on the resource. +func GetExternalName(o metav1.Object) string { + return o.GetAnnotations()[AnnotationKeyExternalName] +} + +// SetExternalName sets the external name annotation of the resource. +func SetExternalName(o metav1.Object, name string) { + AddAnnotations(o, map[string]string{AnnotationKeyExternalName: name}) +} + +// GetExternalCreatePending returns the time at which the external resource +// was most recently pending creation. +func GetExternalCreatePending(o metav1.Object) time.Time { + a := o.GetAnnotations()[AnnotationKeyExternalCreatePending] + t, err := time.Parse(time.RFC3339, a) + if err != nil { + return time.Time{} + } + return t +} + +// SetExternalCreatePending sets the time at which the external resource was +// most recently pending creation to the supplied time. +func SetExternalCreatePending(o metav1.Object, t time.Time) { + AddAnnotations(o, map[string]string{AnnotationKeyExternalCreatePending: t.Format(time.RFC3339)}) +} + +// GetExternalCreateSucceeded returns the time at which the external resource +// was most recently created. +func GetExternalCreateSucceeded(o metav1.Object) time.Time { + a := o.GetAnnotations()[AnnotationKeyExternalCreateSucceeded] + t, err := time.Parse(time.RFC3339, a) + if err != nil { + return time.Time{} + } + return t +} + +// SetExternalCreateSucceeded sets the time at which the external resource was +// most recently created to the supplied time. +func SetExternalCreateSucceeded(o metav1.Object, t time.Time) { + AddAnnotations(o, map[string]string{AnnotationKeyExternalCreateSucceeded: t.Format(time.RFC3339)}) +} + +// GetExternalCreateFailed returns the time at which the external resource +// recently failed to create. +func GetExternalCreateFailed(o metav1.Object) time.Time { + a := o.GetAnnotations()[AnnotationKeyExternalCreateFailed] + t, err := time.Parse(time.RFC3339, a) + if err != nil { + return time.Time{} + } + return t +} + +// SetExternalCreateFailed sets the time at which the external resource most +// recently failed to create. +func SetExternalCreateFailed(o metav1.Object, t time.Time) { + AddAnnotations(o, map[string]string{AnnotationKeyExternalCreateFailed: t.Format(time.RFC3339)}) +} + +// ExternalCreateIncomplete returns true if creation of the external resource +// appears to be incomplete. We deem creation to be incomplete if the 'external +// create pending' annotation is the newest of all tracking annotations that are +// set (i.e. pending, succeeded, and failed). +func ExternalCreateIncomplete(o metav1.Object) bool { + pending := GetExternalCreatePending(o) + succeeded := GetExternalCreateSucceeded(o) + failed := GetExternalCreateFailed(o) + + // If creation never started it can't be incomplete. + if pending.IsZero() { + return false + } + + latest := succeeded + if failed.After(succeeded) { + latest = failed + } + + return pending.After(latest) +} + +// ExternalCreateSucceededDuring returns true if creation of the external +// resource that corresponds to the supplied managed resource succeeded within +// the supplied duration. 
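+//
+// A minimal sketch, assuming a hypothetical managed resource mg:
+//
+//  if meta.ExternalCreateSucceededDuring(mg, time.Minute) {
+//   // the external resource was created less than a minute ago, so an
+//   // eventually consistent API may not report it yet
+//  }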
+func ExternalCreateSucceededDuring(o metav1.Object, d time.Duration) bool {
+ t := GetExternalCreateSucceeded(o)
+ if t.IsZero() {
+ return false
+ }
+ return time.Since(t) < d
+}
+
+// AllowPropagation from one object to another by adding consenting annotations
+// to both.
+// Deprecated: This functionality will be removed soon.
+func AllowPropagation(from, to metav1.Object) {
+ AddAnnotations(to, map[string]string{
+ AnnotationKeyPropagateFromNamespace: from.GetNamespace(),
+ AnnotationKeyPropagateFromName: from.GetName(),
+ })
+
+ AddAnnotations(from, map[string]string{
+ AnnotationKeyPropagateTo(to): to.GetNamespace() + "/" + to.GetName(),
+ })
+}
+
+// AnnotationKeyPropagateTo returns an annotation key whose presence indicates
+// that the annotated object consents to propagation from the supplied object.
+// The annotation name (which follows the prefix) can be anything that doesn't
+// collide with another annotation. to.propagate.crossplane.io/example would
+// be valid. This function uses a hash of the supplied object's namespace and
+// name in order to avoid collisions and keep the suffix relatively short.
+func AnnotationKeyPropagateTo(o metav1.Object) string {
+ // Writing to a hash never returns an error.
+ h := fnv.New32a()
+ h.Write([]byte(o.GetNamespace()))
+ h.Write([]byte(o.GetName()))
+ return fmt.Sprintf("%s%x", AnnotationKeyPropagateToPrefix, h.Sum32())
+}
+
+// AllowsPropagationFrom returns the NamespacedName of the object the supplied
+// object should be propagated from.
+func AllowsPropagationFrom(to metav1.Object) types.NamespacedName {
+ return types.NamespacedName{
+ Namespace: to.GetAnnotations()[AnnotationKeyPropagateFromNamespace],
+ Name: to.GetAnnotations()[AnnotationKeyPropagateFromName],
+ }
+}
+
+// AllowsPropagationTo returns the set of NamespacedNames that the supplied
+// object may be propagated to.
+func AllowsPropagationTo(from metav1.Object) map[types.NamespacedName]bool {
+ to := make(map[types.NamespacedName]bool)
+
+ for k, v := range from.GetAnnotations() {
+ nn := strings.Split(v, "/")
+ switch {
+ case !strings.HasPrefix(k, AnnotationKeyPropagateToPrefix):
+ continue
+ case len(nn) != 2:
+ continue
+ case nn[0] == "":
+ continue
+ case nn[1] == "":
+ continue
+ }
+ to[types.NamespacedName{Namespace: nn[0], Name: nn[1]}] = true
+ }
+
+ return to
+}
+
+// IsPaused returns true if the object has the AnnotationKeyReconciliationPaused
+// annotation set to `true`.
+func IsPaused(o metav1.Object) bool {
+ return o.GetAnnotations()[AnnotationKeyReconciliationPaused] == "true"
+}
diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/api.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/api.go
new file mode 100644
index 00000000..5ec90624
--- /dev/null
+++ b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/api.go
@@ -0,0 +1,262 @@
+/*
+Copyright 2019 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package resource + +import ( + "context" + "encoding/json" + + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/meta" +) + +// Error strings. +const ( + errGetSecret = "cannot get managed resource's connection secret" + errSecretConflict = "cannot establish control of existing connection secret" + errUpdateSecret = "cannot update connection secret" + errCreateOrUpdateSecret = "cannot create or update connection secret" + + errUpdateObject = "cannot update object" +) + +// An APIManagedConnectionPropagator propagates connection details by reading +// them from and writing them to a Kubernetes API server. +// Deprecated: This functionality will be removed soon. +type APIManagedConnectionPropagator struct { + Propagator ConnectionPropagator +} + +// PropagateConnection details from the supplied resource. +func (a *APIManagedConnectionPropagator) PropagateConnection(ctx context.Context, to LocalConnectionSecretOwner, mg Managed) error { + return a.Propagator.PropagateConnection(ctx, to, mg) +} + +// An APIConnectionPropagator propagates connection details by reading +// them from and writing them to a Kubernetes API server. +// Deprecated: This functionality will be removed soon. +type APIConnectionPropagator struct { + client ClientApplicator + typer runtime.ObjectTyper +} + +// NewAPIConnectionPropagator returns a new APIConnectionPropagator. +// Deprecated: This functionality will be removed soon. +func NewAPIConnectionPropagator(c client.Client, t runtime.ObjectTyper) *APIConnectionPropagator { + return &APIConnectionPropagator{ + client: ClientApplicator{Client: c, Applicator: NewAPIUpdatingApplicator(c)}, + typer: t, + } +} + +// PropagateConnection details from the supplied resource. +func (a *APIConnectionPropagator) PropagateConnection(ctx context.Context, to LocalConnectionSecretOwner, from ConnectionSecretOwner) error { + // Either from does not expose a connection secret, or to does not want one. + if from.GetWriteConnectionSecretToReference() == nil || to.GetWriteConnectionSecretToReference() == nil { + return nil + } + + n := types.NamespacedName{ + Namespace: from.GetWriteConnectionSecretToReference().Namespace, + Name: from.GetWriteConnectionSecretToReference().Name, + } + fs := &corev1.Secret{} + if err := a.client.Get(ctx, n, fs); err != nil { + return errors.Wrap(err, errGetSecret) + } + + // Make sure the managed resource is the controller of the connection secret + // it references before we propagate it. This ensures a managed resource + // cannot use Crossplane to circumvent RBAC by propagating a secret it does + // not own. + if c := metav1.GetControllerOf(fs); c == nil || c.UID != from.GetUID() { + return errors.New(errSecretConflict) + } + + ts := LocalConnectionSecretFor(to, MustGetKind(to, a.typer)) + ts.Data = fs.Data + + meta.AllowPropagation(fs, ts) + + if err := a.client.Apply(ctx, ts, ConnectionSecretMustBeControllableBy(to.GetUID())); err != nil { + return errors.Wrap(err, errCreateOrUpdateSecret) + } + + return errors.Wrap(a.client.Update(ctx, fs), errUpdateSecret) +} + +// An APIPatchingApplicator applies changes to an object by either creating or +// patching it in a Kubernetes API server. 
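+//
+// A minimal sketch, assuming a controller-runtime client c and a
+// client.Object obj:
+//
+//  a := resource.NewAPIPatchingApplicator(c)
+//  if err := a.Apply(ctx, obj); err != nil {
+//   // handle error
+//  }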
+type APIPatchingApplicator struct { + client client.Client +} + +// NewAPIPatchingApplicator returns an Applicator that applies changes to an +// object by either creating or patching it in a Kubernetes API server. +func NewAPIPatchingApplicator(c client.Client) *APIPatchingApplicator { + return &APIPatchingApplicator{client: c} +} + +// Apply changes to the supplied object. The object will be created if it does +// not exist, or patched if it does. If the object does exist, it will only be +// patched if the passed object has the same or an empty resource version. +func (a *APIPatchingApplicator) Apply(ctx context.Context, o client.Object, ao ...ApplyOption) error { + m, ok := o.(metav1.Object) + if !ok { + return errors.New("cannot access object metadata") + } + + if m.GetName() == "" && m.GetGenerateName() != "" { + return errors.Wrap(a.client.Create(ctx, o), "cannot create object") + } + + desired := o.DeepCopyObject() + + err := a.client.Get(ctx, types.NamespacedName{Name: m.GetName(), Namespace: m.GetNamespace()}, o) + if kerrors.IsNotFound(err) { + // TODO(negz): Apply ApplyOptions here too? + return errors.Wrap(a.client.Create(ctx, o), "cannot create object") + } + if err != nil { + return errors.Wrap(err, "cannot get object") + } + + for _, fn := range ao { + if err := fn(ctx, o, desired); err != nil { + return err + } + } + + // TODO(negz): Allow callers to override the kind of patch used. + return errors.Wrap(a.client.Patch(ctx, o, &patch{desired}), "cannot patch object") +} + +type patch struct{ from runtime.Object } + +func (p *patch) Type() types.PatchType { return types.MergePatchType } +func (p *patch) Data(_ client.Object) ([]byte, error) { return json.Marshal(p.from) } + +// An APIUpdatingApplicator applies changes to an object by either creating or +// updating it in a Kubernetes API server. +type APIUpdatingApplicator struct { + client client.Client +} + +// NewAPIUpdatingApplicator returns an Applicator that applies changes to an +// object by either creating or updating it in a Kubernetes API server. +func NewAPIUpdatingApplicator(c client.Client) *APIUpdatingApplicator { + return &APIUpdatingApplicator{client: c} +} + +// Apply changes to the supplied object. The object will be created if it does +// not exist, or updated if it does. +func (a *APIUpdatingApplicator) Apply(ctx context.Context, o client.Object, ao ...ApplyOption) error { + m, ok := o.(Object) + if !ok { + return errors.New("cannot access object metadata") + } + + if m.GetName() == "" && m.GetGenerateName() != "" { + return errors.Wrap(a.client.Create(ctx, o), "cannot create object") + } + + current := o.DeepCopyObject().(client.Object) + + err := a.client.Get(ctx, types.NamespacedName{Name: m.GetName(), Namespace: m.GetNamespace()}, current) + if kerrors.IsNotFound(err) { + // TODO(negz): Apply ApplyOptions here too? + return errors.Wrap(a.client.Create(ctx, m), "cannot create object") + } + if err != nil { + return errors.Wrap(err, "cannot get object") + } + + for _, fn := range ao { + if err := fn(ctx, current, m); err != nil { + return err + } + } + + // NOTE(hasheddan): we must set the resource version of the desired object + // to that of the current or the update will always fail. + m.SetResourceVersion(current.(metav1.Object).GetResourceVersion()) + return errors.Wrap(a.client.Update(ctx, m), "cannot update object") +} + +// An APIFinalizer adds and removes finalizers to and from a resource. 
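+//
+// A minimal sketch, assuming a controller-runtime client c, an Object obj,
+// and a hypothetical finalizer key:
+//
+//  f := resource.NewAPIFinalizer(c, "example.org/finalizer")
+//  if err := f.AddFinalizer(ctx, obj); err != nil {
+//   // handle error
+//  }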
+type APIFinalizer struct {
+ client client.Client
+ finalizer string
+}
+
+// NewNopFinalizer returns a Finalizer that does nothing.
+func NewNopFinalizer() Finalizer { return nopFinalizer{} }
+
+type nopFinalizer struct{}
+
+func (f nopFinalizer) AddFinalizer(ctx context.Context, obj Object) error {
+ return nil
+}
+func (f nopFinalizer) RemoveFinalizer(ctx context.Context, obj Object) error {
+ return nil
+}
+
+// NewAPIFinalizer returns a new APIFinalizer.
+func NewAPIFinalizer(c client.Client, finalizer string) *APIFinalizer {
+ return &APIFinalizer{client: c, finalizer: finalizer}
+}
+
+// AddFinalizer to the supplied Managed resource.
+func (a *APIFinalizer) AddFinalizer(ctx context.Context, obj Object) error {
+ if meta.FinalizerExists(obj, a.finalizer) {
+ return nil
+ }
+ meta.AddFinalizer(obj, a.finalizer)
+ return errors.Wrap(a.client.Update(ctx, obj), errUpdateObject)
+}
+
+// RemoveFinalizer from the supplied Managed resource.
+func (a *APIFinalizer) RemoveFinalizer(ctx context.Context, obj Object) error {
+ if !meta.FinalizerExists(obj, a.finalizer) {
+ return nil
+ }
+ meta.RemoveFinalizer(obj, a.finalizer)
+ return errors.Wrap(IgnoreNotFound(a.client.Update(ctx, obj)), errUpdateObject)
+}
+
+// FinalizerFns satisfies the Finalizer interface.
+type FinalizerFns struct {
+ AddFinalizerFn func(ctx context.Context, obj Object) error
+ RemoveFinalizerFn func(ctx context.Context, obj Object) error
+}
+
+// AddFinalizer to the supplied resource.
+func (f FinalizerFns) AddFinalizer(ctx context.Context, obj Object) error {
+ return f.AddFinalizerFn(ctx, obj)
+}
+
+// RemoveFinalizer from the supplied resource.
+func (f FinalizerFns) RemoveFinalizer(ctx context.Context, obj Object) error {
+ return f.RemoveFinalizerFn(ctx, obj)
+}
diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/doc.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/doc.go
new file mode 100644
index 00000000..cd13e74c
--- /dev/null
+++ b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2019 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package resource provides types and functions that can be used to build
+// Kubernetes controllers that reconcile Crossplane resources.
+package resource
diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/enqueue_handlers.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/enqueue_handlers.go
new file mode 100644
index 00000000..7f74bda3
--- /dev/null
+++ b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/enqueue_handlers.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2019 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/client-go/util/workqueue"
+ "sigs.k8s.io/controller-runtime/pkg/event"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+type adder interface {
+ Add(item any)
+}
+
+// EnqueueRequestForProviderConfig enqueues a reconcile.Request for a referenced
+// ProviderConfig.
+type EnqueueRequestForProviderConfig struct{}
+
+// Create adds a NamespacedName for the supplied CreateEvent if its Object is a
+// ProviderConfigReferencer.
+func (e *EnqueueRequestForProviderConfig) Create(evt event.CreateEvent, q workqueue.RateLimitingInterface) {
+ addProviderConfig(evt.Object, q)
+}
+
+// Update adds a NamespacedName for the supplied UpdateEvent if its Objects are
+// a ProviderConfigReferencer.
+func (e *EnqueueRequestForProviderConfig) Update(evt event.UpdateEvent, q workqueue.RateLimitingInterface) {
+ addProviderConfig(evt.ObjectOld, q)
+ addProviderConfig(evt.ObjectNew, q)
+}
+
+// Delete adds a NamespacedName for the supplied DeleteEvent if its Object is a
+// ProviderConfigReferencer.
+func (e *EnqueueRequestForProviderConfig) Delete(evt event.DeleteEvent, q workqueue.RateLimitingInterface) {
+ addProviderConfig(evt.Object, q)
+}
+
+// Generic adds a NamespacedName for the supplied GenericEvent if its Object is
+// a ProviderConfigReferencer.
+func (e *EnqueueRequestForProviderConfig) Generic(evt event.GenericEvent, q workqueue.RateLimitingInterface) {
+ addProviderConfig(evt.Object, q)
+}
+
+func addProviderConfig(obj runtime.Object, queue adder) {
+ pcr, ok := obj.(RequiredProviderConfigReferencer)
+ if !ok {
+ return
+ }
+
+ queue.Add(reconcile.Request{NamespacedName: types.NamespacedName{Name: pcr.GetProviderConfigReference().Name}})
+}
diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/interfaces.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/interfaces.go
new file mode 100644
index 00000000..cff316d7
--- /dev/null
+++ b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/interfaces.go
@@ -0,0 +1,274 @@
+/*
+Copyright 2019 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+ "context"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+// A Conditioned may have conditions set or retrieved. Conditions typically
+// indicate the status of both a resource and its reconciliation process.
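+//
+// A minimal sketch, assuming a hypothetical managed resource mg:
+//
+//  mg.SetConditions(xpv1.Available())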
+type Conditioned interface { + SetConditions(c ...xpv1.Condition) + GetCondition(xpv1.ConditionType) xpv1.Condition +} + +// A ClaimReferencer may reference a resource claim. +type ClaimReferencer interface { + SetClaimReference(r *corev1.ObjectReference) + GetClaimReference() *corev1.ObjectReference +} + +// A ManagedResourceReferencer may reference a concrete managed resource. +type ManagedResourceReferencer interface { + SetResourceReference(r *corev1.ObjectReference) + GetResourceReference() *corev1.ObjectReference +} + +// A LocalConnectionSecretWriterTo may write a connection secret to its own +// namespace. +type LocalConnectionSecretWriterTo interface { + SetWriteConnectionSecretToReference(r *xpv1.LocalSecretReference) + GetWriteConnectionSecretToReference() *xpv1.LocalSecretReference +} + +// A ConnectionSecretWriterTo may write a connection secret to an arbitrary +// namespace. +type ConnectionSecretWriterTo interface { + SetWriteConnectionSecretToReference(r *xpv1.SecretReference) + GetWriteConnectionSecretToReference() *xpv1.SecretReference +} + +// A ConnectionDetailsPublisherTo may write a connection details secret to a +// secret store +type ConnectionDetailsPublisherTo interface { + SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) + GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo +} + +// An Orphanable resource may specify a DeletionPolicy. +type Orphanable interface { + SetDeletionPolicy(p xpv1.DeletionPolicy) + GetDeletionPolicy() xpv1.DeletionPolicy +} + +// A ProviderReferencer may reference a provider resource. +type ProviderReferencer interface { + GetProviderReference() *xpv1.Reference + SetProviderReference(p *xpv1.Reference) +} + +// A ProviderConfigReferencer may reference a provider config resource. +type ProviderConfigReferencer interface { + GetProviderConfigReference() *xpv1.Reference + SetProviderConfigReference(p *xpv1.Reference) +} + +// A RequiredProviderConfigReferencer may reference a provider config resource. +// Unlike ProviderConfigReferencer, the reference is required (i.e. not nil). +type RequiredProviderConfigReferencer interface { + GetProviderConfigReference() xpv1.Reference + SetProviderConfigReference(p xpv1.Reference) +} + +// A RequiredTypedResourceReferencer can reference a resource. +type RequiredTypedResourceReferencer interface { + SetResourceReference(r xpv1.TypedReference) + GetResourceReference() xpv1.TypedReference +} + +// A Finalizer manages the finalizers on the resource. +type Finalizer interface { + AddFinalizer(ctx context.Context, obj Object) error + RemoveFinalizer(ctx context.Context, obj Object) error +} + +// A CompositionSelector may select a composition of resources. +type CompositionSelector interface { + SetCompositionSelector(*metav1.LabelSelector) + GetCompositionSelector() *metav1.LabelSelector +} + +// A CompositionReferencer may reference a composition of resources. +type CompositionReferencer interface { + SetCompositionReference(*corev1.ObjectReference) + GetCompositionReference() *corev1.ObjectReference +} + +// A CompositionRevisionReferencer may reference a specific revision of a +// composition of resources. +type CompositionRevisionReferencer interface { + SetCompositionRevisionReference(*corev1.ObjectReference) + GetCompositionRevisionReference() *corev1.ObjectReference +} + +// A CompositionRevisionSelector may reference a set of +// composition revisions. 
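+//
+// A minimal sketch, assuming a hypothetical composite resource xr and a
+// hypothetical label:
+//
+//  xr.SetCompositionRevisionSelector(&metav1.LabelSelector{
+//   MatchLabels: map[string]string{"channel": "stable"},
+//  })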
+type CompositionRevisionSelector interface {
+ SetCompositionRevisionSelector(selector *metav1.LabelSelector)
+ GetCompositionRevisionSelector() *metav1.LabelSelector
+}
+
+// A CompositionUpdater uses a composition, and may update which revision of
+// that composition it uses.
+type CompositionUpdater interface {
+ SetCompositionUpdatePolicy(*xpv1.UpdatePolicy)
+ GetCompositionUpdatePolicy() *xpv1.UpdatePolicy
+}
+
+// A CompositeResourceDeleter creates a composite, and controls the policy
+// used to delete the composite.
+type CompositeResourceDeleter interface {
+ SetCompositeDeletePolicy(policy *xpv1.CompositeDeletePolicy)
+ GetCompositeDeletePolicy() *xpv1.CompositeDeletePolicy
+}
+
+// A ComposedResourcesReferencer may reference the resources it composes.
+type ComposedResourcesReferencer interface {
+ SetResourceReferences([]corev1.ObjectReference)
+ GetResourceReferences() []corev1.ObjectReference
+}
+
+// A CompositeResourceReferencer can reference a composite resource.
+type CompositeResourceReferencer interface {
+ SetResourceReference(r *corev1.ObjectReference)
+ GetResourceReference() *corev1.ObjectReference
+}
+
+// An EnvironmentConfigReferencer references a list of EnvironmentConfigs.
+type EnvironmentConfigReferencer interface {
+ SetEnvironmentConfigReferences([]corev1.ObjectReference)
+ GetEnvironmentConfigReferences() []corev1.ObjectReference
+}
+
+// A UserCounter can count how many users it has.
+type UserCounter interface {
+ SetUsers(i int64)
+ GetUsers() int64
+}
+
+// A ConnectionDetailsPublishedTimer can record the last time its connection
+// details were published.
+type ConnectionDetailsPublishedTimer interface {
+ SetConnectionDetailsLastPublishedTime(t *metav1.Time)
+ GetConnectionDetailsLastPublishedTime() *metav1.Time
+}
+
+// An Object is a Kubernetes object.
+type Object interface {
+ metav1.Object
+ runtime.Object
+}
+
+// A Managed is a Kubernetes object representing a concrete managed
+// resource (e.g. a CloudSQL instance).
+type Managed interface {
+ Object
+
+ ProviderReferencer
+ ProviderConfigReferencer
+ ConnectionSecretWriterTo
+ ConnectionDetailsPublisherTo
+ Orphanable
+
+ Conditioned
+}
+
+// A ManagedList is a list of managed resources.
+type ManagedList interface {
+ client.ObjectList
+
+ // GetItems returns the list of managed resources.
+ GetItems() []Managed
+}
+
+// A ProviderConfig configures a Crossplane provider.
+type ProviderConfig interface {
+ Object
+
+ UserCounter
+ Conditioned
+}
+
+// A ProviderConfigUsage indicates a usage of a Crossplane provider config.
+type ProviderConfigUsage interface {
+ Object
+
+ RequiredProviderConfigReferencer
+ RequiredTypedResourceReferencer
+}
+
+// A ProviderConfigUsageList is a list of provider config usages.
+type ProviderConfigUsageList interface {
+ client.ObjectList
+
+ // GetItems returns the list of provider config usages.
+ GetItems() []ProviderConfigUsage
+}
+
+// A Composite resource composes one or more Composed resources.
+type Composite interface {
+ Object
+
+ CompositionSelector
+ CompositionReferencer
+ CompositionUpdater
+ CompositionRevisionReferencer
+ CompositionRevisionSelector
+ ComposedResourcesReferencer
+ EnvironmentConfigReferencer
+ ClaimReferencer
+ ConnectionSecretWriterTo
+ ConnectionDetailsPublisherTo
+
+ Conditioned
+ ConnectionDetailsPublishedTimer
+}
+
+// Composed resources can be composed into a Composite resource.
+type Composed interface { + Object + + Conditioned + ConnectionSecretWriterTo + ConnectionDetailsPublisherTo +} + +// A CompositeClaim for a Composite resource. +type CompositeClaim interface { + Object + + CompositionSelector + CompositionReferencer + CompositionUpdater + CompositionRevisionReferencer + CompositionRevisionSelector + CompositeResourceDeleter + CompositeResourceReferencer + LocalConnectionSecretWriterTo + ConnectionDetailsPublisherTo + + Conditioned + ConnectionDetailsPublishedTimer +} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/late_initializer.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/late_initializer.go new file mode 100644 index 00000000..0bcbb7cf --- /dev/null +++ b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/late_initializer.go @@ -0,0 +1,86 @@ +/* +Copyright 2021 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NewLateInitializer returns a new instance of *LateInitializer. +func NewLateInitializer() *LateInitializer { + return &LateInitializer{} +} + +// LateInitializer contains functions to late initialize two fields with varying +// types. The main purpose of LateInitializer is to be able to report whether +// anything different from the original value has been returned after all late +// initialization calls. +type LateInitializer struct { + changed bool +} + +// IsChanged reports whether the second argument is ever used in late initialization +// function calls. +func (li *LateInitializer) IsChanged() bool { + return li.changed +} + +// SetChanged marks the LateInitializer such that users can tell whether any +// of the late initialization calls returned the non-original argument. +func (li *LateInitializer) SetChanged() { + li.changed = true +} + +// LateInitializeStringPtr implements late initialization for *string. +func (li *LateInitializer) LateInitializeStringPtr(org *string, from *string) *string { + if org != nil || from == nil { + return org + } + li.SetChanged() + return from +} + +// LateInitializeInt64Ptr implements late initialization for *int64. +func (li *LateInitializer) LateInitializeInt64Ptr(org *int64, from *int64) *int64 { + if org != nil || from == nil { + return org + } + li.SetChanged() + return from +} + +// LateInitializeBoolPtr implements late initialization for *bool. +func (li *LateInitializer) LateInitializeBoolPtr(org *bool, from *bool) *bool { + if org != nil || from == nil { + return org + } + li.SetChanged() + return from +} + +// LateInitializeTimePtr implements late initialization for *metav1.Time from +// *time.Time. 
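+//
+// A minimal sketch, assuming hypothetical desired spec and observed values:
+//
+//  li := resource.NewLateInitializer()
+//  spec.CreateTime = li.LateInitializeTimePtr(spec.CreateTime, observed.CreateTime)
+//  if li.IsChanged() {
+//   // persist the late-initialized spec
+//  }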
+func (li *LateInitializer) LateInitializeTimePtr(org *metav1.Time, from *time.Time) *metav1.Time { + if org != nil || from == nil { + return org + } + li.SetChanged() + t := metav1.NewTime(*from) + return &t +} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/predicates.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/predicates.go new file mode 100644 index 00000000..ca441f21 --- /dev/null +++ b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/predicates.go @@ -0,0 +1,148 @@ +/* +Copyright 2019 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "github.com/crossplane/crossplane-runtime/pkg/meta" +) + +// A PredicateFn returns true if the supplied object should be reconciled. +type PredicateFn func(obj runtime.Object) bool + +// NewPredicates returns a set of Funcs that are all satisfied by the supplied +// PredicateFn. The PredicateFn is run against the new object during updates. +func NewPredicates(fn PredicateFn) predicate.Funcs { + return predicate.Funcs{ + CreateFunc: func(e event.CreateEvent) bool { return fn(e.Object) }, + DeleteFunc: func(e event.DeleteEvent) bool { return fn(e.Object) }, + UpdateFunc: func(e event.UpdateEvent) bool { return fn(e.ObjectNew) }, + GenericFunc: func(e event.GenericEvent) bool { return fn(e.Object) }, + } +} + +// AnyOf accepts objects that pass any of the supplied predicate functions. +func AnyOf(fn ...PredicateFn) PredicateFn { + return func(obj runtime.Object) bool { + for _, f := range fn { + if f(obj) { + return true + } + } + return false + } +} + +// AllOf accepts objects that pass all of the supplied predicate functions. +func AllOf(fn ...PredicateFn) PredicateFn { + return func(obj runtime.Object) bool { + for _, f := range fn { + if !f(obj) { + return false + } + } + return true + } +} + +// HasManagedResourceReferenceKind accepts objects that reference the supplied +// managed resource kind. +func HasManagedResourceReferenceKind(k ManagedKind) PredicateFn { + return func(obj runtime.Object) bool { + r, ok := obj.(ManagedResourceReferencer) + if !ok { + return false + } + + if r.GetResourceReference() == nil { + return false + } + + return r.GetResourceReference().GroupVersionKind() == schema.GroupVersionKind(k) + } +} + +// IsManagedKind accepts objects that are of the supplied managed resource kind. +func IsManagedKind(k ManagedKind, ot runtime.ObjectTyper) PredicateFn { + return func(obj runtime.Object) bool { + gvk, err := GetKind(obj, ot) + if err != nil { + return false + } + return gvk == schema.GroupVersionKind(k) + } +} + +// IsControlledByKind accepts objects that are controlled by a resource of the +// supplied kind. 
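+//
+// A minimal sketch, assuming a hypothetical schema.GroupVersionKind gvk:
+//
+//  p := resource.NewPredicates(resource.IsControlledByKind(gvk))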
+func IsControlledByKind(k schema.GroupVersionKind) PredicateFn {
+ return func(obj runtime.Object) bool {
+ mo, ok := obj.(metav1.Object)
+ if !ok {
+ return false
+ }
+
+ ref := metav1.GetControllerOf(mo)
+ if ref == nil {
+ return false
+ }
+
+ return ref.APIVersion == k.GroupVersion().String() && ref.Kind == k.Kind
+ }
+}
+
+// IsPropagator accepts objects that request to be partially or fully propagated
+// to another object of the same kind.
+func IsPropagator() PredicateFn {
+ return func(obj runtime.Object) bool {
+ from, ok := obj.(metav1.Object)
+ if !ok {
+ return false
+ }
+
+ return len(meta.AllowsPropagationTo(from)) > 0
+ }
+}
+
+// IsPropagated accepts objects that consent to be partially or fully propagated
+// from another object of the same kind.
+func IsPropagated() PredicateFn {
+ return func(obj runtime.Object) bool {
+ to, ok := obj.(metav1.Object)
+ if !ok {
+ return false
+ }
+ nn := meta.AllowsPropagationFrom(to)
+ return nn.Namespace != "" && nn.Name != ""
+ }
+}
+
+// IsNamed accepts objects whose name matches the given name.
+func IsNamed(name string) PredicateFn {
+ return func(obj runtime.Object) bool {
+ mo, ok := obj.(metav1.Object)
+ if !ok {
+ return false
+ }
+ return mo.GetName() == name
+ }
+}
diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/providerconfig.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/providerconfig.go
new file mode 100644
index 00000000..e42508bd
--- /dev/null
+++ b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/providerconfig.go
@@ -0,0 +1,165 @@
+/*
+Copyright 2020 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+ "context"
+ "os"
+
+ "github.com/spf13/afero"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+ "github.com/crossplane/crossplane-runtime/pkg/errors"
+ "github.com/crossplane/crossplane-runtime/pkg/meta"
+)
+
+const (
+ errExtractEnv = "cannot extract from environment variable when none specified"
+ errExtractFs = "cannot extract from filesystem when no path specified"
+ errExtractSecretKey = "cannot extract from secret key when none specified"
+ errGetCredentialsSecret = "cannot get credentials secret"
+ errNoHandlerForSourceFmt = "no extraction handler registered for source: %s"
+ errMissingPCRef = "managed resource does not reference a ProviderConfig"
+ errApplyPCU = "cannot apply ProviderConfigUsage"
+)
+
+type errMissingRef struct{ error }
+
+func (m errMissingRef) MissingReference() bool { return true }
+
+// IsMissingReference returns true if an error indicates that a managed
+// resource is missing a required reference.
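+//
+// A minimal sketch:
+//
+//  if resource.IsMissingReference(err) {
+//   // the managed resource cannot be reconciled until its
+//   // ProviderConfig reference is set
+//  }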
+func IsMissingReference(err error) bool { + _, ok := err.(interface { //nolint: errorlint // Skip errorlint for interface type + MissingReference() bool + }) + return ok +} + +// EnvLookupFn looks up an environment variable. +type EnvLookupFn func(string) string + +// ExtractEnv extracts credentials from an environment variable. +func ExtractEnv(ctx context.Context, e EnvLookupFn, s xpv1.CommonCredentialSelectors) ([]byte, error) { + if s.Env == nil { + return nil, errors.New(errExtractEnv) + } + return []byte(e(s.Env.Name)), nil +} + +// ExtractFs extracts credentials from the filesystem. +func ExtractFs(ctx context.Context, fs afero.Fs, s xpv1.CommonCredentialSelectors) ([]byte, error) { + if s.Fs == nil { + return nil, errors.New(errExtractFs) + } + return afero.ReadFile(fs, s.Fs.Path) +} + +// ExtractSecret extracts credentials from a Kubernetes secret. +func ExtractSecret(ctx context.Context, client client.Client, s xpv1.CommonCredentialSelectors) ([]byte, error) { + if s.SecretRef == nil { + return nil, errors.New(errExtractSecretKey) + } + secret := &corev1.Secret{} + if err := client.Get(ctx, types.NamespacedName{Namespace: s.SecretRef.Namespace, Name: s.SecretRef.Name}, secret); err != nil { + return nil, errors.Wrap(err, errGetCredentialsSecret) + } + return secret.Data[s.SecretRef.Key], nil +} + +// CommonCredentialExtractor extracts credentials from common sources. +func CommonCredentialExtractor(ctx context.Context, source xpv1.CredentialsSource, client client.Client, selector xpv1.CommonCredentialSelectors) ([]byte, error) { + switch source { + case xpv1.CredentialsSourceEnvironment: + return ExtractEnv(ctx, os.Getenv, selector) + case xpv1.CredentialsSourceFilesystem: + return ExtractFs(ctx, afero.NewOsFs(), selector) + case xpv1.CredentialsSourceSecret: + return ExtractSecret(ctx, client, selector) + case xpv1.CredentialsSourceNone: + return nil, nil + case xpv1.CredentialsSourceInjectedIdentity: + // There is no common injected identity extractor. Each provider must + // implement their own. + fallthrough + default: + return nil, errors.Errorf(errNoHandlerForSourceFmt, source) + } +} + +// A Tracker tracks managed resources. +type Tracker interface { + // Track the supplied managed resource. + Track(ctx context.Context, mg Managed) error +} + +// A TrackerFn is a function that tracks managed resources. +type TrackerFn func(ctx context.Context, mg Managed) error + +// Track the supplied managed resource. +func (fn TrackerFn) Track(ctx context.Context, mg Managed) error { + return fn(ctx, mg) +} + +// A ProviderConfigUsageTracker tracks usages of a ProviderConfig by creating or +// updating the appropriate ProviderConfigUsage. +type ProviderConfigUsageTracker struct { + c Applicator + of ProviderConfigUsage +} + +// NewProviderConfigUsageTracker creates a ProviderConfigUsageTracker. +func NewProviderConfigUsageTracker(c client.Client, of ProviderConfigUsage) *ProviderConfigUsageTracker { + return &ProviderConfigUsageTracker{c: NewAPIUpdatingApplicator(c), of: of} +} + +// Track that the supplied Managed resource is using the ProviderConfig it +// references by creating or updating a ProviderConfigUsage. Track should be +// called _before_ attempting to use the ProviderConfig. This ensures the +// managed resource's usage is updated if the managed resource is updated to +// reference a misconfigured ProviderConfig. 
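+//
+// A minimal sketch, assuming a controller-runtime client c, a managed
+// resource mg, and a hypothetical provider-specific ProviderConfigUsage type:
+//
+//  t := resource.NewProviderConfigUsageTracker(c, &v1alpha1.ProviderConfigUsage{})
+//  if err := t.Track(ctx, mg); err != nil {
+//   // handle error
+//  }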
+func (u *ProviderConfigUsageTracker) Track(ctx context.Context, mg Managed) error {
+ pcu := u.of.DeepCopyObject().(ProviderConfigUsage)
+ gvk := mg.GetObjectKind().GroupVersionKind()
+ ref := mg.GetProviderConfigReference()
+ if ref == nil {
+ return errMissingRef{errors.New(errMissingPCRef)}
+ }
+
+ pcu.SetName(string(mg.GetUID()))
+ pcu.SetLabels(map[string]string{xpv1.LabelKeyProviderName: ref.Name})
+ pcu.SetOwnerReferences([]metav1.OwnerReference{meta.AsController(meta.TypedReferenceTo(mg, gvk))})
+ pcu.SetProviderConfigReference(xpv1.Reference{Name: ref.Name})
+ pcu.SetResourceReference(xpv1.TypedReference{
+ APIVersion: gvk.GroupVersion().String(),
+ Kind: gvk.Kind,
+ Name: mg.GetName(),
+ })
+
+ err := u.c.Apply(ctx, pcu,
+ MustBeControllableBy(mg.GetUID()),
+ AllowUpdateIf(func(current, _ runtime.Object) bool {
+ return current.(ProviderConfigUsage).GetProviderConfigReference() != pcu.GetProviderConfigReference()
+ }),
+ )
+ return errors.Wrap(Ignore(IsNotAllowed, err), errApplyPCU)
+}
diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/reference.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/reference.go
new file mode 100644
index 00000000..a596106f
--- /dev/null
+++ b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/reference.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2019 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ReferenceStatusType is an enum type for the possible values for a Reference Status
+type ReferenceStatusType int
+
+// Reference statuses.
+const (
+ ReferenceStatusUnknown ReferenceStatusType = iota
+ ReferenceNotFound
+ ReferenceNotReady
+ ReferenceReady
+)
+
+func (t ReferenceStatusType) String() string {
+ return []string{"Unknown", "NotFound", "NotReady", "Ready"}[t]
+}
+
+// ReferenceStatus has the name and status of a reference
+type ReferenceStatus struct {
+ Name string
+ Status ReferenceStatusType
+}
+
+func (r ReferenceStatus) String() string {
+ return fmt.Sprintf("{reference:%s status:%s}", r.Name, r.Status)
+}
+
+// A CanReference is a resource that can reference another resource in its
+// spec in order to automatically resolve corresponding spec field values
+// by inspecting the referenced resource.
+type CanReference runtime.Object
+
+// An AttributeReferencer resolves cross-resource attribute references. See
+// https://github.com/crossplane/crossplane/blob/master/design/one-pager-cross-resource-referencing.md
+// for more information
+type AttributeReferencer interface {
+ // GetStatus retrieves the referenced resource, as well as other non-managed
+ // resources (like a `Provider`) and reports their readiness for use as a
+ // referenced resource.
+ GetStatus(ctx context.Context, res CanReference, r client.Reader) ([]ReferenceStatus, error) + + // Build retrieves the referenced resource, as well as other non-managed + // resources (like a `Provider`), and builds the referenced attribute, + // returning it as a string value. + Build(ctx context.Context, res CanReference, r client.Reader) (value string, err error) + + // Assign accepts a managed resource object, and assigns the given value to + // its corresponding property. + Assign(res CanReference, value string) error +} diff --git a/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/resource.go b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/resource.go new file mode 100644 index 00000000..bfeb7c60 --- /dev/null +++ b/vendor/github.com/crossplane/crossplane-runtime/pkg/resource/resource.go @@ -0,0 +1,418 @@ +/* +Copyright 2019 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "context" + "strings" + + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + "sigs.k8s.io/controller-runtime/pkg/client" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/crossplane/crossplane-runtime/pkg/errors" + "github.com/crossplane/crossplane-runtime/pkg/meta" +) + +// SecretTypeConnection is the type of Crossplane connection secrets. +const SecretTypeConnection corev1.SecretType = "connection.crossplane.io/v1alpha1" + +// External resources are tagged/labelled with the following keys in the cloud +// provider API if the type supports. +const ( + ExternalResourceTagKeyKind = "crossplane-kind" + ExternalResourceTagKeyName = "crossplane-name" + ExternalResourceTagKeyProvider = "crossplane-providerconfig" +) + +// A ManagedKind contains the type metadata for a kind of managed resource. +type ManagedKind schema.GroupVersionKind + +// A CompositeKind contains the type metadata for a kind of composite resource. +type CompositeKind schema.GroupVersionKind + +// A CompositeClaimKind contains the type metadata for a kind of composite +// resource claim. +type CompositeClaimKind schema.GroupVersionKind + +// ProviderConfigKinds contains the type metadata for a kind of provider config. +type ProviderConfigKinds struct { + Config schema.GroupVersionKind + Usage schema.GroupVersionKind + UsageList schema.GroupVersionKind +} + +// A ConnectionSecretOwner is a Kubernetes object that owns a connection secret. +type ConnectionSecretOwner interface { + Object + + ConnectionSecretWriterTo + ConnectionDetailsPublisherTo +} + +// A LocalConnectionSecretOwner may create and manage a connection secret in its +// own namespace. 
+type LocalConnectionSecretOwner interface {
+	runtime.Object
+	metav1.Object
+
+	LocalConnectionSecretWriterTo
+	ConnectionDetailsPublisherTo
+}
+
+// A ConnectionPropagator is responsible for propagating information required to
+// connect to a resource.
+// Deprecated: This functionality will be removed soon.
+type ConnectionPropagator interface {
+	PropagateConnection(ctx context.Context, to LocalConnectionSecretOwner, from ConnectionSecretOwner) error
+}
+
+// A ConnectionPropagatorFn is a function that satisfies the
+// ConnectionPropagator interface.
+type ConnectionPropagatorFn func(ctx context.Context, to LocalConnectionSecretOwner, from ConnectionSecretOwner) error
+
+// A ManagedConnectionPropagator is responsible for propagating information
+// required to connect to a managed resource (for example the connection secret)
+// from the managed resource to a target.
+// Deprecated: This functionality will be removed soon.
+type ManagedConnectionPropagator interface {
+	PropagateConnection(ctx context.Context, o LocalConnectionSecretOwner, mg Managed) error
+}
+
+// A ManagedConnectionPropagatorFn is a function that satisfies the
+// ManagedConnectionPropagator interface.
+type ManagedConnectionPropagatorFn func(ctx context.Context, o LocalConnectionSecretOwner, mg Managed) error
+
+// PropagateConnection information from the supplied managed resource to the
+// supplied resource claim.
+func (fn ManagedConnectionPropagatorFn) PropagateConnection(ctx context.Context, o LocalConnectionSecretOwner, mg Managed) error {
+	return fn(ctx, o, mg)
+}
+
+// LocalConnectionSecretFor creates a connection secret in the namespace of the
+// supplied LocalConnectionSecretOwner, assumed to be of the supplied kind.
+func LocalConnectionSecretFor(o LocalConnectionSecretOwner, kind schema.GroupVersionKind) *corev1.Secret {
+	return &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace:       o.GetNamespace(),
+			Name:            o.GetWriteConnectionSecretToReference().Name,
+			OwnerReferences: []metav1.OwnerReference{meta.AsController(meta.TypedReferenceTo(o, kind))},
+		},
+		Type: SecretTypeConnection,
+		Data: make(map[string][]byte),
+	}
+}
+
+// ConnectionSecretFor creates a connection secret for the supplied
+// ConnectionSecretOwner, assumed to be of the supplied kind. The secret is
+// written to 'default' namespace if the ConnectionSecretOwner does not specify
+// a namespace.
+func ConnectionSecretFor(o ConnectionSecretOwner, kind schema.GroupVersionKind) *corev1.Secret {
+	return &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace:       o.GetWriteConnectionSecretToReference().Namespace,
+			Name:            o.GetWriteConnectionSecretToReference().Name,
+			OwnerReferences: []metav1.OwnerReference{meta.AsController(meta.TypedReferenceTo(o, kind))},
+		},
+		Type: SecretTypeConnection,
+		Data: make(map[string][]byte),
+	}
+}
+
+// MustCreateObject returns a new Object of the supplied kind. It panics if the
+// kind is unknown to the supplied ObjectCreator.
+func MustCreateObject(kind schema.GroupVersionKind, oc runtime.ObjectCreater) runtime.Object {
+	obj, err := oc.New(kind)
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// GetKind returns the GroupVersionKind of the supplied object. It returns an
+// error if the object is unknown to the supplied ObjectTyper, the object is
+// unversioned, or the object does not have exactly one registered kind.
+func GetKind(obj runtime.Object, ot runtime.ObjectTyper) (schema.GroupVersionKind, error) {
+	kinds, unversioned, err := ot.ObjectKinds(obj)
+	if err != nil {
+		return schema.GroupVersionKind{}, errors.Wrap(err, "cannot get kind of supplied object")
+	}
+	if unversioned {
+		return schema.GroupVersionKind{}, errors.New("supplied object is unversioned")
+	}
+	if len(kinds) != 1 {
+		return schema.GroupVersionKind{}, errors.New("supplied object does not have exactly one kind")
+	}
+	return kinds[0], nil
+}
+
+// MustGetKind returns the GroupVersionKind of the supplied object. It panics if
+// the object is unknown to the supplied ObjectTyper, the object is unversioned,
+// or the object does not have exactly one registered kind.
+func MustGetKind(obj runtime.Object, ot runtime.ObjectTyper) schema.GroupVersionKind {
+	gvk, err := GetKind(obj, ot)
+	if err != nil {
+		panic(err)
+	}
+	return gvk
+}
+
+// An ErrorIs function returns true if an error satisfies a particular condition.
+type ErrorIs func(err error) bool
+
+// Ignore any errors that satisfy the supplied ErrorIs function by returning
+// nil. Errors that do not satisfy the supplied function are returned unmodified.
+func Ignore(is ErrorIs, err error) error {
+	if is(err) {
+		return nil
+	}
+	return err
+}
+
+// IgnoreAny ignores errors that satisfy any of the supplied ErrorIs functions
+// by returning nil. Errors that do not satisfy any of the supplied functions
+// are returned unmodified.
+func IgnoreAny(err error, is ...ErrorIs) error {
+	for _, f := range is {
+		if f(err) {
+			return nil
+		}
+	}
+	return err
+}
+
+// IgnoreNotFound returns the supplied error, or nil if the error indicates a
+// Kubernetes resource was not found.
+func IgnoreNotFound(err error) error {
+	return Ignore(kerrors.IsNotFound, err)
+}
+
+// IsAPIError returns true if the given error is a Kubernetes API error.
+func IsAPIError(err error) bool {
+	_, ok := err.(kerrors.APIStatus) //nolint: errorlint // we assert against the kerrors.APIStatus Interface which does not implement the error interface
+	return ok
+}
+
+// IsAPIErrorWrapped returns true if err is a K8s API error, or recursively wraps a K8s API error.
+func IsAPIErrorWrapped(err error) bool {
+	return IsAPIError(errors.Cause(err))
+}
+
+// IsConditionTrue returns true if the supplied condition's status is true.
+func IsConditionTrue(c xpv1.Condition) bool {
+	return c.Status == corev1.ConditionTrue
+}
+
+// An Applicator applies changes to an object.
+type Applicator interface {
+	Apply(context.Context, client.Object, ...ApplyOption) error
+}
+
+type shouldRetryFunc func(error) bool
+
+// An ApplicatorWithRetry applies changes to an object, retrying on transient failures.
+type ApplicatorWithRetry struct {
+	Applicator
+	shouldRetry shouldRetryFunc
+	backoff     wait.Backoff
+}
+
+// Apply invokes the nested Applicator's Apply, retrying on designated errors.
+func (awr *ApplicatorWithRetry) Apply(ctx context.Context, c client.Object, opts ...ApplyOption) error {
+	return retry.OnError(awr.backoff, awr.shouldRetry, func() error {
+		return awr.Applicator.Apply(ctx, c, opts...)
+	})
+}
+
+// NewApplicatorWithRetry returns an ApplicatorWithRetry for the specified
+// applicator and with the specified retry function.
+//
+// If backoff is nil, then retry.DefaultRetry is used as the default.
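Before the constructor that follows, a brief usage sketch (editorial, not vendored code; `kube` and `obj` are assumed inputs) shows how the retry wrapper and the error helpers above are typically combined:

```go
// Editorial sketch, not vendored code.
package sketch

import (
	"context"

	"github.com/crossplane/crossplane-runtime/pkg/resource"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func applyWithRetry(ctx context.Context, kube client.Client, obj client.Object) error {
	a := resource.NewApplicatorWithRetry(
		resource.NewAPIPatchingApplicator(kube), // the applicator the deprecated Apply helper below delegates to
		resource.IsAPIErrorWrapped,              // retry any (possibly wrapped) Kubernetes API error
		nil,                                     // nil backoff falls back to retry.DefaultRetry
	)
	// Tolerate NotFound the same way Track, earlier in this diff,
	// tolerates NotAllowed.
	return resource.IgnoreNotFound(a.Apply(ctx, obj))
}
```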
+func NewApplicatorWithRetry(applicator Applicator, shouldRetry shouldRetryFunc, backoff *wait.Backoff) *ApplicatorWithRetry {
+	result := &ApplicatorWithRetry{
+		Applicator:  applicator,
+		shouldRetry: shouldRetry,
+		backoff:     retry.DefaultRetry,
+	}
+
+	if backoff != nil {
+		result.backoff = *backoff
+	}
+
+	return result
+}
+
+// A ClientApplicator may be used to build a single 'client' that satisfies both
+// client.Client and Applicator.
+type ClientApplicator struct {
+	client.Client
+	Applicator
+}
+
+// An ApplyFn is a function that satisfies the Applicator interface.
+type ApplyFn func(context.Context, client.Object, ...ApplyOption) error
+
+// Apply changes to the supplied object.
+func (fn ApplyFn) Apply(ctx context.Context, o client.Object, ao ...ApplyOption) error {
+	return fn(ctx, o, ao...)
+}
+
+// An ApplyOption is called before patching the current object to match the
+// desired object. ApplyOptions are not called if no current object exists.
+type ApplyOption func(ctx context.Context, current, desired runtime.Object) error
+
+// UpdateFn returns an ApplyOption that is used to modify the current object to
+// match fields of the desired.
+func UpdateFn(fn func(current, desired runtime.Object)) ApplyOption {
+	return func(_ context.Context, c, d runtime.Object) error {
+		fn(c, d)
+		return nil
+	}
+}
+
+type errNotControllable struct{ error }
+
+func (e errNotControllable) NotControllable() bool {
+	return true
+}
+
+// IsNotControllable returns true if the supplied error indicates that a
+// resource is not controllable - i.e. that another resource is not, and may
+// not become, its controller reference.
+func IsNotControllable(err error) bool {
+	_, ok := err.(interface { //nolint: errorlint // Skip errorlint for interface type
+		NotControllable() bool
+	})
+	return ok
+}
+
+// MustBeControllableBy requires that the current object is controllable by an
+// object with the supplied UID. An object is controllable if its controller
+// reference matches the supplied UID, or it has no controller reference. An
+// error that satisfies IsNotControllable will be returned if the current object
+// cannot be controlled by the supplied UID.
+func MustBeControllableBy(u types.UID) ApplyOption {
+	return func(_ context.Context, current, _ runtime.Object) error {
+		c := metav1.GetControllerOf(current.(metav1.Object))
+		if c == nil {
+			return nil
+		}
+
+		if c.UID != u {
+			return errNotControllable{errors.Errorf("existing object is not controlled by UID %q", u)}
+
+		}
+		return nil
+	}
+}
+
+// ConnectionSecretMustBeControllableBy requires that the current object is a
+// connection secret that is controllable by an object with the supplied UID.
+// Contemporary connection secrets are of SecretTypeConnection, while legacy
+// connection secrets are of corev1.SecretTypeOpaque. Contemporary connection
+// secrets are considered controllable if they are already controlled by the
+// supplied UID, or have no controller reference. Legacy connection secrets are
+// only considered controllable if they are already controlled by the supplied
+// UID. It is not safe to assume legacy connection secrets without a controller
+// reference are controllable because they are indistinguishable from Kubernetes
+// secrets that have nothing to do with Crossplane. An error that satisfies
+// IsNotControllable will be returned if the current secret is not a connection
+// secret or cannot be controlled by the supplied UID.
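These guard options are usually composed on a single Apply call, much as the Track method at the top of this diff does. A hedged sketch follows; the label-comparison update rule is an arbitrary example, and AllowUpdateIf is defined just below:

```go
// Editorial sketch of composing apply guards.
package sketch

import (
	"context"
	"reflect"

	"github.com/crossplane/crossplane-runtime/pkg/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func guardedApply(ctx context.Context, a resource.Applicator, owner metav1.Object, obj client.Object) error {
	err := a.Apply(ctx, obj,
		// Refuse to adopt an object that another controller owns.
		resource.MustBeControllableBy(owner.GetUID()),
		// Skip no-op updates: only patch when the labels actually differ.
		resource.AllowUpdateIf(func(current, desired runtime.Object) bool {
			c, d := current.(metav1.Object), desired.(metav1.Object)
			return !reflect.DeepEqual(c.GetLabels(), d.GetLabels())
		}),
	)
	// A rejected update is not an error for our purposes.
	return resource.Ignore(resource.IsNotAllowed, err)
}
```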
+func ConnectionSecretMustBeControllableBy(u types.UID) ApplyOption { + return func(_ context.Context, current, _ runtime.Object) error { + s := current.(*corev1.Secret) + c := metav1.GetControllerOf(s) + + switch { + case c == nil && s.Type != SecretTypeConnection: + return errNotControllable{errors.Errorf("refusing to modify uncontrolled secret of type %q", s.Type)} + case c == nil: + return nil + case c.UID != u: + return errNotControllable{errors.Errorf("existing secret is not controlled by UID %q", u)} + } + + return nil + } +} + +type errNotAllowed struct{ error } + +func (e errNotAllowed) NotAllowed() bool { + return true +} + +// NewNotAllowed returns a new NotAllowed error +func NewNotAllowed(message string) error { + return errNotAllowed{error: errors.New(message)} +} + +// IsNotAllowed returns true if the supplied error indicates that an operation +// was not allowed. +func IsNotAllowed(err error) bool { + _, ok := err.(interface { //nolint: errorlint // Skip errorlint for interface type + NotAllowed() bool + }) + return ok +} + +// AllowUpdateIf will only update the current object if the supplied fn returns +// true. An error that satisfies IsNotAllowed will be returned if the supplied +// function returns false. Creation of a desired object that does not currently +// exist is always allowed. +func AllowUpdateIf(fn func(current, desired runtime.Object) bool) ApplyOption { + return func(_ context.Context, current, desired runtime.Object) error { + if fn(current, desired) { + return nil + } + return errNotAllowed{errors.New("update not allowed")} + } +} + +// Apply changes to the supplied object. The object will be created if it does +// not exist, or patched if it does. +// +// Deprecated: use APIPatchingApplicator instead. +func Apply(ctx context.Context, c client.Client, o client.Object, ao ...ApplyOption) error { + return NewAPIPatchingApplicator(c).Apply(ctx, o, ao...) +} + +// GetExternalTags returns the identifying tags to be used to tag the external +// resource in provider API. +func GetExternalTags(mg Managed) map[string]string { + tags := map[string]string{ + ExternalResourceTagKeyKind: strings.ToLower(mg.GetObjectKind().GroupVersionKind().GroupKind().String()), + ExternalResourceTagKeyName: mg.GetName(), + } + + switch { + case mg.GetProviderConfigReference() != nil && mg.GetProviderConfigReference().Name != "": + tags[ExternalResourceTagKeyProvider] = mg.GetProviderConfigReference().Name + // TODO(muvaf): Remove the branch once Provider type has been removed from + // everywhere. + case mg.GetProviderReference() != nil && mg.GetProviderReference().Name != "": + tags[ExternalResourceTagKeyProvider] = mg.GetProviderReference().Name + } + return tags +} diff --git a/vendor/github.com/edgefarm/provider-nats/LICENSE b/vendor/github.com/edgefarm/provider-nats/LICENSE new file mode 100644 index 00000000..ae6b59dc --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The Crossplane Authors. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/config_types.go b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/config_types.go
new file mode 100644
index 00000000..04d36484
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/config_types.go
@@ -0,0 +1,108 @@
+package v1alpha1
+
+/*
+Copyright 2023 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+import (
+	"reflect"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+
+	"github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer"
+)
+
+// ConsumerParameters are the configurable fields of a consumer.
+type ConsumerParameters struct {
+	// Stream is the name of the JetStream stream the consumer is created for.
+	// +kubebuilder:validation:Required
+	Stream string `json:"stream"`
+
+	// Domain is the domain of the JetStream stream the consumer is created for.
+	// +kubebuilder:validation:Optional
+	Domain string `json:"domain,omitempty"`
+
+	// Config is the consumer configuration.
+	// +kubebuilder:validation:Required
+	Config consumer.ConsumerConfig `json:"config"`
+}
+
+// ConsumerObservation are the observable fields of a consumer.
+type ConsumerObservation struct {
+	// State is the current state of the consumer.
+	State consumer.ConsumerObservationState `json:"state,omitempty"`
+}
+
+// A ConsumerSpec defines the desired state of a consumer.
+type ConsumerSpec struct {
+	xpv1.ResourceSpec `json:",inline"`
+	ForProvider       ConsumerParameters `json:"forProvider"`
+}
+
+// A ConsumerStatus represents the observed state of a consumer.
+type ConsumerStatus struct {
+	xpv1.ResourceStatus `json:",inline"`
+	AtProvider          ConsumerObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:object:generate=true
+// +genclient
+// +genclient:nonNamespaced
+
+// A Consumer is a managed resource that represents a NATS JetStream consumer.
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="DOMAIN",type="string",JSONPath=".spec.forProvider.domain"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="PUSH CONSUMER",type="string",priority=1,JSONPath=".status.atProvider.state.pushBound"
+// +kubebuilder:printcolumn:name="STREAM",type="string",priority=1,JSONPath=".status.atProvider.state.streamName"
+// +kubebuilder:printcolumn:name="UNPROCESSED",type="string",priority=1,JSONPath=".status.atProvider.state.numPending"
+// +kubebuilder:printcolumn:name="REDELIVERED",type="string",priority=1,JSONPath=".status.atProvider.state.numRedelivered"
+// +kubebuilder:printcolumn:name="ACK PENDING",type="string",priority=1,JSONPath=".status.atProvider.state.numAckPending"
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,nats}
+type Consumer struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ConsumerSpec   `json:"spec"`
+	Status ConsumerStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ConsumerList contains a list of Consumers.
+type ConsumerList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Consumer `json:"items"`
+}
+
+// Consumer type metadata.
+var (
+	ConsumerKind             = reflect.TypeOf(Consumer{}).Name()
+	ConsumerGroupKind        = schema.GroupKind{Group: Group, Kind: ConsumerKind}.String()
+	ConsumerKindAPIVersion   = ConsumerKind + "." + SchemeGroupVersion.String()
+	ConsumerGroupVersionKind = SchemeGroupVersion.WithKind(ConsumerKind)
+)
+
+func init() {
+	SchemeBuilder.Register(&Consumer{}, &ConsumerList{})
+}
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/consumer_types.go b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/consumer_types.go
new file mode 100644
index 00000000..54404b28
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/consumer_types.go
@@ -0,0 +1,255 @@
+/*
+Copyright 2023 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package consumer
+
+// +kubebuilder:object:generate=true
+// ConsumerConfig will determine the properties for a JetStream consumer.
+// For more information see https://docs.nats.io/jetstream/concepts/consumers
+type ConsumerConfig struct {
+	// Description is a human readable description of the consumer.
+	// This can be particularly useful for ephemeral consumers to indicate their purpose since the durable name cannot be provided.
+	// +kubebuilder:validation:Optional
+	Description string `json:"description,omitempty"`
+
+	// DeliverPolicy defines the point in the stream to receive messages from, either All, Last, New, ByStartSequence, ByStartTime, or LastPerSubject.
+	// For more information see https://docs.nats.io/jetstream/concepts/consumers#deliverpolicy
+	// +kubebuilder:validation:Enum=All;Last;New;ByStartSequence;ByStartTime;LastPerSubject
+	// +kubebuilder:default=All
+	// +kubebuilder:validation:Required
+	DeliverPolicy string `json:"deliverPolicy"`
+
+	// OptStartSeq is an optional start sequence number and is used with the DeliverByStartSequence deliver policy.
+	// +kubebuilder:validation:Optional
+	OptStartSeq uint64 `json:"optStartSeq,omitempty"`
+
+	// OptStartTime is an optional start time and is used with the DeliverByStartTime deliver policy.
+	// The time format is RFC 3339, e.g. 2023-01-09T14:48:32Z.
+	// +kubebuilder:validation:Pattern="^((?:(\\d{4}-\\d{2}-\\d{2})T(\\d{2}:\\d{2}:\\d{2}(?:\\.\\d+)?))(Z|[\\+-]\\d{2}:\\d{2})?)$"
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:validation:Type=string
+	OptStartTime string `json:"optStartTime,omitempty"`
+
+	// AckPolicy describes the requirement of client acknowledgements, either Explicit, None, or All.
+	// For more information see https://docs.nats.io/nats-concepts/jetstream/consumers#ackpolicy
+	// +kubebuilder:validation:Enum=Explicit;None;All
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Type=string
+	// +kubebuilder:default=Explicit
+	AckPolicy string `json:"ackPolicy"`
+
+	// AckWait is the duration that the server will wait for an ack for any individual message once it has been delivered to a consumer.
+	// If an ack is not received in time, the message will be redelivered.
+	// Format is a string duration, e.g. 1h, 1m, 1s, 1h30m or 2h3m4s.
+	// +kubebuilder:validation:Pattern="([0-9]+h)?([0-9]+m)?([0-9]+s)?"
+	// +kubebuilder:validation:Required
+	// +kubebuilder:validation:Type=string
+	// +kubebuilder:default="30s"
+	AckWait string `json:"ackWait"`
+
+	// MaxDeliver is the maximum number of times a specific message delivery will be attempted.
+	// Applies to any message that is re-sent due to ack policy (i.e. due to a negative ack, or no ack sent by the client).
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default=-1
+	MaxDeliver int `json:"maxDeliver,omitempty"`
+
+	// BackOff is a list of time durations that represent the time to delay based on delivery count.
+	// Format of the durations is a string duration, e.g. 1h, 1m, 1s, 1h30m or 2h3m4s where multiple durations are separated by commas.
+	// Example: `1s,2s,3s,4s,5s`.
+	// +kubebuilder:validation:Pattern="^(([0-9]+h)?([0-9]+m)?([0-9]+s)?)(?:,\\s*(([0-9]+h)?([0-9]+m)?([0-9]+s)?))*$"
+	// +kubebuilder:validation:Optional
+	BackOff string `json:"backoff,omitempty"`
+
+	// FilterSubject defines an overlapping subject with the subjects bound to the stream which will filter the set of messages received by the consumer.
+	FilterSubject string `json:"filterSubject,omitempty"`
+
+	// ReplayPolicy is used to define the mode of message replay.
+	// If the policy is Instant, the messages will be pushed to the client as fast as possible while adhering to the Ack Policy, Max Ack Pending and the client's ability to consume those messages.
+	// If the policy is Original, the messages in the stream will be pushed to the client at the same rate that they were originally received, simulating the original timing of messages.
+	// +kubebuilder:validation:Enum=Instant;Original
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default=Instant
+	ReplayPolicy string `json:"replayPolicy"`
+
+	// SampleFrequency sets the percentage of acknowledgements that should be sampled for observability.
+	// +kubebuilder:validation:Pattern="^([1-9][0-9]?|100)%?$"
+	// +kubebuilder:validation:Optional
+	SampleFrequency string `json:"sampleFreq,omitempty"`
+
+	// MaxAckPending sets the number of outstanding acks that are allowed before message delivery is halted.
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default=1000
+	MaxAckPending int `json:"maxAckPending,omitempty"`
+
+	// InactiveThreshold defines the duration that instructs the server to clean up consumers that are inactive for that long.
+	// Format is a string duration, e.g. 1h, 1m, 1s, 1h30m or 2h3m4s.
+	// +kubebuilder:validation:Pattern="([0-9]+h)?([0-9]+m)?([0-9]+s)?"
+	// +kubebuilder:validation:Optional
+	InactiveThreshold string `json:"inactiveThreshold,omitempty"`
+
+	// Replicas sets the number of replicas for the consumer's state.
+	// By default, when the value is set to zero, consumers inherit the number of replicas from the stream.
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default=0
+	Replicas int `json:"numReplicas"`
+
+	// MemoryStorage, if set, forces the consumer state to be kept in memory rather than inherit the storage type of the stream (file in this case).
+	// +kubebuilder:validation:Optional
+	MemoryStorage bool `json:"memStorage,omitempty"`
+
+	// PullConsumer defines the pull-based consumer configuration.
+	// +kubebuilder:validation:Optional
+	PullConsumer *PullConsumerSpec `json:"pull,omitempty"`
+
+	// PushConsumer defines the push-based consumer configuration.
+	// +kubebuilder:validation:Optional
+	PushConsumer *PushConsumerSpec `json:"push,omitempty"`
+}
+
+// PushConsumerSpec defines the push-based consumer configuration.
+// For more information, see https://docs.nats.io/nats-concepts/jetstream/consumers#push-specific
+type PushConsumerSpec struct {
+	// RateLimit is used to throttle the delivery of messages to the consumer, in bits per second.
+	// +kubebuilder:validation:Optional
+	RateLimit uint64 `json:"rateLimitBps,omitempty"`
+
+	// HeadersOnly delivers, if set, only the headers of messages in the stream and not the bodies.
+	// Additionally adds a Nats-Msg-Size header to indicate the size of the removed payload.
+	// +kubebuilder:validation:Optional
+	HeadersOnly bool `json:"headersOnly,omitempty"`
+
+	// DeliverSubject defines the subject to deliver messages to.
+	// Note that setting this field implicitly decides whether the consumer is push or pull-based.
+	// With a deliver subject, the server will push messages to clients subscribed to this subject.
+	// This is a push consumer specific setting.
+	// +kubebuilder:validation:Required
+	DeliverSubject string `json:"deliverSubject,omitempty"`
+
+	// DeliverGroup defines the queue group name which, if specified, is then used to distribute the messages between the subscribers to the consumer.
+	// This is analogous to a queue group in core NATS. See https://docs.nats.io/nats-concepts/core-nats/queue for more information on queue groups.
+	// This is a push consumer specific setting.
+	// +kubebuilder:validation:Optional
+	DeliverGroup string `json:"deliverGroup,omitempty"`
+
+	// FlowControl enables per-subscription flow control using a sliding-window protocol.
+	// This protocol relies on the server and client exchanging messages to regulate when and how many messages are pushed to the client.
+	// This one-to-one flow control mechanism works in tandem with the one-to-many flow control imposed by MaxAckPending across all subscriptions bound to a consumer.
+	// This is a push consumer specific setting.
+	// +kubebuilder:validation:Optional
+	FlowControl bool `json:"flowControl,omitempty"`
+
+	// IdleHeartbeat defines, if set, that the server will regularly send a status message to the client (i.e. when the period has elapsed) while there are no new messages to send.
+	// This lets the client know that the JetStream service is still up and running, even when there is no activity on the stream.
+	// The message status header will have a code of 100. Unlike FlowControl, it will have no reply to address.
+	// It may have a description such as "Idle Heartbeat".
+	// Note that this heartbeat mechanism is all handled transparently by supported clients and does not need to be handled by the application.
+	// Format is a string duration, e.g. 1h, 1m, 1s, 1h30m or 2h3m4s.
+	// This is a push consumer specific setting.
+	// +kubebuilder:validation:Pattern="([0-9]+h)?([0-9]+m)?([0-9]+s)?"
+	// +kubebuilder:validation:Optional
+	IdleHeartbeat string `json:"idleHeartbeat,omitempty"`
+}
+
+// PullConsumerSpec defines the pull-based consumer configuration.
+// For more information, see https://docs.nats.io/nats-concepts/jetstream/consumers#pull-specific
+type PullConsumerSpec struct {
+	// MaxWaiting defines the maximum number of waiting pull requests.
+	// This is a pull consumer specific setting.
+	// +kubebuilder:validation:Optional
+	// +kubebuilder:default=512
+	MaxWaiting *int `json:"maxWaiting,omitempty"`
+
+	// MaxRequestExpires defines the maximum duration a single pull request will wait for messages to be available to pull.
+	// This is a pull consumer specific setting.
+	// +kubebuilder:validation:Pattern="([0-9]+h)?([0-9]+m)?([0-9]+s)?"
+	// +kubebuilder:validation:Optional
+	MaxRequestExpires string `json:"maxExpires,omitempty"`
+
+	// MaxRequestBatch defines the maximum batch size a single pull request can make.
+	// When set with MaxRequestMaxBytes, the batch size will be constrained by whichever limit is hit first.
+	// This is a pull consumer specific setting.
+	// +kubebuilder:validation:Optional
+	MaxRequestBatch int `json:"maxBatch,omitempty"`
+
+	// MaxRequestMaxBytes defines the maximum total bytes that can be requested in a given batch.
+	// When set with MaxRequestBatch, the batch size will be constrained by whichever limit is hit first.
+	// This is a pull consumer specific setting.
+	// +kubebuilder:validation:Optional
+	MaxRequestMaxBytes int `json:"maxBytes,omitempty"`
+}
+
+// ConsumerObservationState is the observed state of a JetStream consumer.
+type ConsumerObservationState struct {
+	// Domain is the domain of the consumer.
+	Domain string `json:"domain"`
+	// Stream is the stream name.
+	Stream string `json:"streamName"`
+	// Name is the consumer name.
+	Name string `json:"name"`
+	// Durable is the durable name.
+	Durable string `json:"durableName"`
+	// Created is the time the consumer was created.
+	// needs to be converted to time.Time
+	Created string `json:"created"`
+	// Delivered is the consumer sequence and last activity.
+	Delivered SequenceInfo `json:"delivered"`
+	// AckFloor is the sequence information up to which all earlier messages have been acknowledged.
+	AckFloor SequenceInfo `json:"ackFloor"`
+	// NumAckPending is the number of messages pending acknowledgement.
+	NumAckPending int `json:"numAckPending"`
+	// NumRedelivered is the number of redelivered messages.
+	NumRedelivered int `json:"numRedelivered"`
+	// NumWaiting is the number of messages waiting to be delivered.
+	NumWaiting int `json:"numWaiting"`
+	// NumPending is the number of messages pending.
+	NumPending uint64 `json:"numPending"`
+	// Cluster is the cluster information.
+	Cluster *ClusterInfo `json:"cluster,omitempty"`
+	// PushBound is whether the consumer is push bound.
+	PushBound string `json:"pushBound,omitempty"`
+}
+
+// SequenceInfo has both the consumer and the stream sequence and last activity.
+type SequenceInfo struct {
+	// Consumer is the consumer sequence.
+	Consumer uint64 `json:"consumerSeq"`
+	// Stream is the stream sequence.
+	Stream uint64 `json:"streamSeq"`
+	// Last is the last time the consumer was active
+	// needs to be converted to time.Time
+	Last string `json:"lastActive,omitempty"`
+}
+
+// ClusterInfo shows information about the underlying set of servers
+// that make up the stream or consumer.
+type ClusterInfo struct {
+	// Name is the name of the cluster.
+	Name string `json:"name,omitempty"`
+	// Leader is the leader of the cluster.
+	Leader string `json:"leader,omitempty"`
+	// Replicas are the replicas of the cluster.
+	Replicas []*PeerInfo `json:"replicas,omitempty"`
+}
+
+// PeerInfo shows information about all the peers in the cluster that
+// are supporting the stream or consumer.
+type PeerInfo struct {
+	Name    string `json:"name"`
+	Current bool   `json:"current"`
+	Offline bool   `json:"offline,omitempty"`
+	Active  string `json:"active"`
+	Lag     uint64 `json:"lag,omitempty"`
+}
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/convert.go b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/convert.go
new file mode 100644
index 00000000..932a44a6
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/convert.go
@@ -0,0 +1,191 @@
+package consumer
+
+import (
+	"strings"
+	"time"
+
+	"github.com/nats-io/nats.go"
+
+	"github.com/edgefarm/provider-nats/internal/convert"
+
+	errors "github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/errors"
+)
+
+func convertDeliverPolicy(delivery string) nats.DeliverPolicy {
+	switch delivery {
+	case "All":
+		return nats.DeliverAllPolicy
+	case "Last":
+		return nats.DeliverLastPolicy
+	case "New":
+		return nats.DeliverNewPolicy
+	case "ByStartSequence":
+		return nats.DeliverByStartSequencePolicy
+	case "ByStartTime":
+		return nats.DeliverByStartTimePolicy
+	case "LastPerSubject":
+		return nats.DeliverLastPerSubjectPolicy
+	default:
+		return nats.DeliverAllPolicy
+	}
+}
+
+func convertAckPolicy(policy string) nats.AckPolicy {
+	switch policy {
+	case "None":
+		return nats.AckNonePolicy
+	case "All":
+		return nats.AckAllPolicy
+	case "Explicit":
+		return nats.AckExplicitPolicy
+	default:
+		return nats.AckExplicitPolicy
+	}
+}
+
+func convertReplayPolicy(replay string) nats.ReplayPolicy {
+	switch replay {
+	case "Instant":
+		return nats.ReplayInstantPolicy
+	case "Original":
+		return nats.ReplayOriginalPolicy
+	default:
+		return nats.ReplayInstantPolicy
+	}
+}
+
+func convertBase(name string, config *ConsumerConfig) *nats.ConsumerConfig {
+	return &nats.ConsumerConfig{
+		Durable:         name,
+		Name:            name,
+		Description:     config.Description,
+		DeliverPolicy:   convertDeliverPolicy(config.DeliverPolicy),
+		OptStartSeq:     config.OptStartSeq,
+		AckPolicy:       convertAckPolicy(config.AckPolicy),
+		MaxDeliver:
config.MaxDeliver, + FilterSubject: config.FilterSubject, + ReplayPolicy: convertReplayPolicy(config.ReplayPolicy), + SampleFrequency: config.SampleFrequency, + MaxAckPending: config.MaxAckPending, + Replicas: config.Replicas, + MemoryStorage: config.MemoryStorage, + } +} + +func convertTimes(in *ConsumerConfig, out *nats.ConsumerConfig) error { + if in.OptStartTime != "" { + optStartTime, err := convert.RFC3339ToTime(in.OptStartTime) + if err != nil { + return err + } + out.OptStartTime = optStartTime + } + + return nil +} + +func convertPushConsumer(in *ConsumerConfig, out *nats.ConsumerConfig) error { + if in.PushConsumer != nil { + out.DeliverSubject = in.PushConsumer.DeliverSubject + out.DeliverGroup = in.PushConsumer.DeliverGroup + out.FlowControl = in.PushConsumer.FlowControl + if in.PushConsumer.IdleHeartbeat != "" { + dur, err := time.ParseDuration(in.PushConsumer.IdleHeartbeat) + if err != nil { + return err + } + out.Heartbeat = dur + } + out.RateLimit = in.PushConsumer.RateLimit + out.HeadersOnly = in.PushConsumer.HeadersOnly + } + return nil +} + +func convertPullConsumer(in *ConsumerConfig, out *nats.ConsumerConfig) error { + if in.PullConsumer != nil { + out.MaxRequestBatch = in.PullConsumer.MaxRequestBatch + if in.PullConsumer.MaxRequestExpires != "" { + dur, err := time.ParseDuration(in.PullConsumer.MaxRequestExpires) + if err != nil { + return err + } + out.MaxRequestExpires = dur + } + out.MaxRequestMaxBytes = in.PullConsumer.MaxRequestMaxBytes + if in.PullConsumer.MaxWaiting != nil { + out.MaxWaiting = *in.PullConsumer.MaxWaiting + } else { + out.MaxWaiting = DefaultMaxWait + } + return nil + } + + // Ensure that even if no PullConsumer is defined, we set a default value + out.MaxWaiting = DefaultMaxWait + return nil +} + +func convertDurations(in *ConsumerConfig, out *nats.ConsumerConfig) error { + if in.AckWait != "" { + dur, err := time.ParseDuration(in.AckWait) + if err != nil { + return err + } + out.AckWait = dur + } + + if in.InactiveThreshold != "" { + dur, err := time.ParseDuration(in.InactiveThreshold) + if err != nil { + return err + } + out.InactiveThreshold = dur + } + + if in.BackOff != "" { + var backOff []time.Duration + split := strings.Split(in.BackOff, ",") + for _, b := range split { + + dur, err := time.ParseDuration(b) + if err != nil { + return err + } + backOff = append(backOff, dur) + } + out.BackOff = backOff + } + + return nil +} + +func ConfigV1Alpha1ToNats(name string, config *ConsumerConfig) (*nats.ConsumerConfig, error) { + natsConfig := convertBase(name, config) + err := convertDurations(config, natsConfig) + if err != nil { + return &nats.ConsumerConfig{}, err + } + + err = convertTimes(config, natsConfig) + if err != nil { + return &nats.ConsumerConfig{}, err + } + + if config.PushConsumer != nil && config.PullConsumer != nil { + return nil, errors.PushAndPullConsumerError + } + + err = convertPushConsumer(config, natsConfig) + if err != nil { + return &nats.ConsumerConfig{}, err + } + + if config.PushConsumer == nil { + err = convertPullConsumer(config, natsConfig) + if err != nil { + return &nats.ConsumerConfig{}, err + } + } + return natsConfig, nil +} diff --git a/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/default.go b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/default.go new file mode 100644 index 00000000..8bb2fca7 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/default.go @@ -0,0 +1,36 @@ +package consumer + +const ( + // 
DefaultMaxWait is the default number of waiting pull requests.
+	DefaultMaxWait = 512
+)
+
+type ConsumerType int
+
+const (
+	ConsumerTypePull ConsumerType = iota
+	ConsumerTypePush
+	ConsumerTypeNone
+)
+
+func (c *ConsumerConfig) SetDefaults(t ConsumerType) {
+	c.DeliverPolicy = "All"
+	c.AckPolicy = "Explicit"
+	c.AckWait = "30s"
+	c.Replicas = 0
+	c.ReplayPolicy = "Instant"
+	c.FilterSubject = ""
+	c.MaxDeliver = -1
+	c.MaxAckPending = 1000
+	c.BackOff = ""
+
+	if t == ConsumerTypePush {
+		c.PushConsumer.HeadersOnly = false
+	} else if t == ConsumerTypePull {
+		c.PullConsumer.MaxWaiting = func() *int { i := DefaultMaxWait; return &i }()
+		c.PullConsumer.MaxRequestExpires = ""
+		c.PullConsumer.MaxRequestBatch = 0
+		c.PullConsumer.MaxRequestMaxBytes = 0
+	}
+
+}
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/doc.go b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/doc.go
new file mode 100644
index 00000000..e574bc79
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/doc.go
@@ -0,0 +1,2 @@
+// +k8s:deepcopy-gen=package
+package consumer
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/errors/errors.go b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/errors/errors.go
new file mode 100644
index 00000000..3b1103c1
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/errors/errors.go
@@ -0,0 +1,19 @@
+package errors
+
+var (
+	// PushAndPullConsumerError is an error returned when both push and pull consumers are defined in the same consumer configuration.
+	PushAndPullConsumerError ConsumerError = &consumerError{message: "invalid configuration: cannot define both 'push' and 'pull' consumer"}
+)
+
+// ConsumerError is an error returned when working with consumers.
+type ConsumerError interface {
+	error
+}
+
+type consumerError struct {
+	message string
+}
+
+func (err *consumerError) Error() string {
+	return err.message
+}
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/zz_generated.deepcopy.go b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/zz_generated.deepcopy.go
new file mode 100644
index 00000000..b58f7b6f
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/zz_generated.deepcopy.go
@@ -0,0 +1,162 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2020 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package consumer
+
+import ()
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
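Before the generated deepcopy helpers continue, a short editorial sketch ties the consumer API above together: a pull consumer spec relying on the kubebuilder defaults, converted through ConfigV1Alpha1ToNats from convert.go. The consumer name and batch size are arbitrary:

```go
// Editorial sketch, not part of the vendored source.
package sketch

import (
	"fmt"

	"github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer"
)

func convertExample() error {
	cfg := &consumer.ConsumerConfig{
		DeliverPolicy: "All", // mirror the kubebuilder defaults above
		AckPolicy:     "Explicit",
		AckWait:       "30s",
		ReplayPolicy:  "Instant",
		MaxDeliver:    -1,
		MaxAckPending: 1000,
		PullConsumer:  &consumer.PullConsumerSpec{MaxRequestBatch: 100},
	}
	// ConfigV1Alpha1ToNats (convert.go above) rejects configs that set both
	// push and pull, and applies DefaultMaxWait when MaxWaiting is unset.
	natsCfg, err := consumer.ConfigV1Alpha1ToNats("my-consumer", cfg)
	if err != nil {
		return err
	}
	fmt.Println(natsCfg.MaxWaiting) // 512, i.e. DefaultMaxWait
	return nil
}
```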
+func (in *ClusterInfo) DeepCopyInto(out *ClusterInfo) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = make([]*PeerInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(PeerInfo) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterInfo. +func (in *ClusterInfo) DeepCopy() *ClusterInfo { + if in == nil { + return nil + } + out := new(ClusterInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsumerConfig) DeepCopyInto(out *ConsumerConfig) { + *out = *in + if in.PullConsumer != nil { + in, out := &in.PullConsumer, &out.PullConsumer + *out = new(PullConsumerSpec) + (*in).DeepCopyInto(*out) + } + if in.PushConsumer != nil { + in, out := &in.PushConsumer, &out.PushConsumer + *out = new(PushConsumerSpec) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsumerConfig. +func (in *ConsumerConfig) DeepCopy() *ConsumerConfig { + if in == nil { + return nil + } + out := new(ConsumerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsumerObservationState) DeepCopyInto(out *ConsumerObservationState) { + *out = *in + out.Delivered = in.Delivered + out.AckFloor = in.AckFloor + if in.Cluster != nil { + in, out := &in.Cluster, &out.Cluster + *out = new(ClusterInfo) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsumerObservationState. +func (in *ConsumerObservationState) DeepCopy() *ConsumerObservationState { + if in == nil { + return nil + } + out := new(ConsumerObservationState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerInfo) DeepCopyInto(out *PeerInfo) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerInfo. +func (in *PeerInfo) DeepCopy() *PeerInfo { + if in == nil { + return nil + } + out := new(PeerInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PullConsumerSpec) DeepCopyInto(out *PullConsumerSpec) { + *out = *in + if in.MaxWaiting != nil { + in, out := &in.MaxWaiting, &out.MaxWaiting + *out = new(int) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PullConsumerSpec. +func (in *PullConsumerSpec) DeepCopy() *PullConsumerSpec { + if in == nil { + return nil + } + out := new(PullConsumerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PushConsumerSpec) DeepCopyInto(out *PushConsumerSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PushConsumerSpec. 
+func (in *PushConsumerSpec) DeepCopy() *PushConsumerSpec { + if in == nil { + return nil + } + out := new(PushConsumerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SequenceInfo) DeepCopyInto(out *SequenceInfo) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SequenceInfo. +func (in *SequenceInfo) DeepCopy() *SequenceInfo { + if in == nil { + return nil + } + out := new(SequenceInfo) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/doc.go b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/doc.go new file mode 100644 index 00000000..9c30a179 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 diff --git a/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/groupversion_info.go b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/groupversion_info.go new file mode 100644 index 00000000..0d93c8d8 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/groupversion_info.go @@ -0,0 +1,42 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains the v1alpha1 group Consumer resources of the NATS provider. +// +kubebuilder:object:generate=true +// +groupName=nats.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
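A small editorial sketch of how downstream controllers consume the scheme hooks declared below, assuming a standard controller-runtime setup:

```go
// Editorial sketch, not part of the vendored source.
package sketch

import (
	"k8s.io/apimachinery/pkg/runtime"

	"github.com/edgefarm/provider-nats/apis/consumer/v1alpha1"
)

// newScheme registers the Consumer types via the AddToScheme helper exported
// just below, so clients and managers can encode and decode them.
func newScheme() (*runtime.Scheme, error) {
	s := runtime.NewScheme()
	if err := v1alpha1.AddToScheme(s); err != nil {
		return nil, err
	}
	return s, nil
}
```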
+const ( + Group = "nats.crossplane.io" + Version = "v1alpha1" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} + + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..377e546a --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,151 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Consumer) DeepCopyInto(out *Consumer) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Consumer. +func (in *Consumer) DeepCopy() *Consumer { + if in == nil { + return nil + } + out := new(Consumer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Consumer) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsumerList) DeepCopyInto(out *ConsumerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Consumer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsumerList. +func (in *ConsumerList) DeepCopy() *ConsumerList { + if in == nil { + return nil + } + out := new(ConsumerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConsumerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ConsumerObservation) DeepCopyInto(out *ConsumerObservation) { + *out = *in + in.State.DeepCopyInto(&out.State) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsumerObservation. +func (in *ConsumerObservation) DeepCopy() *ConsumerObservation { + if in == nil { + return nil + } + out := new(ConsumerObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsumerParameters) DeepCopyInto(out *ConsumerParameters) { + *out = *in + in.Config.DeepCopyInto(&out.Config) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsumerParameters. +func (in *ConsumerParameters) DeepCopy() *ConsumerParameters { + if in == nil { + return nil + } + out := new(ConsumerParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsumerSpec) DeepCopyInto(out *ConsumerSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsumerSpec. +func (in *ConsumerSpec) DeepCopy() *ConsumerSpec { + if in == nil { + return nil + } + out := new(ConsumerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConsumerStatus) DeepCopyInto(out *ConsumerStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConsumerStatus. +func (in *ConsumerStatus) DeepCopy() *ConsumerStatus { + if in == nil { + return nil + } + out := new(ConsumerStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/zz_generated.managed.go b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/zz_generated.managed.go new file mode 100644 index 00000000..75a83919 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/zz_generated.managed.go @@ -0,0 +1,86 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Consumer. +func (mg *Consumer) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Consumer. +func (mg *Consumer) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetProviderConfigReference of this Consumer. 
+func (mg *Consumer) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this Consumer. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *Consumer) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this Consumer. +func (mg *Consumer) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Consumer. +func (mg *Consumer) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Consumer. +func (mg *Consumer) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Consumer. +func (mg *Consumer) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetProviderConfigReference of this Consumer. +func (mg *Consumer) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this Consumer. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *Consumer) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this Consumer. +func (mg *Consumer) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Consumer. +func (mg *Consumer) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/zz_generated.managedlist.go b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 00000000..3bfc0945 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,29 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ConsumerList. +func (l *ConsumerList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/config_types.go b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/config_types.go new file mode 100644 index 00000000..5353ccd7 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/config_types.go @@ -0,0 +1,112 @@ +package v1alpha1 + +/* +Copyright 2023 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+import (
+	"reflect"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+
+	"github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream"
+)
+
+// StreamParameters are the configurable fields of a Stream.
+type StreamParameters struct {
+	// Domain is the Jetstream domain in which the stream is created.
+	// +kubebuilder:validation:Optional
+	Domain string `json:"domain,omitempty"`
+
+	// Config is the stream configuration.
+	Config stream.StreamConfig `json:"config"`
+}
+
+// StreamObservation are the observable fields of a Stream.
+type StreamObservation struct {
+	// Domain is the Jetstream domain in which the stream is created.
+	Domain string `json:"domain,omitempty"`
+
+	// State is the current state of the stream.
+	State stream.StreamObservationState `json:"state,omitempty"`
+
+	// ClusterInfo shows information about the underlying set of servers that make up the stream.
+	ClusterInfo stream.StreamObservationClusterInfo `json:"clusterInfo,omitempty"`
+
+	// Connection shows information about the connection to the stream.
+	Connection stream.StreamObservationConnection `json:"connection,omitempty"`
+}
+
+// A StreamSpec defines the desired state of a Stream.
+type StreamSpec struct {
+	xpv1.ResourceSpec `json:",inline"`
+	ForProvider       StreamParameters `json:"forProvider"`
+}
+
+// A StreamStatus represents the observed state of a Stream.
+type StreamStatus struct {
+	xpv1.ResourceStatus `json:",inline"`
+	AtProvider          StreamObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:object:generate=true
+// +genclient
+// +genclient:nonNamespaced
+
+// A Stream represents a NATS JetStream stream managed by this provider.
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="DOMAIN",type="string",JSONPath=".spec.forProvider.domain"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="ADDRESS",type="string",priority=1,JSONPath=".status.atProvider.connection.address"
+// +kubebuilder:printcolumn:name="ACCOUNT PUB KEY",type="string",priority=1,JSONPath=".status.atProvider.connection.accountPublicKey"
+// +kubebuilder:printcolumn:name="MESSAGES",type="string",priority=1,JSONPath=".status.atProvider.state.messages"
+// +kubebuilder:printcolumn:name="BYTES",type="string",priority=1,JSONPath=".status.atProvider.state.bytes"
+// +kubebuilder:printcolumn:name="CONSUMERS",type="string",priority=1,JSONPath=".status.atProvider.state.consumerCount"
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,nats}
+type Stream struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   StreamSpec   `json:"spec"`
+	Status StreamStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// StreamList contains a list of Stream
+type StreamList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Stream `json:"items"`
+}
+
+// Stream type metadata.
+var (
+	StreamKind             = reflect.TypeOf(Stream{}).Name()
+	StreamGroupKind        = schema.GroupKind{Group: Group, Kind: StreamKind}.String()
+	StreamKindAPIVersion   = StreamKind + "." + SchemeGroupVersion.String()
+	StreamGroupVersionKind = SchemeGroupVersion.WithKind(StreamKind)
+)
+
+func init() {
+	SchemeBuilder.Register(&Stream{}, &StreamList{})
+}
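For orientation while reading this vendored API: a minimal sketch of how a Stream managed resource is assembled from the types above. The object name, domain, and subject are hypothetical, and the config is seeded with the SetDefaults helper from default.go further down in this diff.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	streamv1alpha1 "github.com/edgefarm/provider-nats/apis/stream/v1alpha1"
	"github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream"
)

func main() {
	// Seed the config with the schema defaults, then override what differs.
	cfg := stream.StreamConfig{}
	cfg.SetDefaults()
	cfg.Subjects = []string{"telemetry.>"} // hypothetical subject

	s := streamv1alpha1.Stream{
		ObjectMeta: metav1.ObjectMeta{Name: "example-stream"}, // hypothetical name
		Spec: streamv1alpha1.StreamSpec{
			ForProvider: streamv1alpha1.StreamParameters{
				Domain: "hub", // hypothetical JetStream domain
				Config: cfg,
			},
		},
	}
	fmt.Printf("%s: retention=%s storage=%s\n", s.Name, cfg.Retention, cfg.Storage)
}
```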
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/doc.go b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/doc.go
new file mode 100644
index 00000000..9c30a179
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/doc.go
@@ -0,0 +1,17 @@
+/*
+Copyright 2020 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/groupversion_info.go b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/groupversion_info.go
new file mode 100644
index 00000000..c9397f40
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/groupversion_info.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2020 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 contains the v1alpha1 group Stream resources of the NATS provider.
+// +kubebuilder:object:generate=true
+// +groupName=nats.crossplane.io
+// +versionName=v1alpha1
+package v1alpha1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+// Package type metadata.
+const (
+	Group   = "nats.crossplane.io"
+	Version = "v1alpha1"
+)
+
+var (
+	// SchemeGroupVersion is group version used to register these objects
+	SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+
+	AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/convert.go b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/convert.go
new file mode 100644
index 00000000..5c1ef802
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/convert.go
@@ -0,0 +1,196 @@
+package stream
+
+import (
+	"time"
+
+	"github.com/nats-io/nats.go"
+
+	convert "github.com/edgefarm/provider-nats/internal/convert"
+)
+
+func convertRetentionPolicy(retention string) nats.RetentionPolicy {
+	switch retention {
+	case "Limits":
+		return nats.LimitsPolicy
+	case "Interest":
+		return nats.InterestPolicy
+	case "WorkQueue":
+		return nats.WorkQueuePolicy
+	default:
+		return nats.LimitsPolicy
+	}
+}
+
+func convertDiscardPolicy(discard string) nats.DiscardPolicy {
+	switch discard {
+	case "Old":
+		return nats.DiscardOld
+	case "New":
+		return nats.DiscardNew
+	default:
+		return nats.DiscardOld
+	}
+}
+
+func convertStorage(storage string) nats.StorageType {
+	switch storage {
+	case "File":
+		return nats.FileStorage
+	case "Memory":
+		return nats.MemoryStorage
+	default:
+		return nats.FileStorage
+	}
+}
+
+func convertBase(name string, config *StreamConfig) *nats.StreamConfig {
+	return &nats.StreamConfig{
+		Name:                 name,
+		Description:          config.Description,
+		Subjects:             config.Subjects,
+		Retention:            convertRetentionPolicy(config.Retention),
+		MaxConsumers:         config.MaxConsumers,
+		MaxMsgs:              config.MaxMsgs,
+		MaxBytes:             config.MaxBytes,
+		Discard:              convertDiscardPolicy(config.Discard),
+		DiscardNewPerSubject: config.DiscardNewPerSubject,
+		MaxMsgsPerSubject:    config.MaxMsgsPerSubject,
+		MaxMsgSize:           config.MaxMsgSize,
+		Storage:              convertStorage(config.Storage),
+		Replicas:             config.Replicas,
+		NoAck:                config.NoAck,
+		Template:             config.TemplateOwner,
+		Sealed:               config.Sealed,
+		DenyDelete:           config.DenyDelete,
+		DenyPurge:            config.DenyPurge,
+		AllowRollup:          config.AllowRollup,
+		AllowDirect:          config.AllowDirect,
+		MirrorDirect:         config.MirrorDirect,
+	}
+}
+
+func convertDurations(in *StreamConfig, out *nats.StreamConfig) error {
+	if in.MaxAge != "" {
+		maxAge, err := time.ParseDuration(in.MaxAge)
+		if err != nil {
+			return err
+		}
+		out.MaxAge = maxAge
+	}
+
+	if in.Duplicates != "" {
+		duplicates, err := time.ParseDuration(in.Duplicates)
+		if err != nil {
+			return err
+		}
+		out.Duplicates = duplicates
+	}
+	return nil
+}
+
+func convertMirror(in *StreamConfig, out *nats.StreamConfig) error {
+	if in.Mirror != nil {
+		var optStartTime *time.Time
+		if in.Mirror.StartTime != "" {
+			var err error
+			optStartTime, err = convert.RFC3339ToTime(in.Mirror.StartTime)
+			if err != nil {
+				return err
+			}
+		}
+		mirrorConfig := &nats.StreamSource{
+			Name:          in.Mirror.Name,
+			OptStartSeq:   in.Mirror.StartSeq,
+			OptStartTime:  optStartTime,
+			FilterSubject: in.Mirror.FilterSubject,
+			Domain:        in.Mirror.Domain,
+		}
+		if in.Mirror.External != nil {
+			mirrorConfig.External = &nats.ExternalStream{
+				APIPrefix:     in.Mirror.External.APIPrefix,
+				DeliverPrefix: in.Mirror.External.DeliverPrefix,
+			}
+		}
+		out.Mirror = mirrorConfig
+	}
+	return nil
+}
+
+func convertSources(in *StreamConfig, out *nats.StreamConfig) error {
+	if in.Sources != nil {
+		sources := []*nats.StreamSource{}
+		for _, source := range in.Sources {
+			var optStartTime *time.Time
+			if source.StartTime != "" {
+				var err error
+				optStartTime, err = convert.RFC3339ToTime(source.StartTime)
+				if err != nil {
+					return err
+				}
+			}
+			streamSource := &nats.StreamSource{
+				Name:          source.Name,
+				OptStartSeq:   source.StartSeq,
+				OptStartTime:  optStartTime,
+				FilterSubject: source.FilterSubject,
+				Domain:        source.Domain,
+			}
+			if source.External != nil {
+				streamSource.External = &nats.ExternalStream{
+					APIPrefix:     source.External.APIPrefix,
+					DeliverPrefix: source.External.DeliverPrefix,
+				}
+			}
+			sources = append(sources, streamSource)
+		}
+		out.Sources = sources
+	}
+	return nil
+}
+
+func ConfigV1Alpha1ToNats(name string, config *StreamConfig) (*nats.StreamConfig, error) {
+	natsConfig := convertBase(name, config)
+	err := convertDurations(config, natsConfig)
+	if err != nil {
+		return &nats.StreamConfig{}, err
+	}
+
+	if config.Placement != nil {
+		placement := &nats.Placement{
+			Cluster: config.Placement.Cluster,
+			Tags:    config.Placement.Tags,
+		}
+		natsConfig.Placement = placement
+	}
+
+	if config.RePublish != nil {
+		republish := &nats.RePublish{
+			Source:      config.RePublish.Source,
+			Destination: config.RePublish.Destination,
+			HeadersOnly: config.RePublish.HeadersOnly,
+		}
+		natsConfig.RePublish = republish
+	}
+
+	err = convertMirror(config, natsConfig)
+	if err != nil {
+		return &nats.StreamConfig{}, err
+	}
+
+	err = convertSources(config, natsConfig)
+	if err != nil {
+		return &nats.StreamConfig{}, err
+	}
+
+	return natsConfig, nil
+}
+
+func ConvertPeerInfo(peer *nats.PeerInfo) *PeerInfo {
+	return &PeerInfo{
+		Name:    peer.Name,
+		Current: peer.Current,
+		Offline: peer.Offline,
+		Active:  peer.Active.String(),
+		Lag:     peer.Lag,
+	}
+}
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/default.go b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/default.go
new file mode 100644
index 00000000..e97aadf8
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/default.go
@@ -0,0 +1,15 @@
+package stream
+
+func (s *StreamConfig) SetDefaults() {
+	s.Retention = "Limits"
+	s.Discard = "Old"
+	s.Storage = "File"
+	s.Replicas = 1
+	s.MaxConsumers = -1
+	s.MaxMsgs = -1
+	s.MaxBytes = -1
+	s.MaxMsgSize = -1
+	s.MaxAge = "0s"
+	s.MaxMsgsPerSubject = -1
+	s.Duplicates = "2m0s"
+}
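Taken together, default.go and convert.go define the path from the CRD schema to the nats.go client configuration: SetDefaults seeds the schema defaults, and ConfigV1Alpha1ToNats translates them, parsing the duration strings along the way. A minimal sketch of that flow, with a hypothetical stream name and subject:

```go
package main

import (
	"fmt"
	"log"

	"github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream"
)

func main() {
	cfg := &stream.StreamConfig{}
	cfg.SetDefaults()
	cfg.Subjects = []string{"telemetry.>"} // hypothetical
	cfg.MaxAge = "24h"                     // parsed via time.ParseDuration in convertDurations

	// Translate the CRD-level config into the nats.go representation
	// that the provider hands to JetStream.
	natsCfg, err := stream.ConfigV1Alpha1ToNats("telemetry", cfg)
	if err != nil {
		log.Fatal(err) // e.g. an unparseable MaxAge or Duplicates value
	}
	fmt.Println(natsCfg.Name, natsCfg.MaxAge, natsCfg.Storage)
}
```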
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/doc.go b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/doc.go
new file mode 100644
index 00000000..89eba6da
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/doc.go
@@ -0,0 +1,2 @@
+// +k8s:deepcopy-gen=package
+package stream
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/stream_types.go b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/stream_types.go
new file mode 100644
index 00000000..2e317d41
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/stream_types.go
@@ -0,0 +1,273 @@
+/*
+Copyright 2023 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package stream
+
+// +kubebuilder:object:generate=true
+// StreamConfig will determine the properties for a stream.
+// There are sensible defaults for most.
+type StreamConfig struct {
+	// Description is a human readable description of the stream.
+	// +kubebuilder:validation:Optional
+	Description string `json:"description,omitempty"`
+
+	// Subjects is a list of subjects to consume, supports wildcards.
+	// +kubebuilder:validation:Optional
+	Subjects []string `json:"subjects,omitempty"`
+
+	// Retention defines the retention policy for the stream.
+	// +kubebuilder:validation:Enum=Limits;Interest;WorkQueue
+	// +kubebuilder:validation:Required
+	// +kubebuilder:default=Limits
+	Retention string `json:"retention"`
+
+	// MaxConsumers defines how many Consumers can be defined for a given Stream.
+	// Define -1 for unlimited.
+	// +kubebuilder:default=-1
+	// +kubebuilder:validation:Required
+	MaxConsumers int `json:"maxConsumers"`
+
+	// MaxMsgs defines how many messages may be in a Stream.
+	// Adheres to Discard Policy, removing oldest or refusing new messages if the Stream exceeds this number of messages.
+	// +kubebuilder:default=-1
+	// +kubebuilder:validation:Required
+	MaxMsgs int64 `json:"maxMsgs"`
+
+	// MaxBytes defines how many bytes the Stream may contain.
+	// Adheres to Discard Policy, removing oldest or refusing new messages if the Stream exceeds this size.
+	// +kubebuilder:default=-1
+	// +kubebuilder:validation:Required
+	MaxBytes int64 `json:"maxBytes"`
+
+	// Discard defines the behavior of discarding messages when any of the stream's limits have been reached.
+	// Old (default): This policy will delete the oldest messages in order to maintain the limit. For example, if MaxAge is set to one minute, the server will automatically delete messages older than one minute with this policy.
+	// New: This policy will reject new messages from being appended to the stream if it would exceed one of the limits. An extension to this policy is DiscardNewPerSubject which will apply this policy on a per-subject basis within the stream.
+	// +kubebuilder:validation:Enum=Old;New
+	// +kubebuilder:default=Old
+	// +kubebuilder:validation:Required
+	Discard string `json:"discard"`
+
+	// DiscardNewPerSubject applies the New discard policy on a per-subject basis within the stream.
+	// +kubebuilder:default=false
+	// +kubebuilder:validation:Optional
+	DiscardNewPerSubject bool `json:"discardNewPerSubject"`
+
+	// MaxAge is the maximum age of a message in the stream.
+	// Format is a string duration, e.g. 1h, 1m, 1s, 1h30m or 2h3m4s.
+	// +kubebuilder:validation:Pattern="([0-9]+h)?([0-9]+m)?([0-9]+s)?"
+	// +kubebuilder:default="0s"
+	// +kubebuilder:validation:Optional
+	MaxAge string `json:"maxAge"`
+
+	// MaxMsgsPerSubject limits how many messages in the stream are retained per subject.
+	// +kubebuilder:default=-1
+	// +kubebuilder:validation:Minimum=-1
+	// +kubebuilder:validation:Optional
+	MaxMsgsPerSubject int64 `json:"maxMsgsPerSubject"`
+
+	// MaxMsgSize defines the largest message that will be accepted by the Stream.
+	// +kubebuilder:default=-1
+	// +kubebuilder:validation:Minimum=-1
+	// +kubebuilder:validation:Optional
+	MaxMsgSize int32 `json:"maxMsgSize"`
+
+	// Storage defines the storage type for stream data.
+	// +kubebuilder:validation:Enum=File;Memory
+	// +kubebuilder:default=File
+	Storage string `json:"storage"`
+
+	// Replicas defines how many replicas to keep for each message in a clustered JetStream.
+	// +kubebuilder:default=1
+	// +kubebuilder:validation:Minimum=1
+	// +kubebuilder:validation:Maximum=5
+	// +kubebuilder:validation:Optional
+	Replicas int `json:"replicas"`
+
+	// NoAck is a flag to disable acknowledging messages that are received by the Stream.
+	// +kubebuilder:default=false
+	// +kubebuilder:validation:Optional
+	NoAck bool `json:"noAck"`
+
+	// TemplateOwner is the owner of the template associated with this stream.
+	// +kubebuilder:validation:Optional
+	TemplateOwner string `json:"template,omitempty"`
+
+	// Duplicates defines the time window within which to track duplicate messages.
+	// +kubebuilder:default="2m0s"
+	// +kubebuilder:validation:Pattern="^(([0-9]+[smh]){1,3})$"
+	// +kubebuilder:validation:Optional
+	Duplicates string `json:"duplicates,omitempty"`
+
+	// Placement is the placement policy for the stream.
+	// +kubebuilder:validation:Optional
+	Placement *Placement `json:"placement,omitempty"`
+
+	// Mirror is the mirror configuration for the stream.
+	// +kubebuilder:validation:Optional
+	Mirror *StreamSource `json:"mirror,omitempty"`
+
+	// Sources is the list of one or more source configurations for the stream.
+	// +kubebuilder:validation:Optional
+	Sources []*StreamSource `json:"sources,omitempty"`
+
+	// Sealed is a flag to prevent message deletion from the stream via limits or API.
+	// +kubebuilder:validation:Optional
+	Sealed bool `json:"sealed,omitempty"`
+
+	// DenyDelete is a flag to restrict the ability to delete messages from a stream via the API.
+	// +kubebuilder:validation:Optional
+	DenyDelete bool `json:"denyDelete,omitempty"`
+
+	// DenyPurge is a flag to restrict the ability to purge messages from a stream via the API.
+	// +kubebuilder:validation:Optional
+	DenyPurge bool `json:"denyPurge,omitempty"`
+
+	// AllowRollup is a flag to allow the use of the Nats-Rollup header to replace all contents of a stream, or subject in a stream, with a single new message.
+	// +kubebuilder:validation:Optional
+	AllowRollup bool `json:"allowRollup,omitempty"`
+
+	// RePublish allows republishing a message after it has been sequenced and stored.
+	// +kubebuilder:validation:Optional
+	RePublish *RePublish `json:"rePublish,omitempty"`
+
+	// AllowDirect is a flag; if true and the stream has more than one replica, each replica will respond to direct get requests for individual messages, not only the leader.
+	// +kubebuilder:validation:Optional
+	AllowDirect bool `json:"allowDirect,omitempty"`
+
+	// MirrorDirect is a flag; if true and the stream is a mirror, the mirror will participate in serving direct get requests for individual messages from the origin stream.
+	// +kubebuilder:validation:Optional
+	MirrorDirect bool `json:"mirrorDirect,omitempty"`
+}
+
+// RePublish is for republishing messages once committed to a stream. The original subject is remapped from the subject pattern to the destination pattern.
+// For information on RePublish see https://docs.nats.io/nats-concepts/jetstream/streams#republish
+type RePublish struct {
+	// Source is an optional subject pattern which is a subset of the subjects bound to the stream. It defaults to all messages in the stream, e.g. >.
+	// +kubebuilder:default=">"
+	Source string `json:"source"`
+
+	// Destination is the destination subject messages will be re-published to. The source and destination must be a valid subject mapping.
+	// For information on subject mapping see https://docs.nats.io/jetstream/concepts/subjects#subject-mapping
+	Destination string `json:"destination"`
+
+	// HeadersOnly defines, if true, that the message data will not be included in the re-published message, only an additional header Nats-Msg-Size indicating the size of the message in bytes.
+	// +kubebuilder:validation:Optional
+	HeadersOnly bool `json:"headersOnly,omitempty"`
+}
+
+// Placement is used to guide placement of streams in clustered JetStream.
+// For information on Placement see https://docs.nats.io/nats-concepts/jetstream/streams#placement
+type Placement struct {
+	// Cluster is the name of the Jetstream cluster.
+	Cluster string `json:"cluster"`
+
+	// Tags defines a list of server tags.
+	// +kubebuilder:validation:Optional
+	Tags []string `json:"tags,omitempty"`
+}
+
+// StreamSource dictates how streams can source from other streams.
+type StreamSource struct {
+	// Name of the origin stream to source messages from.
+	Name string `json:"name"`
+
+	// StartSeq is an optional start sequence of the origin stream to start mirroring from.
+	// +kubebuilder:validation:Optional
+	StartSeq uint64 `json:"startSeq,omitempty"`
+
+	// StartTime is an optional message start time to start mirroring from. Any messages that are equal to or greater than the start time will be included.
+	// The time format is RFC 3339, e.g. 2023-01-09T14:48:32Z
+	// +kubebuilder:validation:Pattern="^((?:(\\d{4}-\\d{2}-\\d{2})T(\\d{2}:\\d{2}:\\d{2}(?:\\.\\d+)?))(Z|[\\+-]\\d{2}:\\d{2})?)$"
+	// +kubebuilder:validation:Optional
+	StartTime string `json:"startTime,omitempty"`
+
+	// FilterSubject is an optional filter subject which will include only messages that match the subject, typically including a wildcard.
+	// +kubebuilder:validation:Optional
+	FilterSubject string `json:"filterSubject,omitempty"`
+
+	// Domain is the JetStream domain of where the origin stream exists. This is commonly used between a cluster/supercluster and a leaf node/cluster.
+	Domain string `json:"domain,omitempty"`
+
+	// External is the external stream configuration.
+	// +kubebuilder:validation:Optional
+	External *ExternalStream `json:"external,omitempty"`
+}
+
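StreamSource (above) and ExternalStream (directly below) together describe cross-domain or cross-account sourcing. A sketch of a mirror configuration that pulls from an origin stream in another JetStream domain; all names and the API prefix are hypothetical:

```go
package main

import (
	"fmt"

	"github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream"
)

func main() {
	cfg := &stream.StreamConfig{}
	cfg.SetDefaults()
	// A mirrored stream does not bind its own subjects; it replicates
	// the origin stream identified here instead.
	cfg.Mirror = &stream.StreamSource{
		Name:   "origin-stream", // hypothetical
		Domain: "hub",           // hypothetical origin domain
		External: &stream.ExternalStream{
			APIPrefix: "$JS.hub.API", // hypothetical exported API prefix
		},
	}
	fmt.Println(cfg.Mirror.Name, cfg.Mirror.Domain)
}
```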
+// ExternalStream allows you to qualify access to a stream source in another
+// account.
+type ExternalStream struct {
+	// APIPrefix is the prefix for the API of the external stream.
+	APIPrefix string `json:"apiPrefix"`
+
+	// DeliverPrefix is the prefix for the deliver subject of the external stream.
+	// +kubebuilder:validation:Optional
+	DeliverPrefix string `json:"deliverPrefix,omitempty"`
+}
+
+type StreamObservationState struct {
+	// Messages is the number of messages in the stream.
+	Messages uint64 `json:"messages"`
+	// Bytes is the number of bytes in the stream.
+	Bytes string `json:"bytes"`
+	// FirstSequence is the first sequence number in the stream.
+	FirstSequence uint64 `json:"firstSequence"`
+	// FirstTimestamp is the first timestamp in the stream.
+	FirstTimestamp string `json:"firstTimestamp,omitempty"`
+	// LastSequence is the last sequence number in the stream.
+	LastSequence uint64 `json:"lastSequence"`
+	// LastTimestamp is the last timestamp in the stream.
+	LastTimestamp string `json:"lastTimestamp,omitempty"`
+	// ConsumerCount is the number of consumers in the stream.
+	ConsumerCount int `json:"consumerCount"`
+	// Deleted is the list of deleted message sequence numbers.
+	Deleted []uint64 `json:"deleted,omitempty"`
+	// NumDeleted is the number of deleted messages in the stream.
+	NumDeleted int `json:"numDeleted,omitempty"`
+	// NumSubjects is the number of subjects in the stream.
+	NumSubjects uint64 `json:"numSubjects,omitempty"`
+	// Subjects is a map of subjects to their number of messages.
+	Subjects map[string]uint64 `json:"subjects,omitempty"`
+}
+
+// StreamObservationClusterInfo shows information about the underlying set of servers
+// that make up the stream or consumer.
+type StreamObservationClusterInfo struct {
+	// Name is the name of the cluster.
+	Name string `json:"name,omitempty"`
+	// Leader is the leader of the cluster.
+	Leader string `json:"leader,omitempty"`
+	// Replicas are the replicas of the cluster.
+	Replicas []*PeerInfo `json:"replicas,omitempty"`
+}
+
+type StreamObservationConnection struct {
+	// Address is the address of the connection.
+	Address string `json:"address"`
+	// AccountPublicKey is the public key of the used account.
+	AccountPublicKey string `json:"accountPublicKey"`
+	// UserPublicKey is the public key of the used user.
+	UserPublicKey string `json:"userPublicKey"`
+}
+
+// PeerInfo shows information about all the peers in the cluster that
+// are supporting the stream or consumer.
+type PeerInfo struct {
+	Name    string `json:"name"`
+	Current bool   `json:"current"`
+	Offline bool   `json:"offline,omitempty"`
+	Active  string `json:"active"`
+	Lag     uint64 `json:"lag,omitempty"`
+}
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/zz_generated.deepcopy.go b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/zz_generated.deepcopy.go
new file mode 100644
index 00000000..40eb6537
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream/zz_generated.deepcopy.go
@@ -0,0 +1,223 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2020 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package stream
+
+import ()
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalStream) DeepCopyInto(out *ExternalStream) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalStream. +func (in *ExternalStream) DeepCopy() *ExternalStream { + if in == nil { + return nil + } + out := new(ExternalStream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PeerInfo) DeepCopyInto(out *PeerInfo) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PeerInfo. +func (in *PeerInfo) DeepCopy() *PeerInfo { + if in == nil { + return nil + } + out := new(PeerInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Placement) DeepCopyInto(out *Placement) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Placement. +func (in *Placement) DeepCopy() *Placement { + if in == nil { + return nil + } + out := new(Placement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RePublish) DeepCopyInto(out *RePublish) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RePublish. +func (in *RePublish) DeepCopy() *RePublish { + if in == nil { + return nil + } + out := new(RePublish) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamConfig) DeepCopyInto(out *StreamConfig) { + *out = *in + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = new(Placement) + (*in).DeepCopyInto(*out) + } + if in.Mirror != nil { + in, out := &in.Mirror, &out.Mirror + *out = new(StreamSource) + (*in).DeepCopyInto(*out) + } + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]*StreamSource, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(StreamSource) + (*in).DeepCopyInto(*out) + } + } + } + if in.RePublish != nil { + in, out := &in.RePublish, &out.RePublish + *out = new(RePublish) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamConfig. +func (in *StreamConfig) DeepCopy() *StreamConfig { + if in == nil { + return nil + } + out := new(StreamConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamObservationClusterInfo) DeepCopyInto(out *StreamObservationClusterInfo) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = make([]*PeerInfo, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(PeerInfo) + **out = **in + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamObservationClusterInfo. 
+func (in *StreamObservationClusterInfo) DeepCopy() *StreamObservationClusterInfo { + if in == nil { + return nil + } + out := new(StreamObservationClusterInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamObservationConnection) DeepCopyInto(out *StreamObservationConnection) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamObservationConnection. +func (in *StreamObservationConnection) DeepCopy() *StreamObservationConnection { + if in == nil { + return nil + } + out := new(StreamObservationConnection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamObservationState) DeepCopyInto(out *StreamObservationState) { + *out = *in + if in.Deleted != nil { + in, out := &in.Deleted, &out.Deleted + *out = make([]uint64, len(*in)) + copy(*out, *in) + } + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make(map[string]uint64, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamObservationState. +func (in *StreamObservationState) DeepCopy() *StreamObservationState { + if in == nil { + return nil + } + out := new(StreamObservationState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamSource) DeepCopyInto(out *StreamSource) { + *out = *in + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalStream) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamSource. +func (in *StreamSource) DeepCopy() *StreamSource { + if in == nil { + return nil + } + out := new(StreamSource) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..55d5e2a6 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,153 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Stream) DeepCopyInto(out *Stream) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Stream. +func (in *Stream) DeepCopy() *Stream { + if in == nil { + return nil + } + out := new(Stream) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Stream) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamList) DeepCopyInto(out *StreamList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Stream, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamList. +func (in *StreamList) DeepCopy() *StreamList { + if in == nil { + return nil + } + out := new(StreamList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StreamList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamObservation) DeepCopyInto(out *StreamObservation) { + *out = *in + in.State.DeepCopyInto(&out.State) + in.ClusterInfo.DeepCopyInto(&out.ClusterInfo) + out.Connection = in.Connection +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamObservation. +func (in *StreamObservation) DeepCopy() *StreamObservation { + if in == nil { + return nil + } + out := new(StreamObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamParameters) DeepCopyInto(out *StreamParameters) { + *out = *in + in.Config.DeepCopyInto(&out.Config) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamParameters. +func (in *StreamParameters) DeepCopy() *StreamParameters { + if in == nil { + return nil + } + out := new(StreamParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StreamSpec) DeepCopyInto(out *StreamSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamSpec. +func (in *StreamSpec) DeepCopy() *StreamSpec { + if in == nil { + return nil + } + out := new(StreamSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StreamStatus) DeepCopyInto(out *StreamStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + in.AtProvider.DeepCopyInto(&out.AtProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StreamStatus. +func (in *StreamStatus) DeepCopy() *StreamStatus { + if in == nil { + return nil + } + out := new(StreamStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/zz_generated.managed.go b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/zz_generated.managed.go new file mode 100644 index 00000000..09d9c9fd --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/zz_generated.managed.go @@ -0,0 +1,86 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Stream. +func (mg *Stream) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Stream. +func (mg *Stream) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetProviderConfigReference of this Stream. +func (mg *Stream) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this Stream. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *Stream) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this Stream. +func (mg *Stream) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Stream. +func (mg *Stream) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Stream. +func (mg *Stream) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Stream. +func (mg *Stream) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetProviderConfigReference of this Stream. +func (mg *Stream) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this Stream. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *Stream) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this Stream. +func (mg *Stream) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Stream. 
+func (mg *Stream) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/zz_generated.managedlist.go b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 00000000..9abb7cc1 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/stream/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,29 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this StreamList. +func (l *StreamList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/doc.go b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/doc.go new file mode 100644 index 00000000..9c30a179 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 diff --git a/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/groupversion_info.go b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/groupversion_info.go new file mode 100644 index 00000000..5dfdc867 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/groupversion_info.go @@ -0,0 +1,42 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains the core resources of the NATS provider. +// +kubebuilder:object:generate=true +// +groupName=nats.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. 
+const (
+	Group   = "nats.crossplane.io"
+	Version = "v1alpha1"
+)
+
+var (
+	// SchemeGroupVersion is group version used to register these objects
+	SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+
+	AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/providerconfig_types.go b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/providerconfig_types.go
new file mode 100644
index 00000000..114fa85c
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/providerconfig_types.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2020 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"reflect"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+// A ProviderConfigSpec defines the desired state of a ProviderConfig.
+type ProviderConfigSpec struct {
+	// Credentials required to authenticate to this provider.
+	Credentials ProviderCredentials `json:"credentials"`
+}
+
+// ProviderCredentials required to authenticate.
+type ProviderCredentials struct {
+	// Source of the provider credentials.
+	// +kubebuilder:validation:Enum=None;Secret;InjectedIdentity;Environment;Filesystem
+	Source xpv1.CredentialsSource `json:"source"`
+
+	xpv1.CommonCredentialSelectors `json:",inline"`
+}
+
+// A ProviderConfigStatus reflects the observed state of a ProviderConfig.
+type ProviderConfigStatus struct {
+	xpv1.ProviderConfigStatus `json:",inline"`
+}
+
+// +kubebuilder:object:root=true
+
+// A ProviderConfig configures a NATS provider.
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="SECRET-NAME",type="string",JSONPath=".spec.credentials.secretRef.name",priority=1
+// +kubebuilder:resource:scope=Cluster
+type ProviderConfig struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   ProviderConfigSpec   `json:"spec"`
+	Status ProviderConfigStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ProviderConfigList contains a list of ProviderConfig.
+type ProviderConfigList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ProviderConfig `json:"items"`
+}
+
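A sketch of how a ProviderConfig wired to Secret-based credentials looks when constructed with these types; the secret name, namespace, and key are hypothetical:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"

	natsv1alpha1 "github.com/edgefarm/provider-nats/apis/v1alpha1"
)

func main() {
	pc := natsv1alpha1.ProviderConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "default"},
		Spec: natsv1alpha1.ProviderConfigSpec{
			Credentials: natsv1alpha1.ProviderCredentials{
				// Read the NATS credentials from a Kubernetes Secret.
				Source: xpv1.CredentialsSourceSecret,
				CommonCredentialSelectors: xpv1.CommonCredentialSelectors{
					SecretRef: &xpv1.SecretKeySelector{
						SecretReference: xpv1.SecretReference{
							Name:      "nats-creds",        // hypothetical
							Namespace: "crossplane-system", // hypothetical
						},
						Key: "creds", // hypothetical
					},
				},
			},
		},
	}
	fmt.Println(pc.Name, pc.Spec.Credentials.Source)
}
```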
+// ProviderConfig type metadata.
+var (
+	ProviderConfigKind             = reflect.TypeOf(ProviderConfig{}).Name()
+	ProviderConfigGroupKind        = schema.GroupKind{Group: Group, Kind: ProviderConfigKind}.String()
+	ProviderConfigKindAPIVersion   = ProviderConfigKind + "." + SchemeGroupVersion.String()
+	ProviderConfigGroupVersionKind = SchemeGroupVersion.WithKind(ProviderConfigKind)
+)
+
+func init() {
+	SchemeBuilder.Register(&ProviderConfig{}, &ProviderConfigList{})
+}
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/providerconfigusage_types.go b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/providerconfigusage_types.go
new file mode 100644
index 00000000..9d30cff9
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/providerconfigusage_types.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2021 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"reflect"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+// +kubebuilder:object:root=true
+
+// A ProviderConfigUsage indicates that a resource is using a ProviderConfig.
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="CONFIG-NAME",type="string",JSONPath=".providerConfigRef.name"
+// +kubebuilder:printcolumn:name="RESOURCE-KIND",type="string",JSONPath=".resourceRef.kind"
+// +kubebuilder:printcolumn:name="RESOURCE-NAME",type="string",JSONPath=".resourceRef.name"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,provider,nats}
+type ProviderConfigUsage struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	xpv1.ProviderConfigUsage `json:",inline"`
+}
+
+// +kubebuilder:object:root=true
+
+// ProviderConfigUsageList contains a list of ProviderConfigUsage
+type ProviderConfigUsageList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []ProviderConfigUsage `json:"items"`
+}
+
+// ProviderConfigUsage type metadata.
+var (
+	ProviderConfigUsageKind             = reflect.TypeOf(ProviderConfigUsage{}).Name()
+	ProviderConfigUsageGroupKind        = schema.GroupKind{Group: Group, Kind: ProviderConfigUsageKind}.String()
+	ProviderConfigUsageKindAPIVersion   = ProviderConfigUsageKind + "." + SchemeGroupVersion.String()
+	ProviderConfigUsageGroupVersionKind = SchemeGroupVersion.WithKind(ProviderConfigUsageKind)
+
+	ProviderConfigUsageListKind             = reflect.TypeOf(ProviderConfigUsageList{}).Name()
+	ProviderConfigUsageListGroupKind        = schema.GroupKind{Group: Group, Kind: ProviderConfigUsageListKind}.String()
+	ProviderConfigUsageListKindAPIVersion   = ProviderConfigUsageListKind + "." + SchemeGroupVersion.String()
+ SchemeGroupVersion.String()
+ ProviderConfigUsageListGroupVersionKind = SchemeGroupVersion.WithKind(ProviderConfigUsageListKind)
+)
+
+func init() {
+ SchemeBuilder.Register(&ProviderConfigUsage{}, &ProviderConfigUsageList{})
+}
diff --git a/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/storeconfig_types.go b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/storeconfig_types.go
new file mode 100644
index 00000000..6b88e32e
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/storeconfig_types.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2020 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "reflect"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+)
+
+// A StoreConfigSpec defines the desired state of a StoreConfig.
+type StoreConfigSpec struct {
+ xpv1.SecretStoreConfig `json:",inline"`
+}
+
+// A StoreConfigStatus represents the status of a StoreConfig.
+type StoreConfigStatus struct {
+ xpv1.ConditionedStatus `json:",inline"`
+}
+
+// +kubebuilder:object:root=true
+
+// A StoreConfig configures how the NATS provider controller should store connection details.
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="TYPE",type="string",JSONPath=".spec.type"
+// +kubebuilder:printcolumn:name="DEFAULT-SCOPE",type="string",JSONPath=".spec.defaultScope"
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,store,nats}
+// +kubebuilder:subresource:status
+type StoreConfig struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec StoreConfigSpec `json:"spec"`
+ Status StoreConfigStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// StoreConfigList contains a list of StoreConfig
+type StoreConfigList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []StoreConfig `json:"items"`
+}
+
+// Note(turkenh): To be generated with AngryJet
+
+// GetStoreConfig returns SecretStoreConfig
+func (in *StoreConfig) GetStoreConfig() xpv1.SecretStoreConfig {
+ return in.Spec.SecretStoreConfig
+}
+
+// GetCondition of this StoreConfig.
+func (in *StoreConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition {
+ return in.Status.GetCondition(ct)
+}
+
+// SetConditions of this StoreConfig.
+func (in *StoreConfig) SetConditions(c ...xpv1.Condition) {
+ in.Status.SetConditions(c...)
+}
+
+// StoreConfig type metadata.
+var (
+ StoreConfigKind = reflect.TypeOf(StoreConfig{}).Name()
+ StoreConfigGroupKind = schema.GroupKind{Group: Group, Kind: StoreConfigKind}.String()
+ StoreConfigKindAPIVersion = StoreConfigKind + "." 
+ SchemeGroupVersion.String() + StoreConfigGroupVersionKind = SchemeGroupVersion.WithKind(StoreConfigKind) +) + +func init() { + SchemeBuilder.Register(&StoreConfig{}, &StoreConfigList{}) +} diff --git a/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..1b1d509e --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,282 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfig) DeepCopyInto(out *ProviderConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfig. +func (in *ProviderConfig) DeepCopy() *ProviderConfig { + if in == nil { + return nil + } + out := new(ProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProviderConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigList) DeepCopyInto(out *ProviderConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProviderConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigList. +func (in *ProviderConfigList) DeepCopy() *ProviderConfigList { + if in == nil { + return nil + } + out := new(ProviderConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProviderConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigSpec) DeepCopyInto(out *ProviderConfigSpec) { + *out = *in + in.Credentials.DeepCopyInto(&out.Credentials) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigSpec. 
+func (in *ProviderConfigSpec) DeepCopy() *ProviderConfigSpec { + if in == nil { + return nil + } + out := new(ProviderConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigStatus) DeepCopyInto(out *ProviderConfigStatus) { + *out = *in + in.ProviderConfigStatus.DeepCopyInto(&out.ProviderConfigStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigStatus. +func (in *ProviderConfigStatus) DeepCopy() *ProviderConfigStatus { + if in == nil { + return nil + } + out := new(ProviderConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigUsage) DeepCopyInto(out *ProviderConfigUsage) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.ProviderConfigUsage.DeepCopyInto(&out.ProviderConfigUsage) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigUsage. +func (in *ProviderConfigUsage) DeepCopy() *ProviderConfigUsage { + if in == nil { + return nil + } + out := new(ProviderConfigUsage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProviderConfigUsage) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderConfigUsageList) DeepCopyInto(out *ProviderConfigUsageList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ProviderConfigUsage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderConfigUsageList. +func (in *ProviderConfigUsageList) DeepCopy() *ProviderConfigUsageList { + if in == nil { + return nil + } + out := new(ProviderConfigUsageList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ProviderConfigUsageList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProviderCredentials) DeepCopyInto(out *ProviderCredentials) { + *out = *in + in.CommonCredentialSelectors.DeepCopyInto(&out.CommonCredentialSelectors) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProviderCredentials. +func (in *ProviderCredentials) DeepCopy() *ProviderCredentials { + if in == nil { + return nil + } + out := new(ProviderCredentials) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StoreConfig) DeepCopyInto(out *StoreConfig) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreConfig. +func (in *StoreConfig) DeepCopy() *StoreConfig { + if in == nil { + return nil + } + out := new(StoreConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StoreConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StoreConfigList) DeepCopyInto(out *StoreConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StoreConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreConfigList. +func (in *StoreConfigList) DeepCopy() *StoreConfigList { + if in == nil { + return nil + } + out := new(StoreConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StoreConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StoreConfigSpec) DeepCopyInto(out *StoreConfigSpec) { + *out = *in + in.SecretStoreConfig.DeepCopyInto(&out.SecretStoreConfig) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreConfigSpec. +func (in *StoreConfigSpec) DeepCopy() *StoreConfigSpec { + if in == nil { + return nil + } + out := new(StoreConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StoreConfigStatus) DeepCopyInto(out *StoreConfigStatus) { + *out = *in + in.ConditionedStatus.DeepCopyInto(&out.ConditionedStatus) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StoreConfigStatus. +func (in *StoreConfigStatus) DeepCopy() *StoreConfigStatus { + if in == nil { + return nil + } + out := new(StoreConfigStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/zz_generated.pc.go b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/zz_generated.pc.go new file mode 100644 index 00000000..28fad33d --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/zz_generated.pc.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this ProviderConfig. +func (p *ProviderConfig) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return p.Status.GetCondition(ct) +} + +// GetUsers of this ProviderConfig. +func (p *ProviderConfig) GetUsers() int64 { + return p.Status.Users +} + +// SetConditions of this ProviderConfig. +func (p *ProviderConfig) SetConditions(c ...xpv1.Condition) { + p.Status.SetConditions(c...) +} + +// SetUsers of this ProviderConfig. +func (p *ProviderConfig) SetUsers(i int64) { + p.Status.Users = i +} diff --git a/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/zz_generated.pcu.go b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/zz_generated.pcu.go new file mode 100644 index 00000000..710c6c6a --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/zz_generated.pcu.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetProviderConfigReference of this ProviderConfigUsage. +func (p *ProviderConfigUsage) GetProviderConfigReference() xpv1.Reference { + return p.ProviderConfigReference +} + +// GetResourceReference of this ProviderConfigUsage. +func (p *ProviderConfigUsage) GetResourceReference() xpv1.TypedReference { + return p.ResourceReference +} + +// SetProviderConfigReference of this ProviderConfigUsage. +func (p *ProviderConfigUsage) SetProviderConfigReference(r xpv1.Reference) { + p.ProviderConfigReference = r +} + +// SetResourceReference of this ProviderConfigUsage. +func (p *ProviderConfigUsage) SetResourceReference(r xpv1.TypedReference) { + p.ResourceReference = r +} diff --git a/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/zz_generated.pculist.go b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/zz_generated.pculist.go new file mode 100644 index 00000000..468bad08 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/apis/v1alpha1/zz_generated.pculist.go @@ -0,0 +1,29 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. 
+ +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this ProviderConfigUsageList. +func (p *ProviderConfigUsageList) GetItems() []resource.ProviderConfigUsage { + items := make([]resource.ProviderConfigUsage, len(p.Items)) + for i := range p.Items { + items[i] = &p.Items[i] + } + return items +} diff --git a/vendor/github.com/edgefarm/provider-nats/internal/convert/time.go b/vendor/github.com/edgefarm/provider-nats/internal/convert/time.go new file mode 100644 index 00000000..dec43e41 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-nats/internal/convert/time.go @@ -0,0 +1,22 @@ +package convert + +import ( + "time" +) + +func TimeToRFC3339(t *time.Time) (string, error) { + str, err := t.MarshalText() + if err != nil { + return "", err + } + return string(str), nil +} + +func RFC3339ToTime(t string) (*time.Time, error) { + r := &time.Time{} + err := r.UnmarshalText([]byte(t)) + if err != nil { + return r, err + } + return r, nil +} diff --git a/vendor/github.com/edgefarm/provider-natssecrets/LICENSE b/vendor/github.com/edgefarm/provider-natssecrets/LICENSE new file mode 100644 index 00000000..ae6b59dc --- /dev/null +++ b/vendor/github.com/edgefarm/provider-natssecrets/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 The Crossplane Authors. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/account_types.go b/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/account_types.go new file mode 100644 index 00000000..0d0016e5 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/account_types.go @@ -0,0 +1,104 @@ +/* +Copyright 2022 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1alpha1
+
+import (
+ "reflect"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1"
+ "github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1"
+)
+
+// AccountParameters are the configurable fields of an Account.
+type AccountParameters struct {
+ Operator string `json:"operator"`
+ // +kubebuilder:validation:Optional
+ UseSigningKey string `json:"useSigningKey,omitempty"`
+ // +kubebuilder:validation:Optional
+ Claims v1alpha1.AccountClaims `json:"claims,omitempty"`
+}
+
+// AccountObservation are the observable fields of an Account.
+type AccountObservation struct {
+ Operator string `json:"operator,omitempty"`
+ Account string `json:"account,omitempty"`
+ Issue string `json:"issue,omitempty"`
+ NKey string `json:"nkey,omitempty"`
+ JWT string `json:"jwt,omitempty"`
+ NKeyPath string `json:"nkeyPath,omitempty"`
+ JWTPath string `json:"jwtPath,omitempty"`
+ Pushed string `json:"pushed,omitempty"`
+ LastPushed string `json:"lastPushed,omitempty"`
+}
+
+// An AccountSpec defines the desired state of an Account.
+type AccountSpec struct {
+ xpv1.ResourceSpec `json:",inline"`
+ ForProvider AccountParameters `json:"forProvider"`
+}
+
+// An AccountStatus represents the observed state of an Account.
+type AccountStatus struct {
+ xpv1.ResourceStatus `json:",inline"`
+ AtProvider AccountObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// An Account is a managed resource that represents a NATS account.
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="OPERATOR",type="string",JSONPath=".status.atProvider.operator"
+// +kubebuilder:printcolumn:name="NKEY",type="string",priority=1,JSONPath=".status.atProvider.nkey"
+// +kubebuilder:printcolumn:name="JWT",type="string",priority=1,JSONPath=".status.atProvider.jwt"
+// +kubebuilder:printcolumn:name="PUSHED",type="string",priority=1,JSONPath=".status.atProvider.pushed"
+// +kubebuilder:printcolumn:name="LAST PUSHED",type="string",priority=1,JSONPath=".status.atProvider.lastPushed"
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,natssecrets}
+type Account struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec AccountSpec `json:"spec"`
+ Status AccountStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// AccountList contains a list of Account
+type AccountList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Account `json:"items"`
+}
+
+// Account type metadata.
+var (
+ AccountKind = reflect.TypeOf(Account{}).Name()
+ AccountGroupKind = schema.GroupKind{Group: Group, Kind: AccountKind}.String()
+ AccountKindAPIVersion = AccountKind + "." 
+ SchemeGroupVersion.String() + AccountGroupVersionKind = SchemeGroupVersion.WithKind(AccountKind) +) + +func init() { + SchemeBuilder.Register(&Account{}, &AccountList{}) +} diff --git a/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/doc.go b/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/doc.go new file mode 100644 index 00000000..dc5d3ae2 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2022 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 diff --git a/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/groupversion_info.go b/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/groupversion_info.go new file mode 100644 index 00000000..e600778b --- /dev/null +++ b/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/groupversion_info.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains the v1alpha1 group Sample resources of the NatsSecrets provider. +// +kubebuilder:object:generate=true +// +groupName=issue.natssecrets.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + Group = "issue.natssecrets.crossplane.io" + Version = "v1alpha1" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +) diff --git a/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..2b3569ba --- /dev/null +++ b/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,150 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Account) DeepCopyInto(out *Account) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Account. +func (in *Account) DeepCopy() *Account { + if in == nil { + return nil + } + out := new(Account) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Account) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountList) DeepCopyInto(out *AccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Account, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountList. +func (in *AccountList) DeepCopy() *AccountList { + if in == nil { + return nil + } + out := new(AccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountObservation) DeepCopyInto(out *AccountObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountObservation. +func (in *AccountObservation) DeepCopy() *AccountObservation { + if in == nil { + return nil + } + out := new(AccountObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountParameters) DeepCopyInto(out *AccountParameters) { + *out = *in + in.Claims.DeepCopyInto(&out.Claims) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountParameters. +func (in *AccountParameters) DeepCopy() *AccountParameters { + if in == nil { + return nil + } + out := new(AccountParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AccountSpec) DeepCopyInto(out *AccountSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountSpec. +func (in *AccountSpec) DeepCopy() *AccountSpec { + if in == nil { + return nil + } + out := new(AccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountStatus) DeepCopyInto(out *AccountStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + out.AtProvider = in.AtProvider +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountStatus. +func (in *AccountStatus) DeepCopy() *AccountStatus { + if in == nil { + return nil + } + out := new(AccountStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/zz_generated.managed.go b/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/zz_generated.managed.go new file mode 100644 index 00000000..2ab9745e --- /dev/null +++ b/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/zz_generated.managed.go @@ -0,0 +1,86 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this Account. +func (mg *Account) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this Account. +func (mg *Account) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetProviderConfigReference of this Account. +func (mg *Account) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this Account. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *Account) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this Account. +func (mg *Account) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this Account. +func (mg *Account) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this Account. +func (mg *Account) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this Account. +func (mg *Account) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetProviderConfigReference of this Account. 
+func (mg *Account) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this Account. +Deprecated: Use SetProviderConfigReference. +*/ +func (mg *Account) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this Account. +func (mg *Account) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this Account. +func (mg *Account) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/zz_generated.managedlist.go b/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 00000000..a6ccbde6 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,29 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this AccountList. +func (l *AccountList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/doc.go b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/doc.go new file mode 100644 index 00000000..dc5d3ae2 --- /dev/null +++ b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2022 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 diff --git a/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/groupversion_info.go b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/groupversion_info.go new file mode 100644 index 00000000..e600778b --- /dev/null +++ b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/groupversion_info.go @@ -0,0 +1,40 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains the v1alpha1 group Sample resources of the NatsSecrets provider. +// +kubebuilder:object:generate=true +// +groupName=issue.natssecrets.crossplane.io +// +versionName=v1alpha1 +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +// Package type metadata. +const ( + Group = "issue.natssecrets.crossplane.io" + Version = "v1alpha1" +) + +var ( + // SchemeGroupVersion is group version used to register these objects + SchemeGroupVersion = schema.GroupVersion{Group: Group, Version: Version} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +) diff --git a/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/normalize.go b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/normalize.go new file mode 100644 index 00000000..1449b53a --- /dev/null +++ b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/normalize.go @@ -0,0 +1,21 @@ +package v1alpha1 + +import "github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1" + +func FixEmptySlices(params *v1alpha1.UserClaims) { + if params == nil { + return + } + if params.Permissions.Pub.Allow == nil { + params.Permissions.Pub.Allow = []string{} + } + if params.Permissions.Pub.Deny == nil { + params.Permissions.Pub.Deny = []string{} + } + if params.Permissions.Sub.Allow == nil { + params.Permissions.Sub.Allow = []string{} + } + if params.Permissions.Sub.Deny == nil { + params.Permissions.Sub.Deny = []string{} + } +} diff --git a/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/user_types.go b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/user_types.go new file mode 100644 index 00000000..8e0c034a --- /dev/null +++ b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/user_types.go @@ -0,0 +1,104 @@ +/* +Copyright 2022 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + + xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + "github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1" +) + +// UserParameters are the configurable fields of a User. 
+type UserParameters struct {
+ Operator string `json:"operator"`
+ Account string `json:"account"`
+ // +kubebuilder:validation:Optional
+ UseSigningKey string `json:"useSigningKey,omitempty"`
+ // +kubebuilder:validation:Optional
+ Claims v1alpha1.UserClaims `json:"claims,omitempty"`
+}
+
+// UserObservation are the observable fields of a User.
+type UserObservation struct {
+ Operator string `json:"operator,omitempty"`
+ Account string `json:"account,omitempty"`
+ User string `json:"user,omitempty"`
+ Issue string `json:"issue,omitempty"`
+ NKey string `json:"nkey,omitempty"`
+ JWT string `json:"jwt,omitempty"`
+ Creds string `json:"creds,omitempty"`
+ NKeyPath string `json:"nkeyPath,omitempty"`
+ JWTPath string `json:"jwtPath,omitempty"`
+}
+
+// A UserSpec defines the desired state of a User.
+type UserSpec struct {
+ xpv1.ResourceSpec `json:",inline"`
+ ForProvider UserParameters `json:"forProvider"`
+}
+
+// A UserStatus represents the observed state of a User.
+type UserStatus struct {
+ xpv1.ResourceStatus `json:",inline"`
+ AtProvider UserObservation `json:"atProvider,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// A User is a managed resource that represents a NATS user.
+// +kubebuilder:printcolumn:name="EXTERNAL-NAME",type="string",JSONPath=".metadata.annotations.crossplane\\.io/external-name"
+// +kubebuilder:printcolumn:name="READY",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="SYNCED",type="string",JSONPath=".status.conditions[?(@.type=='Synced')].status"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:printcolumn:name="OPERATOR",type="string",JSONPath=".status.atProvider.operator"
+// +kubebuilder:printcolumn:name="ACCOUNT",type="string",JSONPath=".status.atProvider.account"
+// +kubebuilder:printcolumn:name="NKEY",type="string",priority=1,JSONPath=".status.atProvider.nkey"
+// +kubebuilder:printcolumn:name="JWT",type="string",priority=1,JSONPath=".status.atProvider.jwt"
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:scope=Cluster,categories={crossplane,managed,natssecrets}
+type User struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec UserSpec `json:"spec"`
+ Status UserStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// UserList contains a list of User
+type UserList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []User `json:"items"`
+}
+
+// User type metadata.
+var (
+ UserKind = reflect.TypeOf(User{}).Name()
+ UserGroupKind = schema.GroupKind{Group: Group, Kind: UserKind}.String()
+ UserKindAPIVersion = UserKind + "." + SchemeGroupVersion.String()
+ UserGroupVersionKind = SchemeGroupVersion.WithKind(UserKind)
+)
+
+func init() {
+ SchemeBuilder.Register(&User{}, &UserList{})
+}
diff --git a/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 00000000..bbebc044
--- /dev/null
+++ b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,150 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2020 The Crossplane Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *User) DeepCopyInto(out *User) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. +func (in *User) DeepCopy() *User { + if in == nil { + return nil + } + out := new(User) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *User) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserList) DeepCopyInto(out *UserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]User, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. +func (in *UserList) DeepCopy() *UserList { + if in == nil { + return nil + } + out := new(UserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserObservation) DeepCopyInto(out *UserObservation) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserObservation. +func (in *UserObservation) DeepCopy() *UserObservation { + if in == nil { + return nil + } + out := new(UserObservation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserParameters) DeepCopyInto(out *UserParameters) { + *out = *in + in.Claims.DeepCopyInto(&out.Claims) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserParameters. +func (in *UserParameters) DeepCopy() *UserParameters { + if in == nil { + return nil + } + out := new(UserParameters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserSpec) DeepCopyInto(out *UserSpec) { + *out = *in + in.ResourceSpec.DeepCopyInto(&out.ResourceSpec) + in.ForProvider.DeepCopyInto(&out.ForProvider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSpec. +func (in *UserSpec) DeepCopy() *UserSpec { + if in == nil { + return nil + } + out := new(UserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserStatus) DeepCopyInto(out *UserStatus) { + *out = *in + in.ResourceStatus.DeepCopyInto(&out.ResourceStatus) + out.AtProvider = in.AtProvider +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserStatus. +func (in *UserStatus) DeepCopy() *UserStatus { + if in == nil { + return nil + } + out := new(UserStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/zz_generated.managed.go b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/zz_generated.managed.go new file mode 100644 index 00000000..5cb407aa --- /dev/null +++ b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/zz_generated.managed.go @@ -0,0 +1,86 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import xpv1 "github.com/crossplane/crossplane-runtime/apis/common/v1" + +// GetCondition of this User. +func (mg *User) GetCondition(ct xpv1.ConditionType) xpv1.Condition { + return mg.Status.GetCondition(ct) +} + +// GetDeletionPolicy of this User. +func (mg *User) GetDeletionPolicy() xpv1.DeletionPolicy { + return mg.Spec.DeletionPolicy +} + +// GetProviderConfigReference of this User. +func (mg *User) GetProviderConfigReference() *xpv1.Reference { + return mg.Spec.ProviderConfigReference +} + +/* +GetProviderReference of this User. +Deprecated: Use GetProviderConfigReference. +*/ +func (mg *User) GetProviderReference() *xpv1.Reference { + return mg.Spec.ProviderReference +} + +// GetPublishConnectionDetailsTo of this User. +func (mg *User) GetPublishConnectionDetailsTo() *xpv1.PublishConnectionDetailsTo { + return mg.Spec.PublishConnectionDetailsTo +} + +// GetWriteConnectionSecretToReference of this User. +func (mg *User) GetWriteConnectionSecretToReference() *xpv1.SecretReference { + return mg.Spec.WriteConnectionSecretToReference +} + +// SetConditions of this User. +func (mg *User) SetConditions(c ...xpv1.Condition) { + mg.Status.SetConditions(c...) +} + +// SetDeletionPolicy of this User. +func (mg *User) SetDeletionPolicy(r xpv1.DeletionPolicy) { + mg.Spec.DeletionPolicy = r +} + +// SetProviderConfigReference of this User. +func (mg *User) SetProviderConfigReference(r *xpv1.Reference) { + mg.Spec.ProviderConfigReference = r +} + +/* +SetProviderReference of this User. +Deprecated: Use SetProviderConfigReference. 
+*/ +func (mg *User) SetProviderReference(r *xpv1.Reference) { + mg.Spec.ProviderReference = r +} + +// SetPublishConnectionDetailsTo of this User. +func (mg *User) SetPublishConnectionDetailsTo(r *xpv1.PublishConnectionDetailsTo) { + mg.Spec.PublishConnectionDetailsTo = r +} + +// SetWriteConnectionSecretToReference of this User. +func (mg *User) SetWriteConnectionSecretToReference(r *xpv1.SecretReference) { + mg.Spec.WriteConnectionSecretToReference = r +} diff --git a/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/zz_generated.managedlist.go b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/zz_generated.managedlist.go new file mode 100644 index 00000000..21fa57ab --- /dev/null +++ b/vendor/github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1/zz_generated.managedlist.go @@ -0,0 +1,29 @@ +/* +Copyright 2020 The Crossplane Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by angryjet. DO NOT EDIT. + +package v1alpha1 + +import resource "github.com/crossplane/crossplane-runtime/pkg/resource" + +// GetItems of this UserList. +func (l *UserList) GetItems() []resource.Managed { + items := make([]resource.Managed, len(l.Items)) + for i := range l.Items { + items[i] = &l.Items[i] + } + return items +} diff --git a/vendor/github.com/edgefarm/vault-plugin-secrets-nats/LICENSE b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/LICENSE new file mode 100644 index 00000000..fd82d37f --- /dev/null +++ b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/LICENSE @@ -0,0 +1,364 @@ +Copyright (c) 2017 HashiCorp, Inc. + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. 
"Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. 
You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. diff --git a/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1/api.go b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1/api.go new file mode 100644 index 00000000..17374b3d --- /dev/null +++ b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1/api.go @@ -0,0 +1,219 @@ +/* +Copyright 2023 The EdgeFarm Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "fmt" + + "github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common" + "github.com/nats-io/jwt/v2" +) + +// +kubebuilder:object:generate=true +// Specifies claims of the JWT +type AccountClaims struct { + // Common data for all JWTs + common.ClaimsData `json:",inline"` + // Account specific claims + // +kubebuilder:validation:Optional + Account `json:"account,omitempty"` +} + +// Specifies account specific claims data +type Account struct { + // A list of account/subject combinations that this account is allowed to import + // +kubebuilder:validation:Optional + Imports []Import `json:"imports,omitempty"` + // A list of account/subject combinations that this account is allowed to export + // +kubebuilder:validation:Optional + Exports []Export `json:"exports,omitempty"` + // A set of limits for this account + // +kubebuilder:validation:Optional + Limits OperatorLimits `json:"limits,omitempty"` + // A list of signing keys the account can use + // +kubebuilder:validation:Optional + SigningKeys []string `json:"signingKeys,omitempty"` + // Stores user JWTs that have been revoked and the time they were revoked + // +kubebuilder:validation:Optional + Revocations map[string]int64 `json:"revocations,omitempty"` + // Default pub/sub permissions for this account that users inherit + // +kubebuilder:validation:Optional + DefaultPermissions common.Permissions `json:"defaultPermissions,omitempty"` + // Stores subjects that get mapped to other subjects using a weighted mapping. 
+ // For more information see https://docs.nats.io/nats-concepts/subject_mapping
+ // +kubebuilder:validation:Optional
+ Mappings map[string][]WeightedMapping `json:"mappings,omitempty"`
+ common.Info `json:",inline"`
+ common.GenericFields `json:",inline"`
+}
+
+// WeightedMapping is a mapping from one subject to another with a weight and a destination cluster
+type WeightedMapping struct {
+ // The subject to map to
+ Subject string `json:"subject"`
+ // The share of traffic, out of 100, that this mapping should receive
+ // +kubebuilder:validation:Optional
+ Weight uint8 `json:"weight,omitempty"`
+ // The cluster to map to
+ // +kubebuilder:validation:Optional
+ Cluster string `json:"cluster,omitempty"`
+}
+
+// OperatorLimits represents the limits that are set on an account
+type OperatorLimits struct {
+ common.NatsLimits `json:",inline"`
+ AccountLimits `json:",inline"`
+ JetStreamLimits `json:",inline"`
+ // JetStreamTieredLimits appears to be used only internally by NATS,
+ // so it is not exposed to the user for now.
+ // JetStreamTieredLimits `json:"tieredLimits,omitempty"`
+}
+
+// JetStreamTieredLimits appears to be used only internally by NATS,
+// so it is not exposed to the user for now.
+// type JetStreamTieredLimits map[string]JetStreamLimits
+
+// JetStreamLimits represents the JetStream limits for an account
+type JetStreamLimits struct {
+ // Max number of bytes stored in memory across all streams. (0 means disabled)
+ // +kubebuilder:validation:Optional
+ MemoryStorage int64 `json:"memStorage,omitempty"`
+ // Max number of bytes stored on disk across all streams. (0 means disabled)
+ // +kubebuilder:validation:Optional
+ DiskStorage int64 `json:"diskStorage,omitempty"`
+ // Max number of streams
+ // +kubebuilder:validation:Optional
+ Streams int64 `json:"streams,omitempty"`
+ // Max number of consumers
+ // +kubebuilder:validation:Optional
+ Consumer int64 `json:"consumer,omitempty"`
+ // Max number of acks pending
+ // +kubebuilder:validation:Optional
+ MaxAckPending int64 `json:"maxAckPending,omitempty"`
+ // Max number of bytes a stream can have in memory. (0 means unlimited)
+ // +kubebuilder:validation:Optional
+ // +kubebuilder:default=0
+ MemoryMaxStreamBytes int64 `json:"memMaxStreamBytes,omitempty"`
+ // Max number of bytes a stream can have on disk. 
(0 means unlimited) + // +kubebuilder:validation:Optional + // +kubebuilder:default=0 + DiskMaxStreamBytes int64 `json:"diskMaxStreamBytes,omitempty"` + // Max bytes required by all Streams + // +kubebuilder:validation:Optional + MaxBytesRequired bool `json:"maxBytesRequired,omitempty"` +} + +type AccountLimits struct { + // Max number of imports + // +kubebuilder:validation:Optional + Imports int64 `json:"imports,omitempty"` + // Max number of exports + // +kubebuilder:validation:Optional + Exports int64 `json:"exports,omitempty"` + // Specifies if wildcards are allowed in exports + // +kubebuilder:validation:Optional + WildcardExports bool `json:"wildcardExports,omitempty"` + // Specifies that user JWT can't be bearer token + // +kubebuilder:validation:Optional + DisallowBearer bool `json:"disallowBearer,omitempty"` + // Max number of connections + // +kubebuilder:validation:Optional + Conn int64 `json:"conn,omitempty"` + // Max number of leaf node connections + // +kubebuilder:validation:Optional + LeafNodeConn int64 `json:"leafNodeConn,omitempty"` +} + +func convertExportType(t string) (jwt.ExportType, error) { + switch t { + case "Stream": + return jwt.Stream, nil + case "Service": + return jwt.Service, nil + case "Unknown": + return jwt.Unknown, nil + default: + return -1, fmt.Errorf("invalid export type") + } +} + +// Import describes a mapping from another account into this one +type Import struct { + // The name of the import + // +kubebuilder:validation:Optional + Name string `json:"name,omitempty"` + // The subject to import + // +kubebuilder:validation:Optional + Subject string `json:"subject,omitempty"` + // The account to import from + // +kubebuilder:validation:Optional + Account string `json:"account,omitempty"` + // The token to use for the import + // +kubebuilder:validation:Optional + Token string `json:"token,omitempty"` + // The local subject to import to + // +kubebuilder:validation:Optional + LocalSubject string `json:"localSubject,omitempty"` + // The type of the import + // +kubebuilder:validation:Optional + Type string `json:"type,omitempty"` + // Specifies if the import is shared + // +kubebuilder:validation:Optional + Share bool `json:"share,omitempty"` +} + +// Export describes a mapping from this account to another one +type Export struct { + // The name of the export + // +kubebuilder:validation:Optional + Name string `json:"name,omitempty"` + // The subject to export + // +kubebuilder:validation:Optional + Subject string `json:"subject,omitempty"` + // The type of the export + // +kubebuilder:validation:Optional + Type string `json:"type,omitempty"` + // Specifies if a token is required for the export + // +kubebuilder:validation:Optional + TokenReq bool `json:"tokenReq,omitempty"` + // The revocations for the export + // +kubebuilder:validation:Optional + Revocations map[string]int64 `json:"revocations,omitempty"` + // The response type for the export + // +kubebuilder:validation:Optional + ResponseType string `json:"responseType,omitempty"` + // The response threshold for the export + // +kubebuilder:validation:Optional + ResponseThreshold string `json:"responseThreshold,omitempty"` + // The latency for the export. 
+ // +kubebuilder:validation:Optional + Latency *ServiceLatency `json:"serviceLatency,omitempty"` + // The account token position for the export + // +kubebuilder:validation:Optional + AccountTokenPosition uint `json:"accountTokenPosition,omitempty"` + // Specifies if the export is advertised + // +kubebuilder:validation:Optional + Advertise bool `json:"advertise,omitempty"` + common.Info `json:",inline"` +} + +type ServiceLatency struct { + // Specifies the sampling for the latency + Sampling int `json:"sampling"` + // Specifies the results for the latency + Results string `json:"results"` +} diff --git a/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1/convert.go b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1/convert.go new file mode 100644 index 00000000..dae23e89 --- /dev/null +++ b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1/convert.go @@ -0,0 +1,196 @@ +package v1alpha1 + +import ( + "time" + + "github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common" + "github.com/nats-io/jwt/v2" +) + +func convertImports(in *Account, out *jwt.Account) error { + if in.Imports == nil { + return nil + } + for _, e := range in.Imports { + nats := &jwt.Import{ + Name: e.Name, + Subject: jwt.Subject(e.Subject), + Account: e.Account, + Token: e.Token, + LocalSubject: jwt.RenamingSubject(jwt.Subject(e.LocalSubject)), + Share: e.Share, + } + t, err := convertExportType(e.Type) + if err != nil { + return err + } + nats.Type = t + out.Imports = append(out.Imports, nats) + } + return nil +} + +func convertExports(in *Account, out *jwt.Account) error { + if in.Exports == nil { + return nil + } + for _, e := range in.Exports { + nats := &jwt.Export{ + Name: e.Name, + Subject: jwt.Subject(e.Subject), + TokenReq: e.TokenReq, + Revocations: e.Revocations, + ResponseType: jwt.ResponseType(e.ResponseType), + AccountTokenPosition: e.AccountTokenPosition, + Advertise: e.Advertise, + Info: jwt.Info{ + Description: e.Info.Description, + InfoURL: e.Info.InfoURL, + }, + } + t, err := convertExportType(e.Type) + if err != nil { + return err + } + nats.Type = t + if e.Latency != nil { + nats.Latency = &jwt.ServiceLatency{ + Sampling: jwt.SamplingRate(e.Latency.Sampling), + Results: jwt.Subject(e.Latency.Results), + } + } + if e.ResponseThreshold != "" { + dur, err := time.ParseDuration(e.ResponseThreshold) + if err != nil { + return err + } + nats.ResponseThreshold = dur + } + out.Exports = append(out.Exports, nats) + } + return nil +} + +func convertLimits(in *Account, out *jwt.Account) { + out.Limits = jwt.OperatorLimits{ + NatsLimits: jwt.NatsLimits{ + Subs: in.Limits.NatsLimits.Subs, + Data: in.Limits.NatsLimits.Data, + Payload: in.Limits.NatsLimits.Payload, + }, + AccountLimits: jwt.AccountLimits{ + Imports: in.Limits.AccountLimits.Imports, + Exports: in.Limits.AccountLimits.Exports, + WildcardExports: in.Limits.AccountLimits.WildcardExports, + DisallowBearer: in.Limits.AccountLimits.DisallowBearer, + Conn: in.Limits.AccountLimits.Conn, + LeafNodeConn: in.Limits.AccountLimits.LeafNodeConn, + }, + JetStreamLimits: jwt.JetStreamLimits{ + MemoryStorage: in.Limits.JetStreamLimits.MemoryStorage, + DiskStorage: in.Limits.JetStreamLimits.DiskStorage, + Streams: in.Limits.JetStreamLimits.Streams, + Consumer: in.Limits.JetStreamLimits.Consumer, + MaxAckPending: in.Limits.JetStreamLimits.MaxAckPending, + MemoryMaxStreamBytes: in.Limits.JetStreamLimits.MemoryMaxStreamBytes, + DiskMaxStreamBytes: 
in.Limits.JetStreamLimits.DiskMaxStreamBytes, + MaxBytesRequired: in.Limits.JetStreamLimits.MaxBytesRequired, + }, + } +} + +func convertSigningKeyKind(kind string) jwt.ScopeType { + switch kind { + case "user": + return jwt.UserScopeType + } + return jwt.UserScopeType +} + +func convertSigningKeys(in *Account, out *jwt.Account) { + if in.SigningKeys == nil { + return + } + out.SigningKeys = make(map[string]jwt.Scope, len(in.SigningKeys)) + out.SigningKeys.Add(in.SigningKeys...) +} + +func convertRevocations(in *Account, out *jwt.Account) { + if in.Revocations == nil { + return + } + out.Revocations = make(map[string]int64, len(in.Revocations)) + for k, v := range in.Revocations { + out.Revocations[k] = v + } +} + +func convertDefaultPermissions(in *Account, out *jwt.Account) { + out.DefaultPermissions = jwt.Permissions{ + Pub: jwt.Permission{ + Allow: jwt.StringList(in.DefaultPermissions.Pub.Allow), + Deny: jwt.StringList(in.DefaultPermissions.Pub.Deny), + }, + Sub: jwt.Permission{ + Allow: jwt.StringList(in.DefaultPermissions.Sub.Allow), + Deny: jwt.StringList(in.DefaultPermissions.Sub.Deny), + }, + } + if in.DefaultPermissions.Resp != nil { + out.DefaultPermissions.Resp = &jwt.ResponsePermission{ + MaxMsgs: in.DefaultPermissions.Resp.MaxMsgs, + } + if in.DefaultPermissions.Resp.Expires != "" { + dur, err := time.ParseDuration(in.DefaultPermissions.Resp.Expires) + if err != nil { + return + } + out.DefaultPermissions.Resp.Expires = dur + } + } +} + +func convertMappings(in *Account, out *jwt.Account) { + if in.Mappings == nil { + return + } + out.Mappings = make(map[jwt.Subject][]jwt.WeightedMapping, len(in.Mappings)) + for k, v := range in.Mappings { + mappings := []jwt.WeightedMapping{} + for _, m := range v { + mappings = append(mappings, jwt.WeightedMapping{ + Subject: jwt.Subject(m.Subject), + Weight: m.Weight, + Cluster: m.Cluster, + }) + } + out.Mappings[jwt.Subject(k)] = mappings + } +} + +func Convert(claims *AccountClaims) (*jwt.AccountClaims, error) { + nats := &jwt.AccountClaims{ + Account: jwt.Account{ + Info: jwt.Info{ + Description: claims.Description, + InfoURL: claims.InfoURL, + }, + }, + } + err := convertImports(&claims.Account, &nats.Account) + if err != nil { + return nil, err + } + err = convertExports(&claims.Account, &nats.Account) + if err != nil { + return nil, err + } + convertLimits(&claims.Account, &nats.Account) + convertSigningKeys(&claims.Account, &nats.Account) + convertRevocations(&claims.Account, &nats.Account) + convertDefaultPermissions(&claims.Account, &nats.Account) + convertMappings(&claims.Account, &nats.Account) + nats.ClaimsData = common.ConvertClaimsData(&claims.ClaimsData) + nats.GenericFields = common.ConvertGenericFields(&claims.GenericFields) + return nats, nil +} diff --git a/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1/doc.go b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1/doc.go new file mode 100644 index 00000000..30f47fcd --- /dev/null +++ b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1/doc.go @@ -0,0 +1,2 @@ +// +k8s:deepcopy-gen=package +package v1alpha1 diff --git a/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..09890163 --- /dev/null +++ 
b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,220 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2023 The EdgeFarm Authors. + +Licensed under the Mozilla Public License, version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.mozilla.org/en-US/MPL/2.0/ + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import () + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Account) DeepCopyInto(out *Account) { + *out = *in + if in.Imports != nil { + in, out := &in.Imports, &out.Imports + *out = make([]Import, len(*in)) + copy(*out, *in) + } + if in.Exports != nil { + in, out := &in.Exports, &out.Exports + *out = make([]Export, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + out.Limits = in.Limits + if in.SigningKeys != nil { + in, out := &in.SigningKeys, &out.SigningKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Revocations != nil { + in, out := &in.Revocations, &out.Revocations + *out = make(map[string]int64, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.DefaultPermissions.DeepCopyInto(&out.DefaultPermissions) + if in.Mappings != nil { + in, out := &in.Mappings, &out.Mappings + *out = make(map[string][]WeightedMapping, len(*in)) + for key, val := range *in { + var outVal []WeightedMapping + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]WeightedMapping, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + out.Info = in.Info + in.GenericFields.DeepCopyInto(&out.GenericFields) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Account. +func (in *Account) DeepCopy() *Account { + if in == nil { + return nil + } + out := new(Account) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountClaims) DeepCopyInto(out *AccountClaims) { + *out = *in + out.ClaimsData = in.ClaimsData + in.Account.DeepCopyInto(&out.Account) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountClaims. +func (in *AccountClaims) DeepCopy() *AccountClaims { + if in == nil { + return nil + } + out := new(AccountClaims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountLimits) DeepCopyInto(out *AccountLimits) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountLimits. +func (in *AccountLimits) DeepCopy() *AccountLimits { + if in == nil { + return nil + } + out := new(AccountLimits) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Export) DeepCopyInto(out *Export) { + *out = *in + if in.Revocations != nil { + in, out := &in.Revocations, &out.Revocations + *out = make(map[string]int64, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Latency != nil { + in, out := &in.Latency, &out.Latency + *out = new(ServiceLatency) + **out = **in + } + out.Info = in.Info +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Export. +func (in *Export) DeepCopy() *Export { + if in == nil { + return nil + } + out := new(Export) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Import) DeepCopyInto(out *Import) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Import. +func (in *Import) DeepCopy() *Import { + if in == nil { + return nil + } + out := new(Import) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JetStreamLimits) DeepCopyInto(out *JetStreamLimits) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JetStreamLimits. +func (in *JetStreamLimits) DeepCopy() *JetStreamLimits { + if in == nil { + return nil + } + out := new(JetStreamLimits) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OperatorLimits) DeepCopyInto(out *OperatorLimits) { + *out = *in + out.NatsLimits = in.NatsLimits + out.AccountLimits = in.AccountLimits + out.JetStreamLimits = in.JetStreamLimits +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OperatorLimits. +func (in *OperatorLimits) DeepCopy() *OperatorLimits { + if in == nil { + return nil + } + out := new(OperatorLimits) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceLatency) DeepCopyInto(out *ServiceLatency) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceLatency. +func (in *ServiceLatency) DeepCopy() *ServiceLatency { + if in == nil { + return nil + } + out := new(ServiceLatency) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeightedMapping) DeepCopyInto(out *WeightedMapping) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedMapping. 
+func (in *WeightedMapping) DeepCopy() *WeightedMapping { + if in == nil { + return nil + } + out := new(WeightedMapping) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common/convert.go b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common/convert.go new file mode 100644 index 00000000..27c1cb8e --- /dev/null +++ b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common/convert.go @@ -0,0 +1,24 @@ +package common + +import "github.com/nats-io/jwt/v2" + +func ConvertGenericFields(in *GenericFields) jwt.GenericFields { + return jwt.GenericFields{ + Tags: jwt.TagList(in.Tags), + Type: jwt.ClaimType(in.Type), + Version: in.Version, + } +} + +func ConvertClaimsData(in *ClaimsData) jwt.ClaimsData { + return jwt.ClaimsData{ + Audience: in.Audience, + Expires: in.Expires, + ID: in.ID, + IssuedAt: in.IssuedAt, + Issuer: in.Issuer, + Name: in.Name, + NotBefore: in.NotBefore, + Subject: in.Subject, + } +} diff --git a/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common/types.go b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common/types.go new file mode 100644 index 00000000..9d214991 --- /dev/null +++ b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common/types.go @@ -0,0 +1,83 @@ +// +kubebuilder:object:generate=true +package common + +// +kubebuilder:object:generate=true +type GenericFields struct { + // Do not set manually + // +kubebuilder:validation:Optional + Tags []string `json:"tags,omitempty"` + // Do not set manually + // +kubebuilder:validation:Optional + Type string `json:"type,omitempty"` + // Do not set manually + // +kubebuilder:validation:Optional + Version int `json:"version,omitempty"` +} + +type Info struct { + // A human readable description + Description string `json:"description,omitempty"` + // This is a URL to more information + InfoURL string `json:"infoURL,omitempty"` +} + +type ClaimsData struct { + // Do not set manually + Audience string `json:"aud,omitempty"` + // Do not set manually + Expires int64 `json:"exp,omitempty"` + // Do not set manually + ID string `json:"jti,omitempty"` + // Do not set manually + IssuedAt int64 `json:"iat,omitempty"` + // Do not set manually + Issuer string `json:"iss,omitempty"` + // Do not set manually + Name string `json:"name,omitempty"` + // Do not set manually + NotBefore int64 `json:"nbf,omitempty"` + // Do not set manually + Subject string `json:"sub,omitempty"` +} + +type Permissions struct { + // Specifies the publish permissions + // +kubebuilder:validation:Optional + Pub Permission `json:"pub,omitempty"` + // Specifies the subscribe permissions + // +kubebuilder:validation:Optional + Sub Permission `json:"sub,omitempty"` + // Specifies the response permissions + // +kubebuilder:validation:Optional + Resp *ResponsePermission `json:"resp,omitempty"` +} + +// ResponsePermission Specifies the response permissions +type ResponsePermission struct { + // The maximum number of messages + MaxMsgs int `json:"max"` + // Specifies the time to live for the response + Expires string `json:"ttl"` +} + +// Permission Specifies allow/deny subjects +type Permission struct { + // Specifies allowed subjects + // +kubebuilder:validation:Optional + Allow []string `json:"allow,omitempty"` + // Specifies denied subjects + // +kubebuilder:validation:Optional + Deny []string `json:"deny,omitempty"` +} + +type NatsLimits struct { + // Specifies the maximum number of subscriptions + // 
+kubebuilder:validation:Optional + Subs int64 `json:"subs,omitempty"` + // Specifies the maximum number of bytes + // +kubebuilder:validation:Optional + Data int64 `json:"data,omitempty"` + // Specifies the maximum message payload + // +kubebuilder:validation:Optional + Payload int64 `json:"payload,omitempty"` +} diff --git a/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common/zz_generated.deepcopy.go b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common/zz_generated.deepcopy.go new file mode 100644 index 00000000..731ffa16 --- /dev/null +++ b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common/zz_generated.deepcopy.go @@ -0,0 +1,151 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2023 The EdgeFarm Authors. + +Licensed under the Mozilla Public License, version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.mozilla.org/en-US/MPL/2.0/ + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package common + +import () + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClaimsData) DeepCopyInto(out *ClaimsData) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClaimsData. +func (in *ClaimsData) DeepCopy() *ClaimsData { + if in == nil { + return nil + } + out := new(ClaimsData) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GenericFields) DeepCopyInto(out *GenericFields) { + *out = *in + if in.Tags != nil { + in, out := &in.Tags, &out.Tags + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenericFields. +func (in *GenericFields) DeepCopy() *GenericFields { + if in == nil { + return nil + } + out := new(GenericFields) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Info) DeepCopyInto(out *Info) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Info. +func (in *Info) DeepCopy() *Info { + if in == nil { + return nil + } + out := new(Info) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NatsLimits) DeepCopyInto(out *NatsLimits) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NatsLimits. +func (in *NatsLimits) DeepCopy() *NatsLimits { + if in == nil { + return nil + } + out := new(NatsLimits) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Permission) DeepCopyInto(out *Permission) { + *out = *in + if in.Allow != nil { + in, out := &in.Allow, &out.Allow + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Deny != nil { + in, out := &in.Deny, &out.Deny + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Permission. +func (in *Permission) DeepCopy() *Permission { + if in == nil { + return nil + } + out := new(Permission) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Permissions) DeepCopyInto(out *Permissions) { + *out = *in + in.Pub.DeepCopyInto(&out.Pub) + in.Sub.DeepCopyInto(&out.Sub) + if in.Resp != nil { + in, out := &in.Resp, &out.Resp + *out = new(ResponsePermission) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Permissions. +func (in *Permissions) DeepCopy() *Permissions { + if in == nil { + return nil + } + out := new(Permissions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResponsePermission) DeepCopyInto(out *ResponsePermission) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResponsePermission. +func (in *ResponsePermission) DeepCopy() *ResponsePermission { + if in == nil { + return nil + } + out := new(ResponsePermission) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1/api.go b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1/api.go new file mode 100644 index 00000000..8dccd816 --- /dev/null +++ b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1/api.go @@ -0,0 +1,68 @@ +package v1alpha1 + +import ( + "github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common" +) + +// Specifies claims of the JWT +// +kubebuilder:object:generate=true +type UserClaims struct { + // Common data for all JWTs + common.ClaimsData `json:",inline"` + // Specifies the user specific part of the JWT + // +kubebuilder:validation:Optional + User `json:"user,omitempty"` +} + +// User holds user specific claims data +type User struct { + // The account that issued this user JWT + // +kubebuilder:validation:Optional + IssuerAccount string `json:"issuerAccount,omitempty"` + UserPermissionLimits `json:",inline"` + common.GenericFields `json:",inline"` +} + +// UserPermissionLimits Specifies the permissions and limits for this user +type UserPermissionLimits struct { + common.Permissions `json:",inline"` + Limits `json:",inline"` + // Specifies if this user is allowed to use a bearer token to connect + // +kubebuilder:validation:Optional + BearerToken bool `json:"bearerToken,omitempty"` + // Specifies the allowed connection types for this user + // Allowed values are STANDARD, WEBSOCKET, LEAFNODE, LEAFNODE_WS, MQTT, MQTT_WS + // +kubebuilder:validation:Enum=STANDARD;WEBSOCKET;LEAFNODE;LEAFNODE_WS;MQTT;MQTT_WS + // +kubebuilder:validation:Optional + AllowedConnectionTypes []string `json:"allowedConnectionTypes,omitempty"` +} + +// Limits Specifies the limits for this user +type Limits struct { + UserLimits `json:",inline"` + common.NatsLimits `json:",inline"` +} + +// UserLimits Specifies the limits for this user +type 
UserLimits struct {
+ // A list of CIDR specifications the user is allowed to connect from
+ // Example: 192.168.1.0/24, 192.168.1.1/1 or 2001:db8:a0b:12f0::1/32
+ // +kubebuilder:validation:Optional
+ Src []string `json:"src,omitempty"`
+ // Time ranges within which the user is allowed to interact with the system
+ Times []TimeRange `json:"times,omitempty"`
+ // The time zone location for the times, e.g. "Europe/Berlin"
+ // +kubebuilder:validation:Optional
+ Locale string `json:"timesLocation,omitempty"`
+}
+
+type TimeRange struct {
+ // The start time in the format HH:MM:SS
+ // +kubebuilder:validation:Pattern="^((([0-1][0-9])|(2[0-3])):?[0-5][0-9]:?[0-5][0-9])$"
+ // +kubebuilder:validation:Optional
+ Start string `json:"start,omitempty"`
+ // The end time in the format HH:MM:SS
+ // +kubebuilder:validation:Pattern="^((([0-1][0-9])|(2[0-3])):?[0-5][0-9]:?[0-5][0-9])$"
+ // +kubebuilder:validation:Optional
+ End string `json:"end,omitempty"`
+}
diff --git a/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1/convert.go b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1/convert.go
new file mode 100644
index 00000000..0ceb3b5f
--- /dev/null
+++ b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1/convert.go
@@ -0,0 +1,85 @@
+package v1alpha1
+
+import (
+ "fmt"
+ "time"
+
+ "golang.org/x/exp/slices"
+
+ "github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common"
+ "github.com/nats-io/jwt/v2"
+)
+
+func convertNatsLimits(in *User, out *jwt.User) {
+ out.NatsLimits.Data = in.NatsLimits.Data
+ out.NatsLimits.Payload = in.NatsLimits.Payload
+ out.NatsLimits.Subs = in.NatsLimits.Subs
+}
+
+func convertUserLimits(in *User, out *jwt.User) {
+ out.UserLimits.Locale = in.UserLimits.Locale
+ out.UserLimits.Src = jwt.CIDRList(jwt.TagList(in.UserLimits.Src))
+ for _, e := range in.UserLimits.Times {
+ out.UserLimits.Times = append(out.UserLimits.Times, jwt.TimeRange{
+ Start: e.Start,
+ End: e.End,
+ })
+ }
+}
+
+func convertUserPermissionLimits(in *User, out *jwt.User) error {
+ out.UserPermissionLimits.Pub.Allow = in.UserPermissionLimits.Pub.Allow
+ out.UserPermissionLimits.Pub.Deny = in.UserPermissionLimits.Pub.Deny
+ out.UserPermissionLimits.Sub.Allow = in.UserPermissionLimits.Sub.Allow
+ out.UserPermissionLimits.Sub.Deny = in.UserPermissionLimits.Sub.Deny
+ if in.UserPermissionLimits.Resp != nil {
+ out.UserPermissionLimits.Resp = &jwt.ResponsePermission{}
+ out.UserPermissionLimits.Resp.MaxMsgs = in.UserPermissionLimits.Resp.MaxMsgs
+ dur, err := time.ParseDuration(in.UserPermissionLimits.Resp.Expires)
+ if err != nil {
+ return err
+ }
+ out.UserPermissionLimits.Resp.Expires = dur
+ }
+ out.UserPermissionLimits.BearerToken = in.UserPermissionLimits.BearerToken
+ err := checkAllowedConnectionTypes(in.UserPermissionLimits.AllowedConnectionTypes)
+ if err != nil {
+ return err
+ }
+ out.UserPermissionLimits.AllowedConnectionTypes = jwt.StringList(in.UserPermissionLimits.AllowedConnectionTypes)
+ return nil
+}
+
+func checkAllowedConnectionTypes(t []string) error {
+ allowed := []string{
+ "STANDARD",
+ "WEBSOCKET",
+ "LEAFNODE",
+ "LEAFNODE_WS",
+ "MQTT",
+ "MQTT_WS",
+ }
+ for _, e := range t {
+ if !slices.Contains(allowed, e) {
+ return fmt.Errorf("invalid connection type: %s", e)
+ }
+ }
+ return nil
+}
+
+func Convert(claims *UserClaims) (*jwt.UserClaims, error) {
+ nats := &jwt.UserClaims{
+ User: jwt.User{
+ IssuerAccount: claims.IssuerAccount,
+ },
+ }
+ err := 
convertUserPermissionLimits(&claims.User, &nats.User) + if err != nil { + return nil, err + } + convertUserLimits(&claims.User, &nats.User) + convertNatsLimits(&claims.User, &nats.User) + nats.ClaimsData = common.ConvertClaimsData(&claims.ClaimsData) + nats.GenericFields = common.ConvertGenericFields(&claims.GenericFields) + return nats, nil +} diff --git a/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1/doc.go b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1/doc.go new file mode 100644 index 00000000..30f47fcd --- /dev/null +++ b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1/doc.go @@ -0,0 +1,2 @@ +// +k8s:deepcopy-gen=package +package v1alpha1 diff --git a/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..3c96be8a --- /dev/null +++ b/vendor/github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,137 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2023 The EdgeFarm Authors. + +Licensed under the Mozilla Public License, version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.mozilla.org/en-US/MPL/2.0/ + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import () + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Limits) DeepCopyInto(out *Limits) { + *out = *in + in.UserLimits.DeepCopyInto(&out.UserLimits) + out.NatsLimits = in.NatsLimits +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Limits. +func (in *Limits) DeepCopy() *Limits { + if in == nil { + return nil + } + out := new(Limits) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TimeRange) DeepCopyInto(out *TimeRange) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TimeRange. +func (in *TimeRange) DeepCopy() *TimeRange { + if in == nil { + return nil + } + out := new(TimeRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *User) DeepCopyInto(out *User) { + *out = *in + in.UserPermissionLimits.DeepCopyInto(&out.UserPermissionLimits) + in.GenericFields.DeepCopyInto(&out.GenericFields) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. +func (in *User) DeepCopy() *User { + if in == nil { + return nil + } + out := new(User) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *UserClaims) DeepCopyInto(out *UserClaims) { + *out = *in + out.ClaimsData = in.ClaimsData + in.User.DeepCopyInto(&out.User) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserClaims. +func (in *UserClaims) DeepCopy() *UserClaims { + if in == nil { + return nil + } + out := new(UserClaims) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserLimits) DeepCopyInto(out *UserLimits) { + *out = *in + if in.Src != nil { + in, out := &in.Src, &out.Src + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Times != nil { + in, out := &in.Times, &out.Times + *out = make([]TimeRange, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserLimits. +func (in *UserLimits) DeepCopy() *UserLimits { + if in == nil { + return nil + } + out := new(UserLimits) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserPermissionLimits) DeepCopyInto(out *UserPermissionLimits) { + *out = *in + in.Permissions.DeepCopyInto(&out.Permissions) + in.Limits.DeepCopyInto(&out.Limits) + if in.AllowedConnectionTypes != nil { + in, out := &in.AllowedConnectionTypes, &out.AllowedConnectionTypes + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserPermissionLimits. +func (in *UserPermissionLimits) DeepCopy() *UserPermissionLimits { + if in == nil { + return nil + } + out := new(UserPermissionLimits) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md index 74a37815..02a73ccf 100644 --- a/vendor/github.com/emicklei/go-restful/v3/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/v3/CHANGES.md @@ -1,10 +1,21 @@ # Change history of go-restful -## [v3.9.0] - 20221-07-21 +## [v3.10.1] - 2022-11-19 + +- fix broken 3.10.0 by using path package for joining paths + +## [v3.10.0] - 2022-10-11 - BROKEN + +- changed tokenizer to match std route match behavior; do not trimright the path (#511) +- Add MIME_ZIP (#512) +- Add MIME_ZIP and HEADER_ContentDisposition (#513) +- Changed how to get query parameter issue #510 + +## [v3.9.0] - 2022-07-21 - add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci) -## [v3.8.0] - 20221-06-06 +## [v3.8.0] - 2022-06-06 - use exact matching of allowed domain entries, issue #489 (#493) - this changes fixes [security] Authorization Bypass Through User-Controlled Key diff --git a/vendor/github.com/emicklei/go-restful/v3/constants.go b/vendor/github.com/emicklei/go-restful/v3/constants.go index 203439c5..2328bde6 100644 --- a/vendor/github.com/emicklei/go-restful/v3/constants.go +++ b/vendor/github.com/emicklei/go-restful/v3/constants.go @@ -7,12 +7,14 @@ package restful const ( MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces() MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces() + MIME_ZIP = "application/zip" // Accept or Content-Type used in Consumes() and/or Produces() MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default HEADER_Allow = 
"Allow" HEADER_Accept = "Accept" HEADER_Origin = "Origin" HEADER_ContentType = "Content-Type" + HEADER_ContentDisposition = "Content-Disposition" HEADER_LastModified = "Last-Modified" HEADER_AcceptEncoding = "Accept-Encoding" HEADER_ContentEncoding = "Content-Encoding" diff --git a/vendor/github.com/emicklei/go-restful/v3/request.go b/vendor/github.com/emicklei/go-restful/v3/request.go index 5725a075..0020095e 100644 --- a/vendor/github.com/emicklei/go-restful/v3/request.go +++ b/vendor/github.com/emicklei/go-restful/v3/request.go @@ -31,7 +31,8 @@ func NewRequest(httpRequest *http.Request) *Request { // a "Unable to unmarshal content of type:" response is returned. // Valid values are restful.MIME_JSON and restful.MIME_XML // Example: -// restful.DefaultRequestContentType(restful.MIME_JSON) +// +// restful.DefaultRequestContentType(restful.MIME_JSON) func DefaultRequestContentType(mime string) { defaultRequestContentType = mime } @@ -48,7 +49,7 @@ func (r *Request) PathParameters() map[string]string { // QueryParameter returns the (first) Query parameter value by its name func (r *Request) QueryParameter(name string) string { - return r.Request.FormValue(name) + return r.Request.URL.Query().Get(name) } // QueryParameters returns the all the query parameters values by name diff --git a/vendor/github.com/emicklei/go-restful/v3/response.go b/vendor/github.com/emicklei/go-restful/v3/response.go index 8f0b56aa..a41a92cc 100644 --- a/vendor/github.com/emicklei/go-restful/v3/response.go +++ b/vendor/github.com/emicklei/go-restful/v3/response.go @@ -109,6 +109,9 @@ func (r *Response) EntityWriter() (EntityReaderWriter, bool) { if DefaultResponseMimeType == MIME_XML { return entityAccessRegistry.accessorAt(MIME_XML) } + if DefaultResponseMimeType == MIME_ZIP { + return entityAccessRegistry.accessorAt(MIME_ZIP) + } // Fallback to whatever the route says it can produce. // https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html for _, each := range r.routeProduces { diff --git a/vendor/github.com/emicklei/go-restful/v3/route.go b/vendor/github.com/emicklei/go-restful/v3/route.go index 193f4a6b..ea05b3da 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route.go +++ b/vendor/github.com/emicklei/go-restful/v3/route.go @@ -164,7 +164,7 @@ func tokenizePath(path string) []string { if "/" == path { return nil } - return strings.Split(strings.Trim(path, "/"), "/") + return strings.Split(strings.TrimLeft(path, "/"), "/") } // for debugging @@ -176,3 +176,5 @@ func (r *Route) String() string { func (r *Route) EnableContentEncoding(enabled bool) { r.contentEncodingEnabled = &enabled } + +var TrimRightSlashEnabled = false diff --git a/vendor/github.com/emicklei/go-restful/v3/route_builder.go b/vendor/github.com/emicklei/go-restful/v3/route_builder.go index 23641b6d..830ebf14 100644 --- a/vendor/github.com/emicklei/go-restful/v3/route_builder.go +++ b/vendor/github.com/emicklei/go-restful/v3/route_builder.go @@ -7,6 +7,7 @@ package restful import ( "fmt" "os" + "path" "reflect" "runtime" "strings" @@ -46,11 +47,12 @@ type RouteBuilder struct { // Do evaluates each argument with the RouteBuilder itself. // This allows you to follow DRY principles without breaking the fluent programming style. 
// Example: -// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) // -// func Returns500(b *RouteBuilder) { -// b.Returns(500, "Internal Server Error", restful.ServiceError{}) -// } +// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500)) +// +// func Returns500(b *RouteBuilder) { +// b.Returns(500, "Internal Server Error", restful.ServiceError{}) +// } func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder { for _, each := range oneArgBlocks { each(b) @@ -352,7 +354,7 @@ func (b *RouteBuilder) Build() Route { } func concatPath(path1, path2 string) string { - return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/") + return path.Join(path1, path2) } var anonymousFuncCount int32 diff --git a/vendor/github.com/evanphx/json-patch/v5/LICENSE b/vendor/github.com/evanphx/json-patch/v5/LICENSE new file mode 100644 index 00000000..df76d7d7 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/LICENSE @@ -0,0 +1,25 @@ +Copyright (c) 2014, Evan Phoenix +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +* Neither the name of the Evan Phoenix nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/evanphx/json-patch/v5/errors.go b/vendor/github.com/evanphx/json-patch/v5/errors.go new file mode 100644 index 00000000..75304b44 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/errors.go @@ -0,0 +1,38 @@ +package jsonpatch + +import "fmt" + +// AccumulatedCopySizeError is an error type returned when the accumulated size +// increase caused by copy operations in a patch operation has exceeded the +// limit. +type AccumulatedCopySizeError struct { + limit int64 + accumulated int64 +} + +// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError. +func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError { + return &AccumulatedCopySizeError{limit: l, accumulated: a} +} + +// Error implements the error interface. 
+func (a *AccumulatedCopySizeError) Error() string { + return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit) +} + +// ArraySizeError is an error type returned when the array size has exceeded +// the limit. +type ArraySizeError struct { + limit int + size int +} + +// NewArraySizeError returns an ArraySizeError. +func NewArraySizeError(l, s int) *ArraySizeError { + return &ArraySizeError{limit: l, size: s} +} + +// Error implements the error interface. +func (a *ArraySizeError) Error() string { + return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit) +} diff --git a/vendor/github.com/evanphx/json-patch/v5/merge.go b/vendor/github.com/evanphx/json-patch/v5/merge.go new file mode 100644 index 00000000..a7c45734 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/merge.go @@ -0,0 +1,408 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" +) + +func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode { + curDoc, err := cur.intoDoc() + + if err != nil { + pruneNulls(patch) + return patch + } + + patchDoc, err := patch.intoDoc() + + if err != nil { + return patch + } + + mergeDocs(curDoc, patchDoc, mergeMerge) + + return cur +} + +func mergeDocs(doc, patch *partialDoc, mergeMerge bool) { + for k, v := range patch.obj { + if v == nil { + if mergeMerge { + idx := -1 + for i, key := range doc.keys { + if key == k { + idx = i + break + } + } + if idx == -1 { + doc.keys = append(doc.keys, k) + } + doc.obj[k] = nil + } else { + _ = doc.remove(k, &ApplyOptions{}) + } + } else { + cur, ok := doc.obj[k] + + if !ok || cur == nil { + if !mergeMerge { + pruneNulls(v) + } + _ = doc.set(k, v, &ApplyOptions{}) + } else { + _ = doc.set(k, merge(cur, v, mergeMerge), &ApplyOptions{}) + } + } + } +} + +func pruneNulls(n *lazyNode) { + sub, err := n.intoDoc() + + if err == nil { + pruneDocNulls(sub) + } else { + ary, err := n.intoAry() + + if err == nil { + pruneAryNulls(ary) + } + } +} + +func pruneDocNulls(doc *partialDoc) *partialDoc { + for k, v := range doc.obj { + if v == nil { + _ = doc.remove(k, &ApplyOptions{}) + } else { + pruneNulls(v) + } + } + + return doc +} + +func pruneAryNulls(ary *partialArray) *partialArray { + newAry := []*lazyNode{} + + for _, v := range *ary { + if v != nil { + pruneNulls(v) + } + newAry = append(newAry, v) + } + + *ary = newAry + + return ary +} + +var errBadJSONDoc = fmt.Errorf("Invalid JSON Document") +var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch") +var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents") + +// MergeMergePatches merges two merge patches together, such that +// applying this resulting merged merge patch to a document yields the same +// as merging each merge patch to the document in succession. +func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) { + return doMergePatch(patch1Data, patch2Data, true) +} + +// MergePatch merges the patchData into the docData. 
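+// A minimal usage sketch (document and patch values here are illustrative): +// +//	doc := []byte(`{"name":"alice","age":30,"nickname":"al"}`) +//	patch := []byte(`{"age":31,"nickname":null}`) +//	merged, err := MergePatch(doc, patch) +//	// merged: {"name":"alice","age":31}; per RFC 7386, a null value deletes the key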
+func MergePatch(docData, patchData []byte) ([]byte, error) { + return doMergePatch(docData, patchData, false) +} + +func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) { + doc := &partialDoc{} + + docErr := json.Unmarshal(docData, doc) + + patch := &partialDoc{} + + patchErr := json.Unmarshal(patchData, patch) + + if isSyntaxError(docErr) { + return nil, errBadJSONDoc + } + + if isSyntaxError(patchErr) { + return nil, errBadJSONPatch + } + + if docErr == nil && doc.obj == nil { + return nil, errBadJSONDoc + } + + if patchErr == nil && patch.obj == nil { + return nil, errBadJSONPatch + } + + if docErr != nil || patchErr != nil { + // Not an error, just not a doc, so we turn straight into the patch + if patchErr == nil { + if mergeMerge { + doc = patch + } else { + doc = pruneDocNulls(patch) + } + } else { + patchAry := &partialArray{} + patchErr = json.Unmarshal(patchData, patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + pruneAryNulls(patchAry) + + out, patchErr := json.Marshal(patchAry) + + if patchErr != nil { + return nil, errBadJSONPatch + } + + return out, nil + } + } else { + mergeDocs(doc, patch, mergeMerge) + } + + return json.Marshal(doc) +} + +func isSyntaxError(err error) bool { + if _, ok := err.(*json.SyntaxError); ok { + return true + } + if _, ok := err.(*syntaxError); ok { + return true + } + return false +} + +// resemblesJSONArray indicates whether the byte-slice "appears" to be +// a JSON array or not. +// False-positives are possible, as this function does not check the internal +// structure of the array. It only checks that the outer syntax is present and +// correct. +func resemblesJSONArray(input []byte) bool { + input = bytes.TrimSpace(input) + + hasPrefix := bytes.HasPrefix(input, []byte("[")) + hasSuffix := bytes.HasSuffix(input, []byte("]")) + + return hasPrefix && hasSuffix +} + +// CreateMergePatch will return a merge patch document capable of converting +// the original document(s) to the modified document(s). +// The parameters can be bytes of either two JSON Documents, or two arrays of +// JSON documents. +// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07 +func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalResemblesArray := resemblesJSONArray(originalJSON) + modifiedResemblesArray := resemblesJSONArray(modifiedJSON) + + // Do both byte-slices seem like JSON arrays? + if originalResemblesArray && modifiedResemblesArray { + return createArrayMergePatch(originalJSON, modifiedJSON) + } + + // Are both byte-slices not arrays? Then they are likely JSON objects... + if !originalResemblesArray && !modifiedResemblesArray { + return createObjectMergePatch(originalJSON, modifiedJSON) + } + + // None of the above? Then return an error because of mismatched types. + return nil, errBadMergeTypes +} + +// createObjectMergePatch will return a merge-patch document capable of +// converting the original document to the modified document. 
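+// A small sketch of the expected result (input values are illustrative): +// +//	patch, err := createObjectMergePatch([]byte(`{"a":1,"b":2}`), []byte(`{"a":1,"c":3}`)) +//	// patch: {"b":null,"c":3}; "b" is deleted via null and "c" is added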
+func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDoc := map[string]interface{}{} + modifiedDoc := map[string]interface{}{} + + err := json.Unmarshal(originalJSON, &originalDoc) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDoc) + if err != nil { + return nil, errBadJSONDoc + } + + dest, err := getDiff(originalDoc, modifiedDoc) + if err != nil { + return nil, err + } + + return json.Marshal(dest) +} + +// createArrayMergePatch will return an array of merge-patch documents capable +// of converting the original document to the modified document for each +// pair of JSON documents provided in the arrays. +// Arrays of mismatched sizes will result in an error. +func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) { + originalDocs := []json.RawMessage{} + modifiedDocs := []json.RawMessage{} + + err := json.Unmarshal(originalJSON, &originalDocs) + if err != nil { + return nil, errBadJSONDoc + } + + err = json.Unmarshal(modifiedJSON, &modifiedDocs) + if err != nil { + return nil, errBadJSONDoc + } + + total := len(originalDocs) + if len(modifiedDocs) != total { + return nil, errBadJSONDoc + } + + result := []json.RawMessage{} + for i := 0; i < len(originalDocs); i++ { + original := originalDocs[i] + modified := modifiedDocs[i] + + patch, err := createObjectMergePatch(original, modified) + if err != nil { + return nil, err + } + + result = append(result, json.RawMessage(patch)) + } + + return json.Marshal(result) +} + +// Returns true if the arrays match (must be json types). +// As is idiomatic for go, an empty array is not the same as a nil array. +func matchesArray(a, b []interface{}) bool { + if len(a) != len(b) { + return false + } + if (a == nil && b != nil) || (a != nil && b == nil) { + return false + } + for i := range a { + if !matchesValue(a[i], b[i]) { + return false + } + } + return true +} + +// Returns true if the values match (must be json types) +// The types of the values must match, otherwise it will always return false +// If two map[string]interface{} are given, all elements must match. +func matchesValue(av, bv interface{}) bool { + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + return false + } + switch at := av.(type) { + case string: + bt := bv.(string) + if bt == at { + return true + } + case float64: + bt := bv.(float64) + if bt == at { + return true + } + case bool: + bt := bv.(bool) + if bt == at { + return true + } + case nil: + // Both nil, fine. + return true + case map[string]interface{}: + bt := bv.(map[string]interface{}) + if len(bt) != len(at) { + return false + } + for key := range bt { + av, aOK := at[key] + bv, bOK := bt[key] + if aOK != bOK { + return false + } + if !matchesValue(av, bv) { + return false + } + } + return true + case []interface{}: + bt := bv.([]interface{}) + return matchesArray(at, bt) + } + return false +} + +// getDiff returns the (recursive) difference between a and b as a map[string]interface{}. 
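+// For example (illustrative): getDiff of a={"x":1,"y":2} and b={"x":1,"y":3} +// yields {"y":3}; keys present only in a come back with a nil value so that +// the resulting merge patch deletes them.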
+func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) { + into := map[string]interface{}{} + for key, bv := range b { + av, ok := a[key] + // value was added + if !ok { + into[key] = bv + continue + } + // If types have changed, replace completely + if reflect.TypeOf(av) != reflect.TypeOf(bv) { + into[key] = bv + continue + } + // Types are the same, compare values + switch at := av.(type) { + case map[string]interface{}: + bt := bv.(map[string]interface{}) + dst := make(map[string]interface{}, len(bt)) + dst, err := getDiff(at, bt) + if err != nil { + return nil, err + } + if len(dst) > 0 { + into[key] = dst + } + case string, float64, bool: + if !matchesValue(av, bv) { + into[key] = bv + } + case []interface{}: + bt := bv.([]interface{}) + if !matchesArray(at, bt) { + into[key] = bv + } + case nil: + switch bv.(type) { + case nil: + // Both nil, fine. + default: + into[key] = bv + } + default: + panic(fmt.Sprintf("Unknown type:%T in key %s", av, key)) + } + } + // Now add all deleted values as nil + for key := range a { + _, found := b[key] + if !found { + into[key] = nil + } + } + return into, nil +} diff --git a/vendor/github.com/evanphx/json-patch/v5/patch.go b/vendor/github.com/evanphx/json-patch/v5/patch.go new file mode 100644 index 00000000..117f2c00 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/v5/patch.go @@ -0,0 +1,1135 @@ +package jsonpatch + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +const ( + eRaw = iota + eDoc + eAry +) + +var ( + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. + SupportNegativeIndices bool = true + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. + AccumulatedCopySizeLimit int64 = 0 + startObject = json.Delim('{') + endObject = json.Delim('}') + startArray = json.Delim('[') + endArray = json.Delim(']') +) + +var ( + ErrTestFailed = errors.New("test failed") + ErrMissing = errors.New("missing value") + ErrUnknownType = errors.New("unknown object type") + ErrInvalid = errors.New("invalid state detected") + ErrInvalidIndex = errors.New("invalid index referenced") + + rawJSONArray = []byte("[]") + rawJSONObject = []byte("{}") + rawJSONNull = []byte("null") +) + +type lazyNode struct { + raw *json.RawMessage + doc *partialDoc + ary partialArray + which int +} + +// Operation is a single JSON-Patch step, such as a single 'add' operation. +type Operation map[string]*json.RawMessage + +// Patch is an ordered collection of Operations. +type Patch []Operation + +type partialDoc struct { + keys []string + obj map[string]*lazyNode +} + +type partialArray []*lazyNode + +type container interface { + get(key string, options *ApplyOptions) (*lazyNode, error) + set(key string, val *lazyNode, options *ApplyOptions) error + add(key string, val *lazyNode, options *ApplyOptions) error + remove(key string, options *ApplyOptions) error +} + +// ApplyOptions specifies options for calls to ApplyWithOptions. +// Use NewApplyOptions to obtain default values for ApplyOptions. +type ApplyOptions struct { + // SupportNegativeIndices decides whether to support non-standard practice of + // allowing negative indices to mean indices starting at the end of an array. + // Default to true. 
+ SupportNegativeIndices bool + // AccumulatedCopySizeLimit limits the total size increase in bytes caused by + // "copy" operations in a patch. + AccumulatedCopySizeLimit int64 + // AllowMissingPathOnRemove indicates whether to fail "remove" operations when the target path is missing. + // Default to false. + AllowMissingPathOnRemove bool + // EnsurePathExistsOnAdd instructs json-patch to recursively create the missing parts of path on "add" operation. + // Default to false. + EnsurePathExistsOnAdd bool +} + +// NewApplyOptions creates a default set of options for calls to ApplyWithOptions. +func NewApplyOptions() *ApplyOptions { + return &ApplyOptions{ + SupportNegativeIndices: SupportNegativeIndices, + AccumulatedCopySizeLimit: AccumulatedCopySizeLimit, + AllowMissingPathOnRemove: false, + EnsurePathExistsOnAdd: false, + } +} + +func newLazyNode(raw *json.RawMessage) *lazyNode { + return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw} +} + +func newRawMessage(buf []byte) *json.RawMessage { + ra := make(json.RawMessage, len(buf)) + copy(ra, buf) + return &ra +} + +func (n *lazyNode) MarshalJSON() ([]byte, error) { + switch n.which { + case eRaw: + return json.Marshal(n.raw) + case eDoc: + return json.Marshal(n.doc) + case eAry: + return json.Marshal(n.ary) + default: + return nil, ErrUnknownType + } +} + +func (n *lazyNode) UnmarshalJSON(data []byte) error { + dest := make(json.RawMessage, len(data)) + copy(dest, data) + n.raw = &dest + n.which = eRaw + return nil +} + +func (n *partialDoc) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + if _, err := buf.WriteString("{"); err != nil { + return nil, err + } + for i, k := range n.keys { + if i > 0 { + if _, err := buf.WriteString(", "); err != nil { + return nil, err + } + } + key, err := json.Marshal(k) + if err != nil { + return nil, err + } + if _, err := buf.Write(key); err != nil { + return nil, err + } + if _, err := buf.WriteString(": "); err != nil { + return nil, err + } + value, err := json.Marshal(n.obj[k]) + if err != nil { + return nil, err + } + if _, err := buf.Write(value); err != nil { + return nil, err + } + } + if _, err := buf.WriteString("}"); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +type syntaxError struct { + msg string +} + +func (err *syntaxError) Error() string { + return err.msg +} + +func (n *partialDoc) UnmarshalJSON(data []byte) error { + if err := json.Unmarshal(data, &n.obj); err != nil { + return err + } + buffer := bytes.NewBuffer(data) + d := json.NewDecoder(buffer) + if t, err := d.Token(); err != nil { + return err + } else if t != startObject { + return &syntaxError{fmt.Sprintf("unexpected JSON token in document node: %s", t)} + } + for d.More() { + k, err := d.Token() + if err != nil { + return err + } + key, ok := k.(string) + if !ok { + return &syntaxError{fmt.Sprintf("unexpected JSON token as document node key: %s", k)} + } + if err := skipValue(d); err != nil { + return err + } + n.keys = append(n.keys, key) + } + return nil +} + +func skipValue(d *json.Decoder) error { + t, err := d.Token() + if err != nil { + return err + } + if t != startObject && t != startArray { + return nil + } + for d.More() { + if t == startObject { + // consume key token + if _, err := d.Token(); err != nil { + return err + } + } + if err := skipValue(d); err != nil { + return err + } + } + end, err := d.Token() + if err != nil { + return err + } + if t == startObject && end != endObject { + return &syntaxError{msg: "expected close object token"} + } + if t == startArray && end != 
endArray { + return &syntaxError{msg: "expected close array token"} + } + return nil +} + +func deepCopy(src *lazyNode) (*lazyNode, int, error) { + if src == nil { + return nil, 0, nil + } + a, err := src.MarshalJSON() + if err != nil { + return nil, 0, err + } + sz := len(a) + return newLazyNode(newRawMessage(a)), sz, nil +} + +func (n *lazyNode) intoDoc() (*partialDoc, error) { + if n.which == eDoc { + return n.doc, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return nil, err + } + + n.which = eDoc + return n.doc, nil +} + +func (n *lazyNode) intoAry() (*partialArray, error) { + if n.which == eAry { + return &n.ary, nil + } + + if n.raw == nil { + return nil, ErrInvalid + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return nil, err + } + + n.which = eAry + return &n.ary, nil +} + +func (n *lazyNode) compact() []byte { + buf := &bytes.Buffer{} + + if n.raw == nil { + return nil + } + + err := json.Compact(buf, *n.raw) + + if err != nil { + return *n.raw + } + + return buf.Bytes() +} + +func (n *lazyNode) tryDoc() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.doc) + + if err != nil { + return false + } + + n.which = eDoc + return true +} + +func (n *lazyNode) tryAry() bool { + if n.raw == nil { + return false + } + + err := json.Unmarshal(*n.raw, &n.ary) + + if err != nil { + return false + } + + n.which = eAry + return true +} + +func (n *lazyNode) equal(o *lazyNode) bool { + if n.which == eRaw { + if !n.tryDoc() && !n.tryAry() { + if o.which != eRaw { + return false + } + + return bytes.Equal(n.compact(), o.compact()) + } + } + + if n.which == eDoc { + if o.which == eRaw { + if !o.tryDoc() { + return false + } + } + + if o.which != eDoc { + return false + } + + if len(n.doc.obj) != len(o.doc.obj) { + return false + } + + for k, v := range n.doc.obj { + ov, ok := o.doc.obj[k] + + if !ok { + return false + } + + if (v == nil) != (ov == nil) { + return false + } + + if v == nil && ov == nil { + continue + } + + if !v.equal(ov) { + return false + } + } + + return true + } + + if o.which != eAry && !o.tryAry() { + return false + } + + if len(n.ary) != len(o.ary) { + return false + } + + for idx, val := range n.ary { + if !val.equal(o.ary[idx]) { + return false + } + } + + return true +} + +// Kind reads the "op" field of the Operation. +func (o Operation) Kind() string { + if obj, ok := o["op"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown" + } + + return op + } + + return "unknown" +} + +// Path reads the "path" field of the Operation. +func (o Operation) Path() (string, error) { + if obj, ok := o["path"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation missing path field") +} + +// From reads the "from" field of the Operation. +func (o Operation) From() (string, error) { + if obj, ok := o["from"]; ok && obj != nil { + var op string + + err := json.Unmarshal(*obj, &op) + + if err != nil { + return "unknown", err + } + + return op, nil + } + + return "unknown", errors.Wrapf(ErrMissing, "operation missing from field") +} + +func (o Operation) value() *lazyNode { + if obj, ok := o["value"]; ok { + return newLazyNode(obj) + } + + return nil +} + +// ValueInterface decodes the operation value into an interface. 
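+// For an operation such as {"op":"add","path":"/a","value":{"b":1}} this would +// return map[string]interface{}{"b": float64(1)}, since encoding/json decodes +// JSON numbers as float64 (example is illustrative).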
+func (o Operation) ValueInterface() (interface{}, error) { + if obj, ok := o["value"]; ok && obj != nil { + var v interface{} + + err := json.Unmarshal(*obj, &v) + + if err != nil { + return nil, err + } + + return v, nil + } + + return nil, errors.Wrapf(ErrMissing, "operation, missing value field") +} + +func isArray(buf []byte) bool { +Loop: + for _, c := range buf { + switch c { + case ' ': + case '\n': + case '\t': + continue + case '[': + return true + default: + break Loop + } + } + + return false +} + +func findObject(pd *container, path string, options *ApplyOptions) (container, string) { + doc := *pd + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil, "" + } + + parts := split[1 : len(split)-1] + + key := split[len(split)-1] + + var err error + + for _, part := range parts { + + next, ok := doc.get(decodePatchKey(part), options) + + if next == nil || ok != nil { + return nil, "" + } + + if isArray(*next.raw) { + doc, err = next.intoAry() + + if err != nil { + return nil, "" + } + } else { + doc, err = next.intoDoc() + + if err != nil { + return nil, "" + } + } + } + + return doc, decodePatchKey(key) +} + +func (d *partialDoc) set(key string, val *lazyNode, options *ApplyOptions) error { + found := false + for _, k := range d.keys { + if k == key { + found = true + break + } + } + if !found { + d.keys = append(d.keys, key) + } + d.obj[key] = val + return nil +} + +func (d *partialDoc) add(key string, val *lazyNode, options *ApplyOptions) error { + return d.set(key, val, options) +} + +func (d *partialDoc) get(key string, options *ApplyOptions) (*lazyNode, error) { + v, ok := d.obj[key] + if !ok { + return v, errors.Wrapf(ErrMissing, "unable to get nonexistent key: %s", key) + } + return v, nil +} + +func (d *partialDoc) remove(key string, options *ApplyOptions) error { + _, ok := d.obj[key] + if !ok { + if options.AllowMissingPathOnRemove { + return nil + } + return errors.Wrapf(ErrMissing, "unable to remove nonexistent key: %s", key) + } + idx := -1 + for i, k := range d.keys { + if k == key { + idx = i + break + } + } + d.keys = append(d.keys[0:idx], d.keys[idx+1:]...) + delete(d.obj, key) + return nil +} + +// set should only be used to implement the "replace" operation, so "key" must +// be an already existing index in "d". 
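+// For example (illustrative): with SupportNegativeIndices enabled, a key of "-1" +// replaces the last element, because negative indices count back from the end +// of the array.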
+func (d *partialArray) set(key string, val *lazyNode, options *ApplyOptions) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + if idx < 0 { + if !options.SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(*d) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(*d) + } + + (*d)[idx] = val + return nil +} + +func (d *partialArray) add(key string, val *lazyNode, options *ApplyOptions) error { + if key == "-" { + *d = append(*d, val) + return nil + } + + idx, err := strconv.Atoi(key) + if err != nil { + return errors.Wrapf(err, "value was not a proper array index: '%s'", key) + } + + sz := len(*d) + 1 + + ary := make([]*lazyNode, sz) + + cur := *d + + if idx >= len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !options.SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(ary) { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(ary) + } + + copy(ary[0:idx], cur[0:idx]) + ary[idx] = val + copy(ary[idx+1:], cur[idx:]) + + *d = ary + return nil +} + +func (d *partialArray) get(key string, options *ApplyOptions) (*lazyNode, error) { + idx, err := strconv.Atoi(key) + + if err != nil { + return nil, err + } + + if idx < 0 { + if !options.SupportNegativeIndices { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(*d) + } + + if idx >= len(*d) { + return nil, errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + return (*d)[idx], nil +} + +func (d *partialArray) remove(key string, options *ApplyOptions) error { + idx, err := strconv.Atoi(key) + if err != nil { + return err + } + + cur := *d + + if idx >= len(cur) { + if options.AllowMissingPathOnRemove { + return nil + } + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + + if idx < 0 { + if !options.SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + if idx < -len(cur) { + if options.AllowMissingPathOnRemove { + return nil + } + return errors.Wrapf(ErrInvalidIndex, "Unable to access invalid index: %d", idx) + } + idx += len(cur) + } + + ary := make([]*lazyNode, len(cur)-1) + + copy(ary[0:idx], cur[0:idx]) + copy(ary[idx:], cur[idx+1:]) + + *d = ary + return nil +} + +func (p Patch) add(doc *container, op Operation, options *ApplyOptions) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "add operation failed to decode path") + } + + if options.EnsurePathExistsOnAdd { + err = ensurePathExists(doc, path, options) + + if err != nil { + return err + } + } + + con, key := findObject(doc, path, options) + + if con == nil { + return errors.Wrapf(ErrMissing, "add operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.add(key, op.value(), options) + if err != nil { + return errors.Wrapf(err, "error in add for path: '%s'", path) + } + + return nil +} + +// Given a document and a path to a key, walk the path and create all missing elements +// creating objects and arrays as needed. 
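+// For example (illustrative): an "add" at path "/a/0/b" against the empty +// document {} first scaffolds {"a":[{}]}, creating an array for the numeric +// token and an object for the trailing key, so the final add of "b" can succeed.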
+func ensurePathExists(pd *container, path string, options *ApplyOptions) error { + doc := *pd + + var err error + var arrIndex int + + split := strings.Split(path, "/") + + if len(split) < 2 { + return nil + } + + parts := split[1:] + + for pi, part := range parts { + + // Have we reached the key part of the path? + // If yes, we're done. + if pi == len(parts)-1 { + return nil + } + + target, ok := doc.get(decodePatchKey(part), options) + + if target == nil || ok != nil { + + // If the current container is an array which has fewer elements than our target index, + // pad the current container with nulls. + if arrIndex, err = strconv.Atoi(part); err == nil { + pa, ok := doc.(*partialArray) + + if ok && arrIndex >= len(*pa)+1 { + // Pad the array with null values up to the required index. + for i := len(*pa); i <= arrIndex-1; i++ { + doc.add(strconv.Itoa(i), newLazyNode(newRawMessage(rawJSONNull)), options) + } + } + } + + // Check if the next part is a numeric index or "-". + // If yes, then create an array, otherwise, create an object. + if arrIndex, err = strconv.Atoi(parts[pi+1]); err == nil || parts[pi+1] == "-" { + if arrIndex < 0 { + + if !options.SupportNegativeIndices { + return errors.Wrapf(ErrInvalidIndex, "Unable to ensure path for invalid index: %d", arrIndex) + } + + if arrIndex < -1 { + return errors.Wrapf(ErrInvalidIndex, "Unable to ensure path for negative index other than -1: %d", arrIndex) + } + + arrIndex = 0 + } + + newNode := newLazyNode(newRawMessage(rawJSONArray)) + doc.add(part, newNode, options) + doc, _ = newNode.intoAry() + + // Pad the new array with null values up to the required index. + for i := 0; i < arrIndex; i++ { + doc.add(strconv.Itoa(i), newLazyNode(newRawMessage(rawJSONNull)), options) + } + } else { + newNode := newLazyNode(newRawMessage(rawJSONObject)) + + doc.add(part, newNode, options) + doc, _ = newNode.intoDoc() + } + } else { + if isArray(*target.raw) { + doc, err = target.intoAry() + + if err != nil { + return err + } + } else { + doc, err = target.intoDoc() + + if err != nil { + return err + } + } + } + } + + return nil +} + +func (p Patch) remove(doc *container, op Operation, options *ApplyOptions) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "remove operation failed to decode path") + } + + con, key := findObject(doc, path, options) + + if con == nil { + if options.AllowMissingPathOnRemove { + return nil + } + return errors.Wrapf(ErrMissing, "remove operation does not apply: doc is missing path: \"%s\"", path) + } + + err = con.remove(key, options) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) replace(doc *container, op Operation, options *ApplyOptions) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "replace operation failed to decode path") + } + + if path == "" { + val := op.value() + + if val.which == eRaw { + if !val.tryDoc() { + if !val.tryAry() { + return errors.Wrapf(err, "replace operation value must be object or array") + } + } + } + + switch val.which { + case eAry: + *doc = &val.ary + case eDoc: + *doc = val.doc + case eRaw: + return errors.Wrapf(err, "replace operation hit impossible case") + } + + return nil + } + + con, key := findObject(doc, path, options) + + if con == nil { + return errors.Wrapf(ErrMissing, "replace operation does not apply: doc is missing path: %s", path) + } + + _, ok := con.get(key, options) + if ok != nil { + return errors.Wrapf(ErrMissing, "replace 
operation does not apply: doc is missing key: %s", path) + } + + err = con.set(key, op.value(), options) + if err != nil { + return errors.Wrapf(err, "error in remove for path: '%s'", path) + } + + return nil +} + +func (p Patch) move(doc *container, op Operation, options *ApplyOptions) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode from") + } + + con, key := findObject(doc, from, options) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key, options) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + err = con.remove(key, options) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", key) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "move operation failed to decode path") + } + + con, key = findObject(doc, path, options) + + if con == nil { + return errors.Wrapf(ErrMissing, "move operation does not apply: doc is missing destination path: %s", path) + } + + err = con.add(key, val, options) + if err != nil { + return errors.Wrapf(err, "error in move for path: '%s'", path) + } + + return nil +} + +func (p Patch) test(doc *container, op Operation, options *ApplyOptions) error { + path, err := op.Path() + if err != nil { + return errors.Wrapf(err, "test operation failed to decode path") + } + + if path == "" { + var self lazyNode + + switch sv := (*doc).(type) { + case *partialDoc: + self.doc = sv + self.which = eDoc + case *partialArray: + self.ary = *sv + self.which = eAry + } + + if self.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + con, key := findObject(doc, path, options) + + if con == nil { + return errors.Wrapf(ErrMissing, "test operation does not apply: is missing path: %s", path) + } + + val, err := con.get(key, options) + if err != nil && errors.Cause(err) != ErrMissing { + return errors.Wrapf(err, "error in test for path: '%s'", path) + } + + if val == nil { + if op.value().raw == nil { + return nil + } + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } else if op.value() == nil { + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) + } + + if val.equal(op.value()) { + return nil + } + + return errors.Wrapf(ErrTestFailed, "testing value %s failed", path) +} + +func (p Patch) copy(doc *container, op Operation, accumulatedCopySize *int64, options *ApplyOptions) error { + from, err := op.From() + if err != nil { + return errors.Wrapf(err, "copy operation failed to decode from") + } + + con, key := findObject(doc, from, options) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing from path: %s", from) + } + + val, err := con.get(key, options) + if err != nil { + return errors.Wrapf(err, "error in copy for from: '%s'", from) + } + + path, err := op.Path() + if err != nil { + return errors.Wrapf(ErrMissing, "copy operation failed to decode path") + } + + con, key = findObject(doc, path, options) + + if con == nil { + return errors.Wrapf(ErrMissing, "copy operation does not apply: doc is missing destination path: %s", path) + } + + valCopy, sz, err := deepCopy(val) + if err != nil { + return errors.Wrapf(err, "error while performing deep copy") + } + + (*accumulatedCopySize) += int64(sz) + if options.AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > 
options.AccumulatedCopySizeLimit { + return NewAccumulatedCopySizeError(options.AccumulatedCopySizeLimit, *accumulatedCopySize) + } + + err = con.add(key, valCopy, options) + if err != nil { + return errors.Wrapf(err, "error while adding value during copy") + } + + return nil +} + +// Equal reports whether two JSON documents are structurally equal. +func Equal(a, b []byte) bool { + la := newLazyNode(newRawMessage(a)) + lb := newLazyNode(newRawMessage(b)) + + return la.equal(lb) +} + +// DecodePatch decodes the passed JSON document as an RFC 6902 patch. +func DecodePatch(buf []byte) (Patch, error) { + var p Patch + + err := json.Unmarshal(buf, &p) + + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply mutates a JSON document according to the patch, and returns the new +// document. +func (p Patch) Apply(doc []byte) ([]byte, error) { + return p.ApplyWithOptions(doc, NewApplyOptions()) +} + +// ApplyWithOptions mutates a JSON document according to the patch and the passed in ApplyOptions. +// It returns the new document. +func (p Patch) ApplyWithOptions(doc []byte, options *ApplyOptions) ([]byte, error) { + return p.ApplyIndentWithOptions(doc, "", options) +} + +// ApplyIndent mutates a JSON document according to the patch, and returns the new +// document indented. +func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) { + return p.ApplyIndentWithOptions(doc, indent, NewApplyOptions()) +} + +// ApplyIndentWithOptions mutates a JSON document according to the patch and the passed in ApplyOptions. +// It returns the new document indented. +func (p Patch) ApplyIndentWithOptions(doc []byte, indent string, options *ApplyOptions) ([]byte, error) { + if len(doc) == 0 { + return doc, nil + } + + var pd container + if doc[0] == '[' { + pd = &partialArray{} + } else { + pd = &partialDoc{} + } + + err := json.Unmarshal(doc, pd) + + if err != nil { + return nil, err + } + + err = nil + + var accumulatedCopySize int64 + + for _, op := range p { + switch op.Kind() { + case "add": + err = p.add(&pd, op, options) + case "remove": + err = p.remove(&pd, op, options) + case "replace": + err = p.replace(&pd, op, options) + case "move": + err = p.move(&pd, op, options) + case "test": + err = p.test(&pd, op, options) + case "copy": + err = p.copy(&pd, op, &accumulatedCopySize, options) + default: + err = fmt.Errorf("Unexpected kind: %s", op.Kind()) + } + + if err != nil { + return nil, err + } + } + + if indent != "" { + return json.MarshalIndent(pd, "", indent) + } + + return json.Marshal(pd) +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. 
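+// +// For example (illustrative), the reference token "a~1b~0c" decodes to "a/b~c".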
+ +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml deleted file mode 100644 index 03a22fe0..00000000 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.15.x -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: a5VgoiwB1G/AZqzmephPZIhEB9avMlsWSlVnM1dSAtYAwdrQHGTQxAmpOxYIoSPDhWNN5bfZmjd29++UlTwLcHSR+e0kJhH6IfDlsHj/HplNCJ9tyI0zYc7XchtdKgeMxMzBKCzgwFXGSbQGydXTliDNBo0HOzmY3cou/daMFTP60K+offcjS+3LRAYb1EroSRXZqrk1nuF/xDL3792DZUdPMiFR/L/Df6y74D6/QP4sTkTDFQitz4Wy/7jbsfj8dG6qK2zivgV6/l+w4OVjFkxVpPXogDWY10vVXNVynqxfJ7to2d1I9lNCHE2ilBCkWMIPdyJF7hjF8pKW+82yP4EzRh0vu8Xn0HT5MZpQxdRY/YMxNrWaG7SxsoEaO4q5uhgdzAqLYY3TRa7MjIK+7Ur+aqOeTXn6OKwVi0CjvZ6mIU3WUKSwiwkFZMbjRAkSb5CYwMEfGFO/z964xz83qGt6WAtBXNotqCQpTIiKtDHQeLOMfksHImCg6JLhQcWBVxamVgu0G3Pdh8Y6DyPnxraXY95+QDavbjqv7TeYT9T/FNnrkXaTTK0s4iWE5H4ACU0Qvz0wUYgfQrZv0/Hp7V17+rabUwnzYySHCy9SWX/7OV9Cfh31iMp9ZIffr76xmmThtOEqs8TrTtU6BWI3rWwvA9cXQipZTVtL0oswrGw= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml index f9381aee..013fc194 100644 --- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml +++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml @@ -1,8 +1,6 @@ linters-settings: govet: check-shadowing: true - golint: - min-confidence: 0 gocyclo: min-complexity: 30 maligned: @@ -12,6 +10,8 @@ linters-settings: goconst: min-len: 2 min-occurrences: 4 + paralleltest: + ignore-missing: true linters: enable-all: true disable: @@ -39,3 +39,12 @@ linters: - nestif - godot - errorlint + - varcheck + - interfacer + - deadcode + - golint + - ifshort + - structcheck + - nosnakecase + - varnamelen + - exhaustruct diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml deleted file mode 100644 index 05482f4b..00000000 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -install: -- go get gotest.tools/gotestsum -jobs: - include: - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -env: -- GO111MODULE=on -language: go -notifications: - slack: - secure: 
OpQG/36F7DSF00HLm9WZMhyqFCYYyYTsVDObW226cWiR8PWYiNfLZiSEvIzT1Gx4dDjhigKTIqcLhG34CkL5iNXDjm9Yyo2RYhQPlK8NErNqUEXuBqn4RqYHW48VGhEhOyDd4Ei0E2FN5ZbgpvHgtpkdZ6XDi64r3Ac89isP9aPHXQTuv2Jog6b4/OKKiUTftLcTIst0p4Cp3gqOJWf1wnoj+IadWiECNVQT6zb47IYjtyw6+uV8iUjTzdKcRB6Zc6b4Dq7JAg1Zd7Jfxkql3hlKp4PNlRf9Cy7y5iA3G7MLyg3FcPX5z2kmcyPt2jOTRMBWUJ5zIQpOxizAcN8WsT3WWBL5KbuYK6k0PzujrIDLqdxGpNmjkkMfDBT9cKmZpm2FdW+oZgPFJP+oKmAo4u4KJz/vjiPTXgQlN5bmrLuRMCp+AwC5wkIohTqWZVPE2TK6ZSnMYcg/W39s+RP/9mJoyryAvPSpBOLTI+biCgaUCTOAZxNTWpMFc3tPYntc41WWkdKcooZ9JA5DwfcaVFyTGQ3YXz+HvX6G1z/gW0Q/A4dBi9mj2iE1xm7tRTT+4VQ2AXFvSEI1HJpfPgYnwAtwOD1v3Qm2EUHk9sCdtEDR4wVGEPIVn44GnwFMnGKx9JWppMPYwFu3SVDdHt+E+LOlhZUply11Aa+IVrT2KUQ= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go index 8956c308..f0610cf1 100644 --- a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go +++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go @@ -7,8 +7,8 @@ import ( ) const ( - defaultHttpPort = ":80" - defaultHttpsPort = ":443" + defaultHTTPPort = ":80" + defaultHTTPSPort = ":443" ) // Regular expressions used by the normalizations @@ -18,18 +18,24 @@ var rxDupSlashes = regexp.MustCompile(`/{2,}`) // NormalizeURL will normalize the specified URL // This was added to replace a previous call to the no longer maintained purell library: // The call that was used looked like the following: -// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) +// +// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes)) // // To explain all that was included in the call above, purell.FlagsSafe was really just the following: -// - FlagLowercaseScheme -// - FlagLowercaseHost -// - FlagRemoveDefaultPort -// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +// - FlagLowercaseScheme +// - FlagLowercaseHost +// - FlagRemoveDefaultPort +// - FlagRemoveDuplicateSlashes (and this was mixed in with the |) +// +// This also normalizes the URL into its urlencoded form by removing RawPath and RawFragment. func NormalizeURL(u *url.URL) { lowercaseScheme(u) lowercaseHost(u) removeDefaultPort(u) removeDuplicateSlashes(u) + + u.RawPath = "" + u.RawFragment = "" } func lowercaseScheme(u *url.URL) { @@ -48,7 +54,7 @@ func removeDefaultPort(u *url.URL) { if len(u.Host) > 0 { scheme := strings.ToLower(u.Scheme) u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string { - if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) { + if (scheme == "http" && val == defaultHTTPPort) || (scheme == "https" && val == defaultHTTPSPort) { return "" } return val diff --git a/vendor/github.com/go-openapi/swag/.gitattributes b/vendor/github.com/go-openapi/swag/.gitattributes new file mode 100644 index 00000000..49ad5276 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/.gitattributes @@ -0,0 +1,2 @@ +# gofmt always uses LF, whereas Git uses CRLF on Windows. 
+*.go text eol=lf diff --git a/vendor/github.com/go-openapi/swag/.golangci.yml b/vendor/github.com/go-openapi/swag/.golangci.yml index 813c47aa..bf503e40 100644 --- a/vendor/github.com/go-openapi/swag/.golangci.yml +++ b/vendor/github.com/go-openapi/swag/.golangci.yml @@ -37,3 +37,18 @@ linters: - gci - gocognit - paralleltest + - thelper + - ifshort + - gomoddirectives + - cyclop + - forcetypeassert + - ireturn + - tagliatelle + - varnamelen + - goimports + - tenv + - golint + - exhaustruct + - nilnil + - nonamedreturns + - nosnakecase diff --git a/vendor/github.com/go-openapi/swag/.travis.yml b/vendor/github.com/go-openapi/swag/.travis.yml deleted file mode 100644 index fc25a887..00000000 --- a/vendor/github.com/go-openapi/swag/.travis.yml +++ /dev/null @@ -1,37 +0,0 @@ -after_success: -- bash <(curl -s https://codecov.io/bash) -go: -- 1.14.x -- 1.x -arch: -- amd64 -jobs: - include: - # include arch ppc, but only for latest go version - skip testing for race - - go: 1.x - arch: ppc64le - install: ~ - script: - - go test -v - - #- go: 1.x - # arch: arm - # install: ~ - # script: - # - go test -v - - # include linting job, but only for latest go version and amd64 arch - - go: 1.x - arch: amd64 - install: - go get github.com/golangci/golangci-lint/cmd/golangci-lint - script: - - golangci-lint run --new-from-rev master -install: -- GO111MODULE=off go get -u gotest.tools/gotestsum -language: go -notifications: - slack: - secure: QUWvCkBBK09GF7YtEvHHVt70JOkdlNBG0nIKu/5qc4/nW5HP8I2w0SEf/XR2je0eED1Qe3L/AfMCWwrEj+IUZc3l4v+ju8X8R3Lomhme0Eb0jd1MTMCuPcBT47YCj0M7RON7vXtbFfm1hFJ/jLe5+9FXz0hpXsR24PJc5ZIi/ogNwkaPqG4BmndzecpSh0vc2FJPZUD9LT0I09REY/vXR0oQAalLkW0asGD5taHZTUZq/kBpsNxaAFrLM23i4mUcf33M5fjLpvx5LRICrX/57XpBrDh2TooBU6Qj3CgoY0uPRYUmSNxbVx1czNzl2JtEpb5yjoxfVPQeg0BvQM00G8LJINISR+ohrjhkZmAqchDupAX+yFrxTtORa78CtnIL6z/aTNlgwwVD8kvL/1pFA/JWYmKDmz93mV/+6wubGzNSQCstzjkFA4/iZEKewKUoRIAi/fxyscP6L/rCpmY/4llZZvrnyTqVbt6URWpopUpH4rwYqreXAtJxJsfBJIeSmUIiDIOMGkCTvyTEW3fWGmGoqWtSHLoaWDyAIGb7azb+KvfpWtEcoPFWfSWU+LGee0A/YsUhBl7ADB9A0CJEuR8q4BPpKpfLwPKSiKSAXL7zDkyjExyhtgqbSl2jS+rKIHOZNL8JkCcTP2MKMVd563C5rC5FMKqu3S9m2b6380E= -script: -- gotestsum -f short-verbose -- -race -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/swag/doc.go b/vendor/github.com/go-openapi/swag/doc.go index 8d2c8c50..55094cb7 100644 --- a/vendor/github.com/go-openapi/swag/doc.go +++ b/vendor/github.com/go-openapi/swag/doc.go @@ -17,16 +17,15 @@ Package swag contains a bunch of helper functions for go-openapi and go-swagger You may also use it standalone for your projects. 
- * convert between value and pointers for builtin types - * convert from string to builtin types (wraps strconv) - * fast json concatenation - * search in path - * load from file or http - * name mangling - + - convert between value and pointers for builtin types + - convert from string to builtin types (wraps strconv) + - fast json concatenation + - search in path + - load from file or http + - name mangling This repo has only few dependencies outside of the standard library: - * YAML utilities depend on gopkg.in/yaml.v2 + - YAML utilities depend on gopkg.in/yaml.v2 */ package swag diff --git a/vendor/github.com/go-openapi/swag/file.go b/vendor/github.com/go-openapi/swag/file.go new file mode 100644 index 00000000..16accc55 --- /dev/null +++ b/vendor/github.com/go-openapi/swag/file.go @@ -0,0 +1,33 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package swag + +import "mime/multipart" + +// File represents an uploaded file. +type File struct { + Data multipart.File + Header *multipart.FileHeader +} + +// Read bytes from the file +func (f *File) Read(p []byte) (n int, err error) { + return f.Data.Read(p) +} + +// Close the file +func (f *File) Close() error { + return f.Data.Close() +} diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 9a604097..00038c37 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -16,10 +16,11 @@ package swag import ( "fmt" - "io/ioutil" + "io" "log" "net/http" "net/url" + "os" "path/filepath" "runtime" "strings" @@ -40,13 +41,13 @@ var LoadHTTPCustomHeaders = map[string]string{} // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in func LoadFromFileOrHTTP(path string) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) } // LoadFromFileOrHTTPWithTimeout loads the bytes from a file or a remote http server based on the path passed in // timeout arg allows for per request overriding of the request timeout func LoadFromFileOrHTTPWithTimeout(path string, timeout time.Duration) ([]byte, error) { - return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(timeout))(path) + return LoadStrategy(path, os.ReadFile, loadHTTPBytes(timeout))(path) } // LoadStrategy returns a loader function for a given path or uri @@ -86,7 +87,7 @@ func LoadStrategy(path string, local, remote func(string) ([]byte, error)) func( func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { return func(path string) ([]byte, error) { client := &http.Client{Timeout: timeout} - req, err := http.NewRequest("GET", path, nil) // nolint: noctx + req, err := http.NewRequest(http.MethodGet, path, nil) //nolint:noctx if err != nil { return nil, err } @@ -115,6 +116,6 @@ func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) 
{ return nil, fmt.Errorf("could not access document at %q [%s] ", path, resp.Status) } - return ioutil.ReadAll(resp.Body) + return io.ReadAll(resp.Body) } } diff --git a/vendor/github.com/go-openapi/swag/post_go18.go b/vendor/github.com/go-openapi/swag/post_go18.go index c2e686d3..f5228b82 100644 --- a/vendor/github.com/go-openapi/swag/post_go18.go +++ b/vendor/github.com/go-openapi/swag/post_go18.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.8 // +build go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/post_go19.go b/vendor/github.com/go-openapi/swag/post_go19.go index eb2f2d8b..7c7da9c0 100644 --- a/vendor/github.com/go-openapi/swag/post_go19.go +++ b/vendor/github.com/go-openapi/swag/post_go19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build go1.9 // +build go1.9 package swag diff --git a/vendor/github.com/go-openapi/swag/pre_go18.go b/vendor/github.com/go-openapi/swag/pre_go18.go index 6607f339..2757d9b9 100644 --- a/vendor/github.com/go-openapi/swag/pre_go18.go +++ b/vendor/github.com/go-openapi/swag/pre_go18.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.8 // +build !go1.8 package swag diff --git a/vendor/github.com/go-openapi/swag/pre_go19.go b/vendor/github.com/go-openapi/swag/pre_go19.go index 4bae187d..0565db37 100644 --- a/vendor/github.com/go-openapi/swag/pre_go19.go +++ b/vendor/github.com/go-openapi/swag/pre_go19.go @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +//go:build !go1.9 // +build !go1.9 package swag diff --git a/vendor/github.com/go-openapi/swag/util.go b/vendor/github.com/go-openapi/swag/util.go index 193702f2..f78ab684 100644 --- a/vendor/github.com/go-openapi/swag/util.go +++ b/vendor/github.com/go-openapi/swag/util.go @@ -99,10 +99,11 @@ const ( ) // JoinByFormat joins a string array by a known format (e.g. 
swagger's collectionFormat attribute): -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) +// +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) func JoinByFormat(data []string, format string) []string { if len(data) == 0 { return data @@ -124,11 +125,11 @@ func JoinByFormat(data []string, format string) []string { } // SplitByFormat splits a string by a known format: -// ssv: space separated value -// tsv: tab separated value -// pipes: pipe (|) separated value -// csv: comma separated value (default) // +// ssv: space separated value +// tsv: tab separated value +// pipes: pipe (|) separated value +// csv: comma separated value (default) func SplitByFormat(data, format string) []string { if data == "" { return nil diff --git a/vendor/github.com/go-openapi/swag/yaml.go b/vendor/github.com/go-openapi/swag/yaml.go index ec969144..f09ee609 100644 --- a/vendor/github.com/go-openapi/swag/yaml.go +++ b/vendor/github.com/go-openapi/swag/yaml.go @@ -22,7 +22,7 @@ import ( "github.com/mailru/easyjson/jlexer" "github.com/mailru/easyjson/jwriter" - yaml "gopkg.in/yaml.v2" + yaml "gopkg.in/yaml.v3" ) // YAMLMatcher matches yaml @@ -43,16 +43,126 @@ func YAMLToJSON(data interface{}) (json.RawMessage, error) { // BytesToYAMLDoc converts a byte slice into a YAML document func BytesToYAMLDoc(data []byte) (interface{}, error) { - var canary map[interface{}]interface{} // validate this is an object and not a different type - if err := yaml.Unmarshal(data, &canary); err != nil { + var document yaml.Node // preserve order that is present in the document + if err := yaml.Unmarshal(data, &document); err != nil { return nil, err } + if document.Kind != yaml.DocumentNode || len(document.Content) != 1 || document.Content[0].Kind != yaml.MappingNode { + return nil, fmt.Errorf("only YAML documents that are objects are supported") + } + return &document, nil +} - var document yaml.MapSlice // preserve order that is present in the document - if err := yaml.Unmarshal(data, &document); err != nil { - return nil, err +func yamlNode(root *yaml.Node) (interface{}, error) { + switch root.Kind { + case yaml.DocumentNode: + return yamlDocument(root) + case yaml.SequenceNode: + return yamlSequence(root) + case yaml.MappingNode: + return yamlMapping(root) + case yaml.ScalarNode: + return yamlScalar(root) + case yaml.AliasNode: + return yamlNode(root.Alias) + default: + return nil, fmt.Errorf("unsupported YAML node type: %v", root.Kind) + } +} + +func yamlDocument(node *yaml.Node) (interface{}, error) { + if len(node.Content) != 1 { + return nil, fmt.Errorf("unexpected YAML Document node content length: %d", len(node.Content)) + } + return yamlNode(node.Content[0]) +} + +func yamlMapping(node *yaml.Node) (interface{}, error) { + m := make(JSONMapSlice, len(node.Content)/2) + + var j int + for i := 0; i < len(node.Content); i += 2 { + var nmi JSONMapItem + k, err := yamlStringScalarC(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML map key: %w", err) + } + nmi.Key = k + v, err := yamlNode(node.Content[i+1]) + if err != nil { + return nil, fmt.Errorf("unable to process YAML map value for key %q: %w", k, err) + } + nmi.Value = v + m[j] = nmi + j++ + } + return m, nil +} + +func yamlSequence(node *yaml.Node) (interface{}, error) { + s := make([]interface{}, 0) + + for i := 0; i < len(node.Content); i++ { + + v, err := 
yamlNode(node.Content[i]) + if err != nil { + return nil, fmt.Errorf("unable to decode YAML sequence value: %w", err) + } + s = append(s, v) + } + return s, nil +} + +const ( // See https://yaml.org/type/ + yamlStringScalar = "tag:yaml.org,2002:str" + yamlIntScalar = "tag:yaml.org,2002:int" + yamlBoolScalar = "tag:yaml.org,2002:bool" + yamlFloatScalar = "tag:yaml.org,2002:float" + yamlTimestamp = "tag:yaml.org,2002:timestamp" + yamlNull = "tag:yaml.org,2002:null" +) + +func yamlScalar(node *yaml.Node) (interface{}, error) { + switch node.LongTag() { + case yamlStringScalar: + return node.Value, nil + case yamlBoolScalar: + b, err := strconv.ParseBool(node.Value) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting bool content: %w", node.Value, err) + } + return b, nil + case yamlIntScalar: + i, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting integer content: %w", node.Value, err) + } + return i, nil + case yamlFloatScalar: + f, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return nil, fmt.Errorf("unable to process scalar node. Got %q. Expecting float content: %w", node.Value, err) + } + return f, nil + case yamlTimestamp: + return node.Value, nil + case yamlNull: + return nil, nil + default: + return nil, fmt.Errorf("YAML tag %q is not supported", node.LongTag()) + } +} + +func yamlStringScalarC(node *yaml.Node) (string, error) { + if node.Kind != yaml.ScalarNode { + return "", fmt.Errorf("expecting a string scalar but got %q", node.Kind) + } + switch node.LongTag() { + case yamlStringScalar, yamlIntScalar, yamlFloatScalar: + return node.Value, nil + default: + return "", fmt.Errorf("YAML tag %q is not supported as map key", node.LongTag()) } - return document, nil } // JSONMapSlice represent a JSON object, with the order of keys maintained @@ -105,6 +215,113 @@ func (s *JSONMapSlice) UnmarshalEasyJSON(in *jlexer.Lexer) { *s = result } +func (s JSONMapSlice) MarshalYAML() (interface{}, error) { + var n yaml.Node + n.Kind = yaml.DocumentNode + var nodes []*yaml.Node + for _, item := range s { + nn, err := json2yaml(item.Value) + if err != nil { + return nil, err + } + ns := []*yaml.Node{ + { + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: item.Key, + }, + nn, + } + nodes = append(nodes, ns...) 
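+		// yaml.v3 stores a mapping's keys and values as alternating entries in
+		// Content, so each JSONMapItem contributes a key scalar and a value node.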
+ } + + n.Content = []*yaml.Node{ + { + Kind: yaml.MappingNode, + Content: nodes, + }, + } + + return yaml.Marshal(&n) +} + +func json2yaml(item interface{}) (*yaml.Node, error) { + switch val := item.(type) { + case JSONMapSlice: + var n yaml.Node + n.Kind = yaml.MappingNode + for i := range val { + childNode, err := json2yaml(&val[i].Value) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val[i].Key, + }, childNode) + } + return &n, nil + case map[string]interface{}: + var n yaml.Node + n.Kind = yaml.MappingNode + for k, v := range val { + childNode, err := json2yaml(v) + if err != nil { + return nil, err + } + n.Content = append(n.Content, &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: k, + }, childNode) + } + return &n, nil + case []interface{}: + var n yaml.Node + n.Kind = yaml.SequenceNode + for i := range val { + childNode, err := json2yaml(val[i]) + if err != nil { + return nil, err + } + n.Content = append(n.Content, childNode) + } + return &n, nil + case string: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlStringScalar, + Value: val, + }, nil + case float64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlFloatScalar, + Value: strconv.FormatFloat(val, 'f', -1, 64), + }, nil + case int64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatInt(val, 10), + }, nil + case uint64: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlIntScalar, + Value: strconv.FormatUint(val, 10), + }, nil + case bool: + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: yamlBoolScalar, + Value: strconv.FormatBool(val), + }, nil + } + return nil, nil +} + // JSONMapItem represents the value of a key in a JSON object held by JSONMapSlice type JSONMapItem struct { Key string @@ -173,23 +390,10 @@ func transformData(input interface{}) (out interface{}, err error) { } switch in := input.(type) { - case yaml.MapSlice: - - o := make(JSONMapSlice, len(in)) - for i, mi := range in { - var nmi JSONMapItem - if nmi.Key, err = format(mi.Key); err != nil { - return nil, err - } - - v, ert := transformData(mi.Value) - if ert != nil { - return nil, ert - } - nmi.Value = v - o[i] = nmi - } - return o, nil + case yaml.Node: + return yamlNode(&in) + case *yaml.Node: + return yamlNode(in) case map[interface{}]interface{}: o := make(JSONMapSlice, 0, len(in)) for ke, va := range in { diff --git a/vendor/github.com/google/gnostic/jsonschema/display.go b/vendor/github.com/google/gnostic/jsonschema/display.go index 028a760a..8677ed49 100644 --- a/vendor/github.com/google/gnostic/jsonschema/display.go +++ b/vendor/github.com/google/gnostic/jsonschema/display.go @@ -46,8 +46,23 @@ func (schema *Schema) describeSchema(indent string) string { if schema.Schema != nil { result += indent + "$schema: " + *(schema.Schema) + "\n" } + if schema.ReadOnly != nil && *schema.ReadOnly { + result += indent + fmt.Sprintf("readOnly: %+v\n", *(schema.ReadOnly)) + } + if schema.WriteOnly != nil && *schema.WriteOnly { + result += indent + fmt.Sprintf("writeOnly: %+v\n", *(schema.WriteOnly)) + } if schema.ID != nil { - result += indent + "id: " + *(schema.ID) + "\n" + switch strings.TrimSuffix(*schema.Schema, "#") { + case "http://json-schema.org/draft-04/schema#": + fallthrough + case "#": + fallthrough + case "": + result += indent + "id: " + *(schema.ID) + "\n" + default: + result += indent + "$id: " + *(schema.ID) + "\n" + } } if schema.MultipleOf != nil { result 
+= indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) diff --git a/vendor/github.com/google/gnostic/jsonschema/models.go b/vendor/github.com/google/gnostic/jsonschema/models.go index 4781bdc5..0d877249 100644 --- a/vendor/github.com/google/gnostic/jsonschema/models.go +++ b/vendor/github.com/google/gnostic/jsonschema/models.go @@ -23,9 +23,11 @@ import "gopkg.in/yaml.v3" // All fields are pointers and are nil if the associated values // are not specified. type Schema struct { - Schema *string // $schema - ID *string // id keyword used for $ref resolution scope - Ref *string // $ref, i.e. JSON Pointers + Schema *string // $schema + ID *string // id keyword used for $ref resolution scope + Ref *string // $ref, i.e. JSON Pointers + ReadOnly *bool + WriteOnly *bool // http://json-schema.org/latest/json-schema-validation.html // 5.1. Validation keywords for numeric instances (number and integer) diff --git a/vendor/github.com/google/gnostic/jsonschema/reader.go b/vendor/github.com/google/gnostic/jsonschema/reader.go index b8583d46..a909a341 100644 --- a/vendor/github.com/google/gnostic/jsonschema/reader.go +++ b/vendor/github.com/google/gnostic/jsonschema/reader.go @@ -165,7 +165,6 @@ func NewSchemaFromObject(jsonData *yaml.Node) *Schema { default: fmt.Printf("schemaValue: unexpected node %+v\n", jsonData) - return nil } return nil diff --git a/vendor/github.com/google/gnostic/jsonschema/writer.go b/vendor/github.com/google/gnostic/jsonschema/writer.go index 340dc5f9..15b1f905 100644 --- a/vendor/github.com/google/gnostic/jsonschema/writer.go +++ b/vendor/github.com/google/gnostic/jsonschema/writer.go @@ -16,6 +16,7 @@ package jsonschema import ( "fmt" + "strings" "gopkg.in/yaml.v3" ) @@ -33,7 +34,11 @@ func renderMappingNode(node *yaml.Node, indent string) (result string) { value := node.Content[i+1] switch value.Kind { case yaml.ScalarNode: - result += "\"" + value.Value + "\"" + if value.Tag == "!!bool" { + result += value.Value + } else { + result += "\"" + value.Value + "\"" + } case yaml.MappingNode: result += renderMappingNode(value, innerIndent) case yaml.SequenceNode: @@ -58,7 +63,11 @@ func renderSequenceNode(node *yaml.Node, indent string) (result string) { item := node.Content[i] switch item.Kind { case yaml.ScalarNode: - result += innerIndent + "\"" + item.Value + "\"" + if item.Tag == "!!bool" { + result += innerIndent + item.Value + } else { + result += innerIndent + "\"" + item.Value + "\"" + } case yaml.MappingNode: result += innerIndent + renderMappingNode(item, innerIndent) + "" default: @@ -260,11 +269,26 @@ func (schema *Schema) nodeValue() *yaml.Node { content = appendPair(content, "title", nodeForString(*schema.Title)) } if schema.ID != nil { - content = appendPair(content, "id", nodeForString(*schema.ID)) + switch strings.TrimSuffix(*schema.Schema, "#") { + case "http://json-schema.org/draft-04/schema": + fallthrough + case "#": + fallthrough + case "": + content = appendPair(content, "id", nodeForString(*schema.ID)) + default: + content = appendPair(content, "$id", nodeForString(*schema.ID)) + } } if schema.Schema != nil { content = appendPair(content, "$schema", nodeForString(*schema.Schema)) } + if schema.ReadOnly != nil && *schema.ReadOnly { + content = appendPair(content, "readOnly", nodeForBoolean(*schema.ReadOnly)) + } + if schema.WriteOnly != nil && *schema.WriteOnly { + content = appendPair(content, "writeOnly", nodeForBoolean(*schema.WriteOnly)) + } if schema.Type != nil { content = appendPair(content, "type", schema.Type.nodeValue()) } diff --git 
a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go b/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go index 0f179076..28c2777d 100644 --- a/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go +++ b/vendor/github.com/google/gnostic/openapiv2/OpenAPIv2.go @@ -7887,7 +7887,12 @@ func (m *Oauth2Scopes) ToRawInfo() *yaml.Node { if m == nil { return info } - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value)) + } + } return info } diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go index 5f4a7025..d54a84db 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go +++ b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.go @@ -8560,7 +8560,12 @@ func (m *Strings) ToRawInfo() *yaml.Node { if m == nil { return info } - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Value)) + } + } return info } diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go index 499e7f93..90a56f55 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go +++ b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.pb.go @@ -16,8 +16,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.26.0 -// protoc v3.18.1 +// protoc-gen-go v1.28.0 +// protoc v3.19.4 // source: openapiv3/OpenAPIv3.proto package openapi_v3 @@ -6760,12 +6760,13 @@ var file_openapiv3_OpenAPIv3_proto_rawDesc = []byte{ 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x16, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x56, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, - 0x5a, 0x16, 0x2e, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, + 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto index 1be335b8..7aede5ed 100644 --- a/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto +++ b/vendor/github.com/google/gnostic/openapiv3/OpenAPIv3.proto @@ -42,7 +42,7 @@ option java_package = "org.openapi_v3"; option objc_class_prefix = "OAS"; // The Go package name. -option go_package = "./openapiv3;openapi_v3"; +option go_package = "github.com/google/gnostic/openapiv3;openapi_v3"; message AdditionalPropertiesItem { oneof oneof { diff --git a/vendor/github.com/google/gnostic/openapiv3/README.md b/vendor/github.com/google/gnostic/openapiv3/README.md index 5ee12d92..83603b82 100644 --- a/vendor/github.com/google/gnostic/openapiv3/README.md +++ b/vendor/github.com/google/gnostic/openapiv3/README.md @@ -19,3 +19,7 @@ for OpenAPI. The schema-generator directory contains support code which generates openapi-3.1.json from the OpenAPI 3.1 specification document (Markdown). + +### How to rebuild + +`protoc -I=. -I=third_party --go_out=. --go_opt=paths=source_relative openapiv3/*.proto` \ No newline at end of file diff --git a/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go b/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go new file mode 100644 index 00000000..ae242f30 --- /dev/null +++ b/vendor/github.com/google/gnostic/openapiv3/annotations.pb.go @@ -0,0 +1,183 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.0 +// protoc v3.19.4 +// source: openapiv3/annotations.proto + +package openapi_v3 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + descriptorpb "google.golang.org/protobuf/types/descriptorpb" + reflect "reflect" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +var file_openapiv3_annotations_proto_extTypes = []protoimpl.ExtensionInfo{ + { + ExtendedType: (*descriptorpb.FileOptions)(nil), + ExtensionType: (*Document)(nil), + Field: 1143, + Name: "openapi.v3.document", + Tag: "bytes,1143,opt,name=document", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MethodOptions)(nil), + ExtensionType: (*Operation)(nil), + Field: 1143, + Name: "openapi.v3.operation", + Tag: "bytes,1143,opt,name=operation", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.MessageOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.schema", + Tag: "bytes,1143,opt,name=schema", + Filename: "openapiv3/annotations.proto", + }, + { + ExtendedType: (*descriptorpb.FieldOptions)(nil), + ExtensionType: (*Schema)(nil), + Field: 1143, + Name: "openapi.v3.property", + Tag: "bytes,1143,opt,name=property", + Filename: "openapiv3/annotations.proto", + }, +} + +// Extension fields to descriptorpb.FileOptions. +var ( + // optional openapi.v3.Document document = 1143; + E_Document = &file_openapiv3_annotations_proto_extTypes[0] +) + +// Extension fields to descriptorpb.MethodOptions. +var ( + // optional openapi.v3.Operation operation = 1143; + E_Operation = &file_openapiv3_annotations_proto_extTypes[1] +) + +// Extension fields to descriptorpb.MessageOptions. +var ( + // optional openapi.v3.Schema schema = 1143; + E_Schema = &file_openapiv3_annotations_proto_extTypes[2] +) + +// Extension fields to descriptorpb.FieldOptions. 
+var ( + // optional openapi.v3.Schema property = 1143; + E_Property = &file_openapiv3_annotations_proto_extTypes[3] +) + +var File_openapiv3_annotations_proto protoreflect.FileDescriptor + +var file_openapiv3_annotations_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x1a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x76, 0x33, 0x2f, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x76, 0x33, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3a, 0x4f, 0x0a, 0x08, 0x64, 0x6f, 0x63, 0x75, 0x6d, 0x65, + 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x64, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x3a, 0x54, 0x0a, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x74, 0x68, 0x6f, 0x64, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x09, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3a, 0x4c, 0x0a, + 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x3a, 0x4e, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0xf7, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x33, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x42, 0x5a, 0x0a, 0x0e, 0x6f, + 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x33, 0x42, 0x10, 0x41, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x2e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2f, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x76, 0x33, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, + 0x33, 0xa2, 0x02, 0x03, 0x4f, 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_openapiv3_annotations_proto_goTypes = []interface{}{ + 
(*descriptorpb.FileOptions)(nil), // 0: google.protobuf.FileOptions + (*descriptorpb.MethodOptions)(nil), // 1: google.protobuf.MethodOptions + (*descriptorpb.MessageOptions)(nil), // 2: google.protobuf.MessageOptions + (*descriptorpb.FieldOptions)(nil), // 3: google.protobuf.FieldOptions + (*Document)(nil), // 4: openapi.v3.Document + (*Operation)(nil), // 5: openapi.v3.Operation + (*Schema)(nil), // 6: openapi.v3.Schema +} +var file_openapiv3_annotations_proto_depIdxs = []int32{ + 0, // 0: openapi.v3.document:extendee -> google.protobuf.FileOptions + 1, // 1: openapi.v3.operation:extendee -> google.protobuf.MethodOptions + 2, // 2: openapi.v3.schema:extendee -> google.protobuf.MessageOptions + 3, // 3: openapi.v3.property:extendee -> google.protobuf.FieldOptions + 4, // 4: openapi.v3.document:type_name -> openapi.v3.Document + 5, // 5: openapi.v3.operation:type_name -> openapi.v3.Operation + 6, // 6: openapi.v3.schema:type_name -> openapi.v3.Schema + 6, // 7: openapi.v3.property:type_name -> openapi.v3.Schema + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 4, // [4:8] is the sub-list for extension type_name + 0, // [0:4] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_openapiv3_annotations_proto_init() } +func file_openapiv3_annotations_proto_init() { + if File_openapiv3_annotations_proto != nil { + return + } + file_openapiv3_OpenAPIv3_proto_init() + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv3_annotations_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 4, + NumServices: 0, + }, + GoTypes: file_openapiv3_annotations_proto_goTypes, + DependencyIndexes: file_openapiv3_annotations_proto_depIdxs, + ExtensionInfos: file_openapiv3_annotations_proto_extTypes, + }.Build() + File_openapiv3_annotations_proto = out.File + file_openapiv3_annotations_proto_rawDesc = nil + file_openapiv3_annotations_proto_goTypes = nil + file_openapiv3_annotations_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/gnostic/openapiv3/annotations.proto b/vendor/github.com/google/gnostic/openapiv3/annotations.proto new file mode 100644 index 00000000..0bd87810 --- /dev/null +++ b/vendor/github.com/google/gnostic/openapiv3/annotations.proto @@ -0,0 +1,60 @@ +// Copyright 2022 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package openapi.v3; + +import "openapiv3/OpenAPIv3.proto"; +import "google/protobuf/descriptor.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. 
+option java_multiple_files = true;
+
+// The Java outer classname should be the filename in UpperCamelCase. This
+// class is only used to hold proto descriptor, so developers don't need to
+// work with it directly.
+option java_outer_classname = "AnnotationsProto";
+
+// The Java package name must be proto package name with proper prefix.
+option java_package = "org.openapi_v3";
+
+// A reasonable prefix for the Objective-C symbols generated from the package.
+// It should at a minimum be 3 characters long, all uppercase, and convention
+// is to use an abbreviation of the package name. Something short, but
+// hopefully unique enough to not conflict with things that may come along in
+// the future. 'GPB' is reserved for the protocol buffer implementation itself.
+option objc_class_prefix = "OAS";
+
+// The Go package name.
+option go_package = "github.com/google/gnostic/openapiv3;openapi_v3";
+
+extend google.protobuf.FileOptions {
+  Document document = 1143;
+}
+
+extend google.protobuf.MethodOptions {
+  Operation operation = 1143;
+}
+
+extend google.protobuf.MessageOptions {
+  Schema schema = 1143;
+}
+
+extend google.protobuf.FieldOptions {
+  Schema property = 1143;
+}
\ No newline at end of file
diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml
index f8684d99..061d72ae 100644
--- a/vendor/github.com/google/gofuzz/.travis.yml
+++ b/vendor/github.com/google/gofuzz/.travis.yml
@@ -1,13 +1,10 @@
 language: go
 
 go:
-  - 1.4
-  - 1.3
-  - 1.2
-  - tip
-
-install:
-  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
+  - 1.11.x
+  - 1.12.x
+  - 1.13.x
+  - master
 
 script:
   - go test -cover
diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
index 51cf5cd1..97c1b34f 100644
--- a/vendor/github.com/google/gofuzz/CONTRIBUTING.md
+++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md
@@ -1,7 +1,7 @@
 # How to contribute #
 
 We'd love to accept your patches and contributions to this project. There are
-a just a few small guidelines you need to follow.
+just a few small guidelines you need to follow.
 
 ## Contributor License Agreement ##
diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md
index 386c2a45..b503aae7 100644
--- a/vendor/github.com/google/gofuzz/README.md
+++ b/vendor/github.com/google/gofuzz/README.md
@@ -68,4 +68,22 @@ f.Fuzz(&myObject) // Type will correspond to whether A or B info is set.
 
 See more examples in ```example_test.go```.
 
+You can use this library for easier [go-fuzz](https://github.com/dvyukov/go-fuzz)ing.
+go-fuzz provides the user a byte-slice, which should be converted to different inputs
+for the tested function. This library can help convert the byte slice. Consider for
+example a fuzz test for the function `mypackage.MyFunc` that takes an int argument:
+```go
+// +build gofuzz
+package mypackage
+
+import fuzz "github.com/google/gofuzz"
+
+func Fuzz(data []byte) int {
+	var i int
+	fuzz.NewFromGoFuzz(data).Fuzz(&i)
+	MyFunc(i)
+	return 0
+}
+```
+
 Happy testing!
diff --git a/vendor/github.com/google/gofuzz/bytesource/bytesource.go b/vendor/github.com/google/gofuzz/bytesource/bytesource.go
new file mode 100644
index 00000000..5bb36594
--- /dev/null
+++ b/vendor/github.com/google/gofuzz/bytesource/bytesource.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package bytesource provides a rand.Source64 that is determined by a slice of bytes.
+package bytesource
+
+import (
+	"bytes"
+	"encoding/binary"
+	"io"
+	"math/rand"
+)
+
+// ByteSource implements rand.Source64 determined by a slice of bytes. The random numbers are
+// generated from each 8 bytes in the slice, until the last bytes are consumed, from which a
+// fallback pseudo random source is created in case more random numbers are required.
+// It also exposes a `bytes.Reader` API, which lets callers consume the bytes directly.
+type ByteSource struct {
+	*bytes.Reader
+	fallback rand.Source
+}
+
+// New returns a new ByteSource from a given slice of bytes.
+func New(input []byte) *ByteSource {
+	s := &ByteSource{
+		Reader:   bytes.NewReader(input),
+		fallback: rand.NewSource(0),
+	}
+	if len(input) > 0 {
+		s.fallback = rand.NewSource(int64(s.consumeUint64()))
+	}
+	return s
+}
+
+func (s *ByteSource) Uint64() uint64 {
+	// Return from input if it was not exhausted.
+	if s.Len() > 0 {
+		return s.consumeUint64()
+	}
+
+	// Input was exhausted, return random number from fallback (in this case fallback should not be
+	// nil). Try first having a Uint64 output (Should work in current rand implementation),
+	// otherwise return a conversion of Int63.
+	if s64, ok := s.fallback.(rand.Source64); ok {
+		return s64.Uint64()
+	}
+	return uint64(s.fallback.Int63())
+}
+
+func (s *ByteSource) Int63() int64 {
+	return int64(s.Uint64() >> 1)
+}
+
+func (s *ByteSource) Seed(seed int64) {
+	s.fallback = rand.NewSource(seed)
+	s.Reader = bytes.NewReader(nil)
+}
+
+// consumeUint64 reads 8 bytes from the input and converts them to a uint64. It assumes that
+// the bytes reader is not empty.
+func (s *ByteSource) consumeUint64() uint64 {
+	var bytes [8]byte
+	_, err := s.Read(bytes[:])
+	if err != nil && err != io.EOF {
+		panic("failed reading source") // Should not happen.
+	}
+	return binary.BigEndian.Uint64(bytes[:])
+}
diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go
index da0a5f93..761520a8 100644
--- a/vendor/github.com/google/gofuzz/fuzz.go
+++ b/vendor/github.com/google/gofuzz/fuzz.go
@@ -22,6 +22,9 @@ import (
 	"reflect"
 	"regexp"
 	"time"
+
+	"github.com/google/gofuzz/bytesource"
+	"strings"
 )
 
 // fuzzFuncMap is a map from a type to a fuzzFunc that handles that type.
@@ -61,6 +64,34 @@ func NewWithSeed(seed int64) *Fuzzer {
 	return f
 }
 
+// NewFromGoFuzz is a helper function that enables using gofuzz (this
+// project) with go-fuzz (https://github.com/dvyukov/go-fuzz) for continuous
+// fuzzing. Essentially, it enables translating the fuzzing bytes from
+// go-fuzz to any Go object using this library.
+//
+// This implementation promises a constant translation from a given slice of
+// bytes to the fuzzed objects. This promise will remain over future
+// versions of Go and of this library.
+//
+// Note: the returned Fuzzer should not be shared between multiple goroutines,
+// as its deterministic output will no longer be available.
+//
+// Example: use go-fuzz to test the function `MyFunc(int)` in the package
+// `mypackage`. Add the file: "mypackage_fuzz.go" with the content:
+//
+//	// +build gofuzz
+//	package mypackage
+//	import fuzz "github.com/google/gofuzz"
+//	func Fuzz(data []byte) int {
+//		var i int
+//		fuzz.NewFromGoFuzz(data).Fuzz(&i)
+//		MyFunc(i)
+//		return 0
+//	}
+func NewFromGoFuzz(data []byte) *Fuzzer {
	return New().RandSource(bytesource.New(data))
+}
+
 // Funcs adds each entry in fuzzFuncs as a custom fuzzing function.
 //
 // Each entry in fuzzFuncs must be a function taking two parameters.
@@ -141,7 +172,7 @@ func (f *Fuzzer) genElementCount() int {
 }
 
 func (f *Fuzzer) genShouldFill() bool {
-	return f.r.Float64() > f.nilChance
+	return f.r.Float64() >= f.nilChance
 }
 
 // MaxDepth sets the maximum number of recursive fuzz calls that will be made
@@ -240,6 +271,7 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
 		fn(v, fc.fuzzer.r)
 		return
 	}
+
 	switch v.Kind() {
 	case reflect.Map:
 		if fc.fuzzer.genShouldFill() {
@@ -450,10 +482,10 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
 		v.SetFloat(r.Float64())
 	},
 	reflect.Complex64: func(v reflect.Value, r *rand.Rand) {
-		panic("unimplemented")
+		v.SetComplex(complex128(complex(r.Float32(), r.Float32())))
 	},
 	reflect.Complex128: func(v reflect.Value, r *rand.Rand) {
-		panic("unimplemented")
+		v.SetComplex(complex(r.Float64(), r.Float64()))
 	},
 	reflect.String: func(v reflect.Value, r *rand.Rand) {
 		v.SetString(randString(r))
@@ -465,38 +497,105 @@ var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){
 
 // randBool returns true or false randomly.
 func randBool(r *rand.Rand) bool {
-	if r.Int()&1 == 1 {
-		return true
-	}
-	return false
+	return r.Int31()&(1<<30) == 0
+}
+
+type int63nPicker interface {
+	Int63n(int64) int64
 }
 
-type charRange struct {
-	first, last rune
+// UnicodeRange describes a sequential range of unicode characters.
+// Last must be numerically greater than First.
+type UnicodeRange struct {
+	First, Last rune
 }
 
+// UnicodeRanges describes an arbitrary number of sequential ranges of unicode characters.
+// To be useful, each range must have at least one character (First <= Last) and
+// there must be at least one range.
+type UnicodeRanges []UnicodeRange
+
 // choose returns a random unicode character from the given range, using the
 // given randomness source.
-func (r *charRange) choose(rand *rand.Rand) rune {
-	count := int64(r.last - r.first)
-	return r.first + rune(rand.Int63n(count))
+func (ur UnicodeRange) choose(r int63nPicker) rune {
+	count := int64(ur.Last - ur.First + 1)
+	return ur.First + rune(r.Int63n(count))
+}
+
+// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
+// Each character is selected from the range ur. If there are no characters
+// in the range (ur.Last < ur.First), this will panic.
+func (ur UnicodeRange) CustomStringFuzzFunc() func(s *string, c Continue) {
+	ur.check()
+	return func(s *string, c Continue) {
+		*s = ur.randString(c.Rand)
+	}
 }
 
-var unicodeRanges = []charRange{
+// check verifies that the first encoding of ur (UnicodeRange) is not greater
+// than the last one.
+func (ur UnicodeRange) check() {
+	if ur.Last < ur.First {
+		panic("The last encoding must be greater than the first one.")
+	}
+}
+
+// randString of UnicodeRange makes a random string up to 20 characters long.
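+// (The result may be the empty string, since Intn(20) can return zero.)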
+// Each character is selected from ur (UnicodeRange).
+func (ur UnicodeRange) randString(r *rand.Rand) string {
+	n := r.Intn(20)
+	sb := strings.Builder{}
+	sb.Grow(n)
+	for i := 0; i < n; i++ {
+		sb.WriteRune(ur.choose(r))
+	}
+	return sb.String()
+}
+
+// defaultUnicodeRanges sets a default unicode range when the user does not set
+// CustomStringFuzzFunc() but wants to fuzz a string.
+var defaultUnicodeRanges = UnicodeRanges{
 	{' ', '~'},           // ASCII characters
 	{'\u00a0', '\u02af'}, // Multi-byte encoded characters
 	{'\u4e00', '\u9fff'}, // Common CJK (even longer encodings)
 }
 
+// CustomStringFuzzFunc constructs a FuzzFunc which produces random strings.
+// Each character is selected from one of the ranges of ur (UnicodeRanges).
+// Each range has an equal probability of being chosen. If there are no ranges,
+// or a selected range has no characters (.Last < .First), this will panic.
+// Do not modify any of the ranges in ur after calling this function.
+func (ur UnicodeRanges) CustomStringFuzzFunc() func(s *string, c Continue) {
+	// Check unicode ranges slice is empty.
+	if len(ur) == 0 {
+		panic("UnicodeRanges is empty.")
+	}
+	// if not empty, each range should be checked.
+	for i := range ur {
+		ur[i].check()
+	}
+	return func(s *string, c Continue) {
+		*s = ur.randString(c.Rand)
+	}
+}
+
+// randString of UnicodeRanges makes a random string up to 20 characters long.
+// Each character is selected from one of the ranges of ur (UnicodeRanges),
+// and each range has an equal probability of being chosen.
+func (ur UnicodeRanges) randString(r *rand.Rand) string {
+	n := r.Intn(20)
+	sb := strings.Builder{}
+	sb.Grow(n)
+	for i := 0; i < n; i++ {
+		sb.WriteRune(ur[r.Intn(len(ur))].choose(r))
+	}
+	return sb.String()
+}
+
 // randString makes a random string up to 20 characters long. The returned string
 // may include a variety of (valid) UTF-8 encodings.
 func randString(r *rand.Rand) string {
-	n := r.Intn(20)
-	runes := make([]rune, n)
-	for i := range runes {
-		runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r)
-	}
-	return string(runes)
+	return defaultUnicodeRanges.randString(r)
 }
 
 // randUint64 makes random 64 bit numbers.
diff --git a/vendor/github.com/imdario/mergo/.deepsource.toml b/vendor/github.com/imdario/mergo/.deepsource.toml
new file mode 100644
index 00000000..8a0681af
--- /dev/null
+++ b/vendor/github.com/imdario/mergo/.deepsource.toml
@@ -0,0 +1,12 @@
+version = 1
+
+test_patterns = [
+  "*_test.go"
+]
+
+[[analyzers]]
+name = "go"
+enabled = true
+
+  [analyzers.meta]
+  import_path = "github.com/imdario/mergo"
\ No newline at end of file
diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml
index b13a50ed..d324c43b 100644
--- a/vendor/github.com/imdario/mergo/.travis.yml
+++ b/vendor/github.com/imdario/mergo/.travis.yml
@@ -1,7 +1,12 @@
 language: go
+arch:
+  - amd64
+  - ppc64le
 install:
   - go get -t
   - go get golang.org/x/tools/cmd/cover
   - go get github.com/mattn/goveralls
 script:
+  - go test -race -v ./...
+after_script:
   - $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md
index 8b76f1fb..7e6f7aee 100644
--- a/vendor/github.com/imdario/mergo/README.md
+++ b/vendor/github.com/imdario/mergo/README.md
@@ -1,51 +1,61 @@
 # Mergo
 
-A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
- -Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. - -## Status - -It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). [![GoDoc][3]][4] -[![GoCard][5]][6] +[![GitHub release][5]][6] +[![GoCard][7]][8] [![Build Status][1]][2] -[![Coverage Status][7]][8] -[![Sourcegraph][9]][10] +[![Coverage Status][9]][10] +[![Sourcegraph][11]][12] +[![FOSSA Status][13]][14] +[![Become my sponsor][15]][16] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo -[5]: https://goreportcard.com/badge/imdario/mergo -[6]: https://goreportcard.com/report/github.com/imdario/mergo -[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master -[8]: https://coveralls.io/github/imdario/mergo?branch=master -[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg -[10]: https://sourcegraph.com/github.com/imdario/mergo?badge +[5]: https://img.shields.io/github/release/imdario/mergo.svg +[6]: https://github.com/imdario/mergo/releases +[7]: https://goreportcard.com/badge/imdario/mergo +[8]: https://goreportcard.com/report/github.com/imdario/mergo +[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[10]: https://coveralls.io/github/imdario/mergo?branch=master +[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[12]: https://sourcegraph.com/github.com/imdario/mergo?badge +[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield +[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield +[15]: https://img.shields.io/github/sponsors/imdario +[16]: https://github.com/sponsors/imdario + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. -### Latest release +Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). -[Release v0.3.6](https://github.com/imdario/mergo/releases/tag/v0.3.6). +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild). ### Important note -Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. +Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules. + +Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). 
I added an optional/variadic argument so that it won't break the existing code.
 
-If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
 
 ### Donations
 
-If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
+If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
 
 Buy Me a Coffee at ko-fi.com
-[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo)
-[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo)
 Donate using Liberapay
+Become my sponsor
 
 ### Mergo in the wild
 
+- [cli/cli](https://github.com/cli/cli)
 - [moby/moby](https://github.com/moby/moby)
 - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
 - [vmware/dispatch](https://github.com/vmware/dispatch)
@@ -86,8 +96,11 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month
 - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
 - [jnuthong/item_search](https://github.com/jnuthong/item_search)
 - [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
+- [containerssh/containerssh](https://github.com/containerssh/containerssh)
+- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
+- [tjpnz/structbot](https://github.com/tjpnz/structbot)
 
-## Installation
+## Install
 
     go get github.com/imdario/mergo
 
@@ -98,7 +111,7 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month
 
 ## Usage
 
-You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-type maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty struct values as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
 
 ```go
 if err := mergo.Merge(&dst, src); err != nil {
@@ -124,9 +137,7 @@ if err := mergo.Map(&dst, srcMap); err != nil {
 
 Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values.
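+
+A minimal sketch of the map-to-struct direction described above (the `ServerConfig` type here is just illustrative, not part of Mergo):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/imdario/mergo"
+)
+
+type ServerConfig struct {
+	Host string
+	Port int
+}
+
+func main() {
+	dst := ServerConfig{Port: 8080}
+	// "host" is capitalized to "Host" to find the matching exported field.
+	src := map[string]interface{}{"host": "localhost"}
+	if err := mergo.Map(&dst, src); err != nil {
+		panic(err)
+	}
+	fmt.Println(dst) // {localhost 8080}: the zero-value Host was filled, Port was kept.
+}
+```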
-More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).
-
-### Nice example
+Here is a nice example:
 
 ```go
 package main
@@ -158,7 +169,7 @@ func main() {
 
 Note: if test are failing due missing package, please execute:
 
-    go get gopkg.in/yaml.v2
+    go get gopkg.in/yaml.v3
 
 ### Transformers
 
@@ -174,10 +185,10 @@ import (
 	"time"
 )
 
-type timeTransfomer struct {
+type timeTransformer struct {
 }
 
-func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
 	if typ == reflect.TypeOf(time.Time{}) {
 		return func(dst, src reflect.Value) error {
 			if dst.CanSet() {
@@ -201,14 +212,13 @@ type Snapshot struct {
 func main() {
 	src := Snapshot{time.Now()}
 	dest := Snapshot{}
-	mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{}))
+	mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
 	fmt.Println(dest)
 	// Will print
 	// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
 }
 ```
-
 ## Contact me
 
 If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
@@ -220,3 +230,6 @@ Written by [Dario Castañé](http://dario.im).
 
 ## License
 
 [BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).
+
+
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go
index 6e9aa7ba..fcd985f9 100644
--- a/vendor/github.com/imdario/mergo/doc.go
+++ b/vendor/github.com/imdario/mergo/doc.go
@@ -4,41 +4,140 @@
 // license that can be found in the LICENSE file.
 
 /*
-Package mergo merges same-type structs and maps by setting default values in zero-value fields.
+A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
 
-Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields. It will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection).
+
+Status
+
+It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
+
+Important note
+
+Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 as stable but not bug-free. Also, this version adds support for go modules.
+
+Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
+
+If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u github.com/imdario/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
+
+Install
+
+Do your usual installation procedure:
+
+	go get github.com/imdario/mergo
+
+	// use in your .go code
+	import (
+		"github.com/imdario/mergo"
+	)
 
 Usage
 
-From my own work-in-progress project:
+You can only merge same-type structs with exported fields initialized as zero value of their type and same-type maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty struct values as they are zero values too. Also, maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).
+
+	if err := mergo.Merge(&dst, src); err != nil {
+		// ...
+	}
+
+Also, you can merge overwriting values using the transformer WithOverride.
+
+	if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
+		// ...
+	}
+
+Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
+
+	if err := mergo.Map(&dst, srcMap); err != nil {
+		// ...
+	}
+
+Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.
+
+Here is a nice example:
+
+	package main
+
+	import (
+		"fmt"
+		"github.com/imdario/mergo"
+	)
 
-	type networkConfig struct {
-		Protocol string
-		Address string
-		ServerType string `json: "server_type"`
-		Port uint16
+	type Foo struct {
+		A string
+		B int64
 	}
 
-	type FssnConfig struct {
-		Network networkConfig
+	func main() {
+		src := Foo{
+			A: "one",
+			B: 2,
+		}
+		dest := Foo{
+			A: "two",
+		}
+		mergo.Merge(&dest, src)
+		fmt.Println(dest)
+		// Will print
+		// {two 2}
 	}
 
-	var fssnDefault = FssnConfig {
-		networkConfig {
-			"tcp",
-			"127.0.0.1",
-			"http",
-			31560,
-		},
+Transformers
+
+Transformers allow merging specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, time.Time is a struct; it doesn't have a zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero time.Time?
+
+	package main
+
+	import (
+		"fmt"
+		"github.com/imdario/mergo"
+		"reflect"
+		"time"
+	)
+
+	type timeTransformer struct {
 	}
 
-	// Inside a function [...]
+	func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
+		if typ == reflect.TypeOf(time.Time{}) {
+			return func(dst, src reflect.Value) error {
+				if dst.CanSet() {
+					isZero := dst.MethodByName("IsZero")
+					result := isZero.Call([]reflect.Value{})
+					if result[0].Bool() {
+						dst.Set(src)
+					}
+				}
+				return nil
+			}
+		}
+		return nil
+	}
+
+	type Snapshot struct {
+		Time time.Time
+		// ...
+	}
 
-	if err := mergo.Merge(&config, fssnDefault); err != nil {
-		log.Fatal(err)
+	func main() {
+		src := Snapshot{time.Now()}
+		dest := Snapshot{}
+		mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
+		fmt.Println(dest)
+		// Will print
+		// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
 	}
 
-	// More code [...]
+Contact me
+
+If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
+
+About
+
+Written by Dario Castañé: https://da.rio.hn
+
+License
+
+BSD 3-Clause license, as Go language.
*/ package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index 6ea38e63..a13a7ee4 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -72,6 +72,7 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf case reflect.Struct: srcMap := src.Interface().(map[string]interface{}) for key := range srcMap { + config.overwriteWithEmptyValue = true srcValue := srcMap[key] fieldName := changeInitialCase(key, unicode.ToUpper) dstElement := dst.FieldByName(fieldName) @@ -140,6 +141,9 @@ func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { } func _map(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } var ( vDst, vSrc reflect.Value err error diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 44f70a89..8b4e2f47 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -13,22 +13,39 @@ import ( "reflect" ) -func hasExportedField(dst reflect.Value) (exported bool) { +func hasMergeableFields(dst reflect.Value) (exported bool) { for i, n := 0, dst.NumField(); i < n; i++ { field := dst.Type().Field(i) if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { - exported = exported || hasExportedField(dst.Field(i)) - } else { + exported = exported || hasMergeableFields(dst.Field(i)) + } else if isExportedComponent(&field) { exported = exported || len(field.PkgPath) == 0 } } return } +func isExportedComponent(field *reflect.StructField) bool { + pkgPath := field.PkgPath + if len(pkgPath) > 0 { + return false + } + c := field.Name[0] + if 'a' <= c && c <= 'z' || c == '_' { + return false + } + return true +} + type Config struct { - Overwrite bool - AppendSlice bool - Transformers Transformers + Overwrite bool + AppendSlice bool + TypeCheck bool + Transformers Transformers + overwriteWithEmptyValue bool + overwriteSliceWithEmptyValue bool + sliceDeepCopy bool + debug bool } type Transformers interface { @@ -40,6 +57,10 @@ type Transformers interface { // short circuiting on recursive types. 
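 // The Config flags added in this change (TypeCheck, overwriteWithEmptyValue,
 // overwriteSliceWithEmptyValue and sliceDeepCopy) additionally steer how
 // conflicting values are resolved during the traversal.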
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { overwrite := config.Overwrite + typeCheck := config.TypeCheck + overwriteWithEmptySrc := config.overwriteWithEmptyValue + overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue + sliceDeepCopy := config.sliceDeepCopy if !src.IsValid() { return @@ -58,7 +79,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co visited[h] = &visit{addr, typ, seen} } - if config.Transformers != nil && !isEmptyValue(dst) { + if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() { if fn := config.Transformers.Transformer(dst.Type()); fn != nil { err = fn(dst, src) return @@ -67,21 +88,34 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co switch dst.Kind() { case reflect.Struct: - if hasExportedField(dst) { + if hasMergeableFields(dst) { for i, n := 0, dst.NumField(); i < n; i++ { if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { return } } } else { - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) { dst.Set(src) } } case reflect.Map: if dst.IsNil() && !src.IsNil() { - dst.Set(reflect.MakeMap(dst.Type())) + if dst.CanSet() { + dst.Set(reflect.MakeMap(dst.Type())) + } else { + dst = src + return + } } + + if src.Kind() != reflect.Map { + if overwrite { + dst.Set(src) + } + return + } + for _, key := range src.MapKeys() { srcElement := src.MapIndex(key) if !srcElement.IsValid() { @@ -91,6 +125,9 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co switch srcElement.Kind() { case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: if srcElement.IsNil() { + if overwrite { + dst.SetMapIndex(key, srcElement) + } continue } fallthrough @@ -125,22 +162,43 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co dstSlice = reflect.ValueOf(dstElement.Interface()) } - if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { + if typeCheck && srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } dstSlice = srcSlice } else if config.AppendSlice { if srcSlice.Type() != dstSlice.Type() { - return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) } dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } else if sliceDeepCopy { + i := 0 + for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ { + srcElement := srcSlice.Index(i) + dstElement := dstSlice.Index(i) + + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } + } dst.SetMapIndex(key, dstSlice) } } - if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map { + if dstElement.IsValid() && !isEmptyValue(dstElement) && 
(reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map || reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice) { continue } - if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { + if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement)) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } @@ -151,22 +209,41 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co if !dst.CanSet() { break } - if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + if (!isEmptyValue(src) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice && !sliceDeepCopy { dst.Set(src) } else if config.AppendSlice { if src.Type() != dst.Type() { return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) } dst.Set(reflect.AppendSlice(dst, src)) + } else if sliceDeepCopy { + for i := 0; i < src.Len() && i < dst.Len(); i++ { + srcElement := src.Index(i) + dstElement := dst.Index(i) + if srcElement.CanInterface() { + srcElement = reflect.ValueOf(srcElement.Interface()) + } + if dstElement.CanInterface() { + dstElement = reflect.ValueOf(dstElement.Interface()) + } + + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } } case reflect.Ptr: fallthrough case reflect.Interface: - if src.IsNil() { + if isReflectNil(src) { + if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) { + dst.Set(src) + } break } + if src.Kind() != reflect.Interface { - if dst.IsNil() || overwrite { + if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } @@ -183,18 +260,31 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } break } + if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } - } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { - return + break + } + + if dst.Elem().Kind() == src.Elem().Kind() { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + break } default: - if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { - dst.Set(src) + mustSet := (isEmptyValue(dst) || overwrite) && (!isEmptyValue(src) || overwriteWithEmptySrc) + if mustSet { + if dst.CanSet() { + dst.Set(src) + } else { + dst = src + } } } + return } @@ -206,7 +296,7 @@ func Merge(dst, src interface{}, opts ...func(*Config)) error { return merge(dst, src, opts...) } -// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by // non-empty src attribute values. // Deprecated: use Merge(…) with WithOverride func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { @@ -225,12 +315,37 @@ func WithOverride(config *Config) { config.Overwrite = true } -// WithAppendSlice will make merge append slices instead of overwriting it +// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values. 
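+//
+// A minimal usage sketch (assuming dst and src are same-type structs):
+//
+//	if err := mergo.Merge(&dst, src, mergo.WithOverwriteWithEmptyValue); err != nil {
+//		// handle the merge error
+//	}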
+func WithOverwriteWithEmptyValue(config *Config) { + config.Overwrite = true + config.overwriteWithEmptyValue = true +} + +// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice. +func WithOverrideEmptySlice(config *Config) { + config.overwriteSliceWithEmptyValue = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it. func WithAppendSlice(config *Config) { config.AppendSlice = true } +// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride). +func WithTypeCheck(config *Config) { + config.TypeCheck = true +} + +// WithSliceDeepCopy will merge slice element one by one with Overwrite flag. +func WithSliceDeepCopy(config *Config) { + config.sliceDeepCopy = true + config.Overwrite = true +} + func merge(dst, src interface{}, opts ...func(*Config)) error { + if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr { + return ErrNonPointerAgument + } var ( vDst, vSrc reflect.Value err error @@ -250,3 +365,16 @@ func merge(dst, src interface{}, opts ...func(*Config)) error { } return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) } + +// IsReflectNil is the reflect value provided nil +func isReflectNil(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr: + // Both interface and slice are nil if first word is 0. + // Both are always bigger than a word; assume flagIndir. + return v.IsNil() + default: + return false + } +} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go index a82fea2f..9fe362d4 100644 --- a/vendor/github.com/imdario/mergo/mergo.go +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -17,9 +17,10 @@ import ( var ( ErrNilArguments = errors.New("src and dst must not be nil") ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") - ErrNotSupported = errors.New("only structs and maps are supported") + ErrNotSupported = errors.New("only structs, maps, and slices are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") + ErrNonPointerAgument = errors.New("dst must be a pointer") ) // During deepMerge, must keep track of checks that are @@ -64,7 +65,7 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { return } vDst = reflect.ValueOf(dst).Elem() - if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice { err = ErrNotSupported return } @@ -75,23 +76,3 @@ func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { } return } - -// Traverses recursively both values, assigning src's fields values to dst. -// The map argument tracks comparisons that have already been seen, which allows -// short circuiting on recursive types. -func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { - if dst.CanAddr() { - addr := dst.UnsafeAddr() - h := 17 * addr - seen := visited[h] - typ := dst.Type() - for p := seen; p != nil; p = p.next { - if p.ptr == addr && p.typ == typ { - return nil - } - } - // Remember, remember... 
- visited[h] = &visit{addr, typ, seen} - } - return // TODO refactor -} diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index a42e9d65..b5f5e261 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -401,6 +401,7 @@ func (r *Lexer) scanToken() { // consume resets the current token to allow scanning the next one. func (r *Lexer) consume() { r.token.kind = tokenUndef + r.token.byteValueCloned = false r.token.delimValue = 0 } @@ -528,6 +529,7 @@ func (r *Lexer) Skip() { func (r *Lexer) SkipRecursive() { r.scanToken() var start, end byte + startPos := r.start switch r.token.delimValue { case '{': @@ -553,6 +555,14 @@ func (r *Lexer) SkipRecursive() { level-- if level == 0 { r.pos += i + 1 + if !json.Valid(r.Data[startPos:r.pos]) { + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "skipped array/object json value is invalid", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } + } return } case c == '\\' && inQuotes: @@ -702,6 +712,10 @@ func (r *Lexer) Bytes() []byte { r.errInvalidToken("string") return nil } + if err := r.unescapeStringToken(); err != nil { + r.errInvalidToken("string") + return nil + } ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) n, err := base64.StdEncoding.Decode(ret, r.token.byteValue) if err != nil { diff --git a/vendor/github.com/nats-io/jwt/v2/LICENSE b/vendor/github.com/nats-io/jwt/v2/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/nats-io/jwt/v2/Makefile b/vendor/github.com/nats-io/jwt/v2/Makefile new file mode 100644 index 00000000..108c1a45 --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/Makefile @@ -0,0 +1,18 @@ +.PHONY: test cover + +build: + go build + +fmt: + gofmt -w -s *.go + goimports -w *.go + go mod tidy + +test: + go vet ./... + staticcheck ./... + rm -rf ./coverage.out + go test -v -coverprofile=./coverage.out ./... + +cover: + go tool cover -html=coverage.out \ No newline at end of file diff --git a/vendor/github.com/nats-io/jwt/v2/account_claims.go b/vendor/github.com/nats-io/jwt/v2/account_claims.go new file mode 100644 index 00000000..15731d3f --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/account_claims.go @@ -0,0 +1,354 @@ +/* + * Copyright 2018-2020 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "errors" + "sort" + "time" + + "github.com/nats-io/nkeys" +) + +// NoLimit is used to indicate a limit field is unlimited in value. +const NoLimit = -1 + +type AccountLimits struct { + Imports int64 `json:"imports,omitempty"` // Max number of imports + Exports int64 `json:"exports,omitempty"` // Max number of exports + WildcardExports bool `json:"wildcards,omitempty"` // Are wildcards allowed in exports + DisallowBearer bool `json:"disallow_bearer,omitempty"` // User JWT can't be bearer token + Conn int64 `json:"conn,omitempty"` // Max number of active connections + LeafNodeConn int64 `json:"leaf,omitempty"` // Max number of active leaf node connections +} + +// IsUnlimited returns true if all limits are unlimited +func (a *AccountLimits) IsUnlimited() bool { + return *a == AccountLimits{NoLimit, NoLimit, true, false, NoLimit, NoLimit} +} + +type NatsLimits struct { + Subs int64 `json:"subs,omitempty"` // Max number of subscriptions + Data int64 `json:"data,omitempty"` // Max number of bytes + Payload int64 `json:"payload,omitempty"` // Max message payload +} + +// IsUnlimited returns true if all limits are unlimited +func (n *NatsLimits) IsUnlimited() bool { + return *n == NatsLimits{NoLimit, NoLimit, NoLimit} +} + +type JetStreamLimits struct { + MemoryStorage int64 `json:"mem_storage,omitempty"` // Max number of bytes stored in memory across all streams. (0 means disabled) + DiskStorage int64 `json:"disk_storage,omitempty"` // Max number of bytes stored on disk across all streams. (0 means disabled) + Streams int64 `json:"streams,omitempty"` // Max number of streams + Consumer int64 `json:"consumer,omitempty"` // Max number of consumers + MaxAckPending int64 `json:"max_ack_pending,omitempty"` // Max ack pending of a Stream + MemoryMaxStreamBytes int64 `json:"mem_max_stream_bytes,omitempty"` // Max bytes a memory backed stream can have. (0 means disabled/unlimited) + DiskMaxStreamBytes int64 `json:"disk_max_stream_bytes,omitempty"` // Max bytes a disk backed stream can have. (0 means disabled/unlimited) + MaxBytesRequired bool `json:"max_bytes_required,omitempty"` // Max bytes required by all Streams +} + +// IsUnlimited returns true if all limits are unlimited +func (j *JetStreamLimits) IsUnlimited() bool { + lim := *j + // workaround in case NoLimit was used instead of 0 + if lim.MemoryMaxStreamBytes < 0 { + lim.MemoryMaxStreamBytes = 0 + } + if lim.DiskMaxStreamBytes < 0 { + lim.DiskMaxStreamBytes = 0 + } + if lim.MaxAckPending < 0 { + lim.MaxAckPending = 0 + } + return lim == JetStreamLimits{NoLimit, NoLimit, NoLimit, NoLimit, 0, 0, 0, false} +} + +type JetStreamTieredLimits map[string]JetStreamLimits + +// OperatorLimits are used to limit access by an account +type OperatorLimits struct { + NatsLimits + AccountLimits + JetStreamLimits + JetStreamTieredLimits `json:"tiered_limits,omitempty"` +} + +// IsJSEnabled returns if this account claim has JS enabled either through a tier or the non tiered limits. 
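+// A tier (or the non-tiered limits) counts as JS enabled when either its
+// MemoryStorage or its DiskStorage is non-zero.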
+func (o *OperatorLimits) IsJSEnabled() bool { + if len(o.JetStreamTieredLimits) > 0 { + for _, l := range o.JetStreamTieredLimits { + if l.MemoryStorage != 0 || l.DiskStorage != 0 { + return true + } + } + return false + } + l := o.JetStreamLimits + return l.MemoryStorage != 0 || l.DiskStorage != 0 +} + +// IsEmpty returns true if all limits are 0/false/empty. +func (o *OperatorLimits) IsEmpty() bool { + return o.NatsLimits == NatsLimits{} && + o.AccountLimits == AccountLimits{} && + o.JetStreamLimits == JetStreamLimits{} && + len(o.JetStreamTieredLimits) == 0 +} + +// IsUnlimited returns true if all limits are unlimited +func (o *OperatorLimits) IsUnlimited() bool { + return o.AccountLimits.IsUnlimited() && o.NatsLimits.IsUnlimited() && + o.JetStreamLimits.IsUnlimited() && len(o.JetStreamTieredLimits) == 0 +} + +// Validate checks that the operator limits contain valid values +func (o *OperatorLimits) Validate(vr *ValidationResults) { + // negative values mean unlimited, so all numbers are valid + if len(o.JetStreamTieredLimits) > 0 { + if (o.JetStreamLimits != JetStreamLimits{}) { + vr.AddError("JetStream Limits and tiered JetStream Limits are mutually exclusive") + } + if _, ok := o.JetStreamTieredLimits[""]; ok { + vr.AddError(`Tiered JetStream Limits can not contain a blank "" tier name`) + } + } +} + +// Mapping for publishes +type WeightedMapping struct { + Subject Subject `json:"subject"` + Weight uint8 `json:"weight,omitempty"` + Cluster string `json:"cluster,omitempty"` +} + +func (m *WeightedMapping) GetWeight() uint8 { + if m.Weight == 0 { + return 100 + } + return m.Weight +} + +type Mapping map[Subject][]WeightedMapping + +func (m *Mapping) Validate(vr *ValidationResults) { + for ubFrom, wm := range (map[Subject][]WeightedMapping)(*m) { + ubFrom.Validate(vr) + total := uint8(0) + for _, wm := range wm { + wm.Subject.Validate(vr) + if wm.Subject.HasWildCards() { + vr.AddError("Subject %q in weighted mapping %q is not allowed to contains wildcard", + string(wm.Subject), ubFrom) + } + total += wm.GetWeight() + } + if total > 100 { + vr.AddError("Mapping %q exceeds 100%% among all of it's weighted to mappings", ubFrom) + } + } +} + +func (a *Account) AddMapping(sub Subject, to ...WeightedMapping) { + a.Mappings[sub] = to +} + +// Account holds account specific claims data +type Account struct { + Imports Imports `json:"imports,omitempty"` + Exports Exports `json:"exports,omitempty"` + Limits OperatorLimits `json:"limits,omitempty"` + SigningKeys SigningKeys `json:"signing_keys,omitempty"` + Revocations RevocationList `json:"revocations,omitempty"` + DefaultPermissions Permissions `json:"default_permissions,omitempty"` + Mappings Mapping `json:"mappings,omitempty"` + Info + GenericFields +} + +// Validate checks if the account is valid, based on the wrapper +func (a *Account) Validate(acct *AccountClaims, vr *ValidationResults) { + a.Imports.Validate(acct.Subject, vr) + a.Exports.Validate(vr) + a.Limits.Validate(vr) + a.DefaultPermissions.Validate(vr) + a.Mappings.Validate(vr) + + if !a.Limits.IsEmpty() && a.Limits.Imports >= 0 && int64(len(a.Imports)) > a.Limits.Imports { + vr.AddError("the account contains more imports than allowed by the operator") + } + + // Check Imports and Exports for limit violations. 
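+	// (a limit of NoLimit, i.e. -1, disables the corresponding check)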
+ if a.Limits.Imports != NoLimit { + if int64(len(a.Imports)) > a.Limits.Imports { + vr.AddError("the account contains more imports than allowed by the operator") + } + } + if a.Limits.Exports != NoLimit { + if int64(len(a.Exports)) > a.Limits.Exports { + vr.AddError("the account contains more exports than allowed by the operator") + } + // Check for wildcard restrictions + if !a.Limits.WildcardExports { + for _, ex := range a.Exports { + if ex.Subject.HasWildCards() { + vr.AddError("the account contains wildcard exports that are not allowed by the operator") + } + } + } + } + a.SigningKeys.Validate(vr) + a.Info.Validate(vr) +} + +// AccountClaims defines the body of an account JWT +type AccountClaims struct { + ClaimsData + Account `json:"nats,omitempty"` +} + +// NewAccountClaims creates a new account JWT +func NewAccountClaims(subject string) *AccountClaims { + if subject == "" { + return nil + } + c := &AccountClaims{} + c.SigningKeys = make(SigningKeys) + // Set to unlimited to start. We do it this way so we get compiler + // errors if we add to the OperatorLimits. + c.Limits = OperatorLimits{ + NatsLimits{NoLimit, NoLimit, NoLimit}, + AccountLimits{NoLimit, NoLimit, true, false, NoLimit, NoLimit}, + JetStreamLimits{0, 0, 0, 0, 0, 0, 0, false}, + JetStreamTieredLimits{}, + } + c.Subject = subject + c.Mappings = Mapping{} + return c +} + +// Encode converts account claims into a JWT string +func (a *AccountClaims) Encode(pair nkeys.KeyPair) (string, error) { + if !nkeys.IsValidPublicAccountKey(a.Subject) { + return "", errors.New("expected subject to be account public key") + } + sort.Sort(a.Exports) + sort.Sort(a.Imports) + a.Type = AccountClaim + return a.ClaimsData.encode(pair, a) +} + +// DecodeAccountClaims decodes account claims from a JWT string +func DecodeAccountClaims(token string) (*AccountClaims, error) { + claims, err := Decode(token) + if err != nil { + return nil, err + } + ac, ok := claims.(*AccountClaims) + if !ok { + return nil, errors.New("not account claim") + } + return ac, nil +} + +func (a *AccountClaims) String() string { + return a.ClaimsData.String(a) +} + +// Payload pulls the accounts specific payload out of the claims +func (a *AccountClaims) Payload() interface{} { + return &a.Account +} + +// Validate checks the accounts contents +func (a *AccountClaims) Validate(vr *ValidationResults) { + a.ClaimsData.Validate(vr) + a.Account.Validate(a, vr) + + if nkeys.IsValidPublicAccountKey(a.ClaimsData.Issuer) { + if !a.Limits.IsEmpty() { + vr.AddWarning("self-signed account JWTs shouldn't contain operator limits") + } + } +} + +func (a *AccountClaims) ClaimType() ClaimType { + return a.Type +} + +func (a *AccountClaims) updateVersion() { + a.GenericFields.Version = libVersion +} + +// ExpectedPrefixes defines the types that can encode an account jwt, account and operator +func (a *AccountClaims) ExpectedPrefixes() []nkeys.PrefixByte { + return []nkeys.PrefixByte{nkeys.PrefixByteAccount, nkeys.PrefixByteOperator} +} + +// Claims returns the accounts claims data +func (a *AccountClaims) Claims() *ClaimsData { + return &a.ClaimsData +} + +// DidSign checks the claims against the account's public key and its signing keys +func (a *AccountClaims) DidSign(uc Claims) bool { + if uc != nil { + issuer := uc.Claims().Issuer + if issuer == a.Subject { + return true + } + return a.SigningKeys.Contains(issuer) + } + return false +} + +// Revoke enters a revocation by public key using time.Now(). 
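+// A short usage sketch (ac being an *AccountClaims, pk the public key to revoke):
+//
+//	ac.Revoke(pk) // equivalent to ac.RevokeAt(pk, time.Now())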
+func (a *AccountClaims) Revoke(pubKey string) { + a.RevokeAt(pubKey, time.Now()) +} + +// RevokeAt enters a revocation by public key and timestamp into this account +// This will revoke all jwt issued for pubKey, prior to timestamp +// If there is already a revocation for this public key that is newer, it is kept. +// The value is expected to be a public key or "*" (means all public keys) +func (a *AccountClaims) RevokeAt(pubKey string, timestamp time.Time) { + if a.Revocations == nil { + a.Revocations = RevocationList{} + } + a.Revocations.Revoke(pubKey, timestamp) +} + +// ClearRevocation removes any revocation for the public key +func (a *AccountClaims) ClearRevocation(pubKey string) { + a.Revocations.ClearRevocation(pubKey) +} + +// isRevoked checks if the public key is in the revoked list with a timestamp later than the one passed in. +// Generally this method is called with the subject and issue time of the jwt to be tested. +// DO NOT pass time.Now(), it will not produce a stable/expected response. +func (a *AccountClaims) isRevoked(pubKey string, claimIssuedAt time.Time) bool { + return a.Revocations.IsRevoked(pubKey, claimIssuedAt) +} + +// IsClaimRevoked checks if the account revoked the claim passed in. +// Invalid claims (nil, no Subject or IssuedAt) will return true. +func (a *AccountClaims) IsClaimRevoked(claim *UserClaims) bool { + if claim == nil || claim.IssuedAt == 0 || claim.Subject == "" { + return true + } + return a.isRevoked(claim.Subject, time.Unix(claim.IssuedAt, 0)) +} diff --git a/vendor/github.com/nats-io/jwt/v2/activation_claims.go b/vendor/github.com/nats-io/jwt/v2/activation_claims.go new file mode 100644 index 00000000..827658ef --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/activation_claims.go @@ -0,0 +1,178 @@ +/* + * Copyright 2018 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "crypto/sha256" + "encoding/base32" + "errors" + "fmt" + "strings" + + "github.com/nats-io/nkeys" +) + +// Activation defines the custom parts of an activation claim +type Activation struct { + ImportSubject Subject `json:"subject,omitempty"` + ImportType ExportType `json:"kind,omitempty"` + // IssuerAccount stores the public key for the account the issuer represents. + // When set, the claim was issued by a signing key. 
+	IssuerAccount string `json:"issuer_account,omitempty"`
+	GenericFields
+}
+
+// IsService returns true if an Activation is for a service
+func (a *Activation) IsService() bool {
+	return a.ImportType == Service
+}
+
+// IsStream returns true if an Activation is for a stream
+func (a *Activation) IsStream() bool {
+	return a.ImportType == Stream
+}
+
+// Validate checks the exports and limits in an activation JWT
+func (a *Activation) Validate(vr *ValidationResults) {
+	if !a.IsService() && !a.IsStream() {
+		vr.AddError("invalid import type: %q", a.ImportType)
+	}
+
+	a.ImportSubject.Validate(vr)
+}
+
+// ActivationClaims holds the data specific to an activation JWT
+type ActivationClaims struct {
+	ClaimsData
+	Activation `json:"nats,omitempty"`
+}
+
+// NewActivationClaims creates a new activation claim with the provided subject
+func NewActivationClaims(subject string) *ActivationClaims {
+	if subject == "" {
+		return nil
+	}
+	ac := &ActivationClaims{}
+	ac.Subject = subject
+	return ac
+}
+
+// Encode turns an activation claim into a JWT string
+func (a *ActivationClaims) Encode(pair nkeys.KeyPair) (string, error) {
+	if !nkeys.IsValidPublicAccountKey(a.ClaimsData.Subject) {
+		return "", errors.New("expected subject to be an account")
+	}
+	a.Type = ActivationClaim
+	return a.ClaimsData.encode(pair, a)
+}
+
+// DecodeActivationClaims tries to create an activation claim from a JWT string
+func DecodeActivationClaims(token string) (*ActivationClaims, error) {
+	claims, err := Decode(token)
+	if err != nil {
+		return nil, err
+	}
+	ac, ok := claims.(*ActivationClaims)
+	if !ok {
+		return nil, errors.New("not activation claim")
+	}
+	return ac, nil
+}
+
+// Payload returns the activation specific part of the JWT
+func (a *ActivationClaims) Payload() interface{} {
+	return a.Activation
+}
+
+// Validate checks the claims
+func (a *ActivationClaims) Validate(vr *ValidationResults) {
+	a.validateWithTimeChecks(vr, true)
+}
+
+// validateWithTimeChecks checks the claims, optionally including the time-based checks
+func (a *ActivationClaims) validateWithTimeChecks(vr *ValidationResults, timeChecks bool) {
+	if timeChecks {
+		a.ClaimsData.Validate(vr)
+	}
+	a.Activation.Validate(vr)
+	if a.IssuerAccount != "" && !nkeys.IsValidPublicAccountKey(a.IssuerAccount) {
+		vr.AddError("account_id is not an account public key")
+	}
+}
+
+func (a *ActivationClaims) ClaimType() ClaimType {
+	return a.Type
+}
+
+func (a *ActivationClaims) updateVersion() {
+	a.GenericFields.Version = libVersion
+}
+
+// ExpectedPrefixes defines the types that can sign an activation jwt, account and operator
+func (a *ActivationClaims) ExpectedPrefixes() []nkeys.PrefixByte {
+	return []nkeys.PrefixByte{nkeys.PrefixByteAccount, nkeys.PrefixByteOperator}
+}
+
+// Claims returns the generic part of the JWT
+func (a *ActivationClaims) Claims() *ClaimsData {
+	return &a.ClaimsData
+}
+
+func (a *ActivationClaims) String() string {
+	return a.ClaimsData.String(a)
+}
+
+// HashID returns a hash of the claims that can be used to identify it.
+// The hash is calculated by creating a string with
+// issuerPubKey.subjectPubKey.<subject> and constructing the sha-256 hash and base32 encoding that.
+// <subject> is the exported subject, minus any wildcards, so foo.* becomes foo.
+// The one special case: if the export starts with "*" or is ">", the <subject> becomes "_".
+func (a *ActivationClaims) HashID() (string, error) {
+
+	if a.Issuer == "" || a.Subject == "" || a.ImportSubject == "" {
+		return "", fmt.Errorf("not enough data in the activation claims to create a hash")
+	}
+
+	subject := cleanSubject(string(a.ImportSubject))
+	base := fmt.Sprintf("%s.%s.%s", a.Issuer, a.Subject, subject)
+	h := sha256.New()
+	h.Write([]byte(base))
+	sha := h.Sum(nil)
+	hash := base32.StdEncoding.EncodeToString(sha)
+
+	return hash, nil
+}
+
+func cleanSubject(subject string) string {
+	split := strings.Split(subject, ".")
+	cleaned := ""
+
+	for i, tok := range split {
+		if tok == "*" || tok == ">" {
+			if i == 0 {
+				cleaned = "_"
+				break
+			}
+
+			cleaned = strings.Join(split[:i], ".")
+			break
+		}
+	}
+	if cleaned == "" {
+		cleaned = subject
+	}
+	return cleaned
+}
diff --git a/vendor/github.com/nats-io/jwt/v2/claims.go b/vendor/github.com/nats-io/jwt/v2/claims.go
new file mode 100644
index 00000000..3411e541
--- /dev/null
+++ b/vendor/github.com/nats-io/jwt/v2/claims.go
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2018-2019 The NATS Authors
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jwt
+
+import (
+	"crypto/sha512"
+	"encoding/base32"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/nats-io/nkeys"
+)
+
+// ClaimType is used to indicate the type of JWT being stored in a Claim
+type ClaimType string
+
+const (
+	// OperatorClaim is the type of an operator JWT
+	OperatorClaim = "operator"
+	// AccountClaim is the type of an Account JWT
+	AccountClaim = "account"
+	// UserClaim is the type of a user JWT
+	UserClaim = "user"
+	// ActivationClaim is the type of an activation JWT
+	ActivationClaim = "activation"
+	// GenericClaim is a type that doesn't match Operator/Account/User/ActivationClaim
+	GenericClaim = "generic"
+)
+
+func IsGenericClaimType(s string) bool {
+	switch s {
+	case OperatorClaim:
+		fallthrough
+	case AccountClaim:
+		fallthrough
+	case UserClaim:
+		fallthrough
+	case ActivationClaim:
+		return false
+	case GenericClaim:
+		return true
+	default:
+		return true
+	}
+}
+
+// Claims is the interface implemented by all JWT claims
+type Claims interface {
+	Claims() *ClaimsData
+	Encode(kp nkeys.KeyPair) (string, error)
+	ExpectedPrefixes() []nkeys.PrefixByte
+	Payload() interface{}
+	String() string
+	Validate(vr *ValidationResults)
+	ClaimType() ClaimType
+
+	verify(payload string, sig []byte) bool
+	updateVersion()
+}
+
+type GenericFields struct {
+	Tags TagList `json:"tags,omitempty"`
+	Type ClaimType `json:"type,omitempty"`
+	Version int `json:"version,omitempty"`
+}
+
+// ClaimsData is the base struct for all claims
+type ClaimsData struct {
+	Audience string `json:"aud,omitempty"`
+	Expires int64 `json:"exp,omitempty"`
+	ID string `json:"jti,omitempty"`
+	IssuedAt int64 `json:"iat,omitempty"`
+	Issuer string `json:"iss,omitempty"`
+	Name string `json:"name,omitempty"`
+	NotBefore int64 `json:"nbf,omitempty"`
+	Subject string `json:"sub,omitempty"`
+}
+
+// 
Prefix holds the prefix byte for an NKey +type Prefix struct { + nkeys.PrefixByte +} + +func encodeToString(d []byte) string { + return base64.RawURLEncoding.EncodeToString(d) +} + +func decodeString(s string) ([]byte, error) { + return base64.RawURLEncoding.DecodeString(s) +} + +func serialize(v interface{}) (string, error) { + j, err := json.Marshal(v) + if err != nil { + return "", err + } + return encodeToString(j), nil +} + +func (c *ClaimsData) doEncode(header *Header, kp nkeys.KeyPair, claim Claims) (string, error) { + if header == nil { + return "", errors.New("header is required") + } + + if kp == nil { + return "", errors.New("keypair is required") + } + + if c != claim.Claims() { + return "", errors.New("claim and claim data do not match") + } + + if c.Subject == "" { + return "", errors.New("subject is not set") + } + + h, err := serialize(header) + if err != nil { + return "", err + } + + issuerBytes, err := kp.PublicKey() + if err != nil { + return "", err + } + + prefixes := claim.ExpectedPrefixes() + if prefixes != nil { + ok := false + for _, p := range prefixes { + switch p { + case nkeys.PrefixByteAccount: + if nkeys.IsValidPublicAccountKey(issuerBytes) { + ok = true + } + case nkeys.PrefixByteOperator: + if nkeys.IsValidPublicOperatorKey(issuerBytes) { + ok = true + } + case nkeys.PrefixByteServer: + if nkeys.IsValidPublicServerKey(issuerBytes) { + ok = true + } + case nkeys.PrefixByteCluster: + if nkeys.IsValidPublicClusterKey(issuerBytes) { + ok = true + } + case nkeys.PrefixByteUser: + if nkeys.IsValidPublicUserKey(issuerBytes) { + ok = true + } + } + } + if !ok { + return "", fmt.Errorf("unable to validate expected prefixes - %v", prefixes) + } + } + + c.Issuer = issuerBytes + c.IssuedAt = time.Now().UTC().Unix() + c.ID = "" // to create a repeatable hash + c.ID, err = c.hash() + if err != nil { + return "", err + } + + claim.updateVersion() + + payload, err := serialize(claim) + if err != nil { + return "", err + } + + toSign := fmt.Sprintf("%s.%s", h, payload) + eSig := "" + if header.Algorithm == AlgorithmNkeyOld { + return "", errors.New(AlgorithmNkeyOld + " not supported to write jwtV2") + } else if header.Algorithm == AlgorithmNkey { + sig, err := kp.Sign([]byte(toSign)) + if err != nil { + return "", err + } + eSig = encodeToString(sig) + } else { + return "", errors.New(header.Algorithm + " not supported to write jwtV2") + } + // hash need no padding + return fmt.Sprintf("%s.%s", toSign, eSig), nil +} + +func (c *ClaimsData) hash() (string, error) { + j, err := json.Marshal(c) + if err != nil { + return "", err + } + h := sha512.New512_256() + h.Write(j) + return base32.StdEncoding.WithPadding(base32.NoPadding).EncodeToString(h.Sum(nil)), nil +} + +// Encode encodes a claim into a JWT token. The claim is signed with the +// provided nkey's private key +func (c *ClaimsData) encode(kp nkeys.KeyPair, payload Claims) (string, error) { + return c.doEncode(&Header{TokenTypeJwt, AlgorithmNkey}, kp, payload) +} + +// Returns a JSON representation of the claim +func (c *ClaimsData) String(claim interface{}) string { + j, err := json.MarshalIndent(claim, "", " ") + if err != nil { + return "" + } + return string(j) +} + +func parseClaims(s string, target Claims) error { + h, err := decodeString(s) + if err != nil { + return err + } + return json.Unmarshal(h, &target) +} + +// Verify verifies that the encoded payload was signed by the +// provided public key. Verify is called automatically with +// the claims portion of the token and the public key in the claim. 
+// Client code needs to ensure that the public key in the
+// claim is trusted.
+func (c *ClaimsData) verify(payload string, sig []byte) bool {
+	// decode the public key
+	kp, err := nkeys.FromPublicKey(c.Issuer)
+	if err != nil {
+		return false
+	}
+	if err := kp.Verify([]byte(payload), sig); err != nil {
+		return false
+	}
+	return true
+}
+
+// Validate checks a claim to make sure it is valid. Validity checks
+// include expiration and not before constraints.
+func (c *ClaimsData) Validate(vr *ValidationResults) {
+	now := time.Now().UTC().Unix()
+	if c.Expires > 0 && now > c.Expires {
+		vr.AddTimeCheck("claim is expired")
+	}
+
+	if c.NotBefore > 0 && c.NotBefore > now {
+		vr.AddTimeCheck("claim is not yet valid")
+	}
+}
+
+// IsSelfSigned returns true if the claims issuer is the subject
+func (c *ClaimsData) IsSelfSigned() bool {
+	return c.Issuer == c.Subject
+}
diff --git a/vendor/github.com/nats-io/jwt/v2/creds_utils.go b/vendor/github.com/nats-io/jwt/v2/creds_utils.go
new file mode 100644
index 00000000..94312ff5
--- /dev/null
+++ b/vendor/github.com/nats-io/jwt/v2/creds_utils.go
@@ -0,0 +1,265 @@
+/*
+ * Copyright 2019-2020 The NATS Authors
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package jwt
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strings"
+	"time"
+
+	"github.com/nats-io/nkeys"
+)
+
+// DecorateJWT returns a decorated JWT that describes the kind of JWT
+func DecorateJWT(jwtString string) ([]byte, error) {
+	gc, err := Decode(jwtString)
+	if err != nil {
+		return nil, err
+	}
+	return formatJwt(string(gc.ClaimType()), jwtString)
+}
+
+func formatJwt(kind string, jwtString string) ([]byte, error) {
+	templ := `-----BEGIN NATS %s JWT-----
+%s
+------END NATS %s JWT------
+
+`
+	w := bytes.NewBuffer(nil)
+	kind = strings.ToUpper(kind)
+	_, err := fmt.Fprintf(w, templ, kind, jwtString, kind)
+	if err != nil {
+		return nil, err
+	}
+	return w.Bytes(), nil
+}
+
+// DecorateSeed takes a seed and returns a string that wraps
+// the seed in the form:
+// ************************* IMPORTANT *************************
+// NKEY Seed printed below can be used to sign and prove identity.
+// NKEYs are sensitive and should be treated as secrets.
+//
+// -----BEGIN USER NKEY SEED-----
+// SUAIO3FHUX5PNV2LQIIP7TZ3N4L7TX3W53MQGEIVYFIGA635OZCKEYHFLM
+// ------END USER NKEY SEED------
+func DecorateSeed(seed []byte) ([]byte, error) {
+	w := bytes.NewBuffer(nil)
+	ts := bytes.TrimSpace(seed)
+	pre := string(ts[0:2])
+	kind := ""
+	switch pre {
+	case "SU":
+		kind = "USER"
+	case "SA":
+		kind = "ACCOUNT"
+	case "SO":
+		kind = "OPERATOR"
+	default:
+		return nil, errors.New("seed is not an operator, account or user seed")
+	}
+	header := `************************* IMPORTANT *************************
+NKEY Seed printed below can be used to sign and prove identity.
+NKEYs are sensitive and should be treated as secrets.
+
+-----BEGIN %s NKEY SEED-----
+`
+	_, err := fmt.Fprintf(w, header, kind)
+	if err != nil {
+		return nil, err
+	}
+	w.Write(ts)
+
+	footer := `
+------END %s NKEY SEED------
+
+*************************************************************
+`
+	_, err = fmt.Fprintf(w, footer, kind)
+	if err != nil {
+		return nil, err
+	}
+	return w.Bytes(), nil
+}
+
+var userConfigRE = regexp.MustCompile(`\s*(?:(?:[-]{3,}.*[-]{3,}\r?\n)([\w\-.=]+)(?:\r?\n[-]{3,}.*[-]{3,}(\r?\n|\z)))`)
+
+// A user config file looks like this:
+// -----BEGIN NATS USER JWT-----
+// eyJ0eXAiOiJqd3QiLCJhbGciOiJlZDI1NTE5...
+// ------END NATS USER JWT------
+//
+// ************************* IMPORTANT *************************
+// NKEY Seed printed below can be used to sign and prove identity.
+// NKEYs are sensitive and should be treated as secrets.
+//
+// -----BEGIN USER NKEY SEED-----
+// SUAIO3FHUX5PNV2LQIIP7TZ3N4L7TX3W53MQGEIVYFIGA635OZCKEYHFLM
+// ------END USER NKEY SEED------

+// FormatUserConfig returns a decorated file with a decorated JWT and decorated seed
+func FormatUserConfig(jwtString string, seed []byte) ([]byte, error) {
+	gc, err := Decode(jwtString)
+	if err != nil {
+		return nil, err
+	}
+	if gc.ClaimType() != UserClaim {
+		return nil, fmt.Errorf("%q cannot be serialized as a user config", string(gc.ClaimType()))
+	}
+
+	w := bytes.NewBuffer(nil)
+
+	jd, err := formatJwt(string(gc.ClaimType()), jwtString)
+	if err != nil {
+		return nil, err
+	}
+	_, err = w.Write(jd)
+	if err != nil {
+		return nil, err
+	}
+	if !bytes.HasPrefix(bytes.TrimSpace(seed), []byte("SU")) {
+		return nil, fmt.Errorf("nkey seed is not a user seed")
+	}
+
+	d, err := DecorateSeed(seed)
+	if err != nil {
+		return nil, err
+	}
+	_, err = w.Write(d)
+	if err != nil {
+		return nil, err
+	}
+
+	return w.Bytes(), nil
+}
+
+// ParseDecoratedJWT takes a creds file and returns the JWT portion.
+func ParseDecoratedJWT(contents []byte) (string, error) {
+	items := userConfigRE.FindAllSubmatch(contents, -1)
+	if len(items) == 0 {
+		return string(contents), nil
+	}
+	// First result should be the user JWT.
+	// We copy here so that if the file contained a seed file too we wipe appropriately.
+	raw := items[0][1]
+	tmp := make([]byte, len(raw))
+	copy(tmp, raw)
+	return string(tmp), nil
+}
+
+// ParseDecoratedNKey takes a creds file, finds the NKey portion and creates a
+// key pair from it.
+func ParseDecoratedNKey(contents []byte) (nkeys.KeyPair, error) {
+	var seed []byte
+
+	items := userConfigRE.FindAllSubmatch(contents, -1)
+	if len(items) > 1 {
+		seed = items[1][1]
+	} else {
+		lines := bytes.Split(contents, []byte("\n"))
+		for _, line := range lines {
+			if bytes.HasPrefix(bytes.TrimSpace(line), []byte("SO")) ||
+				bytes.HasPrefix(bytes.TrimSpace(line), []byte("SA")) ||
+				bytes.HasPrefix(bytes.TrimSpace(line), []byte("SU")) {
+				seed = line
+				break
+			}
+		}
+	}
+	if seed == nil {
+		return nil, errors.New("no nkey seed found")
+	}
+	if !bytes.HasPrefix(seed, []byte("SO")) &&
+		!bytes.HasPrefix(seed, []byte("SA")) &&
+		!bytes.HasPrefix(seed, []byte("SU")) {
+		return nil, errors.New("doesn't contain a seed nkey")
+	}
+	kp, err := nkeys.FromSeed(seed)
+	if err != nil {
+		return nil, err
+	}
+	return kp, nil
+}
+
+// ParseDecoratedUserNKey takes a creds file, finds the NKey portion and creates a
+// key pair from it. Similar to ParseDecoratedNKey but fails for non-user keys.
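+// A usage sketch (assuming creds holds the raw bytes of a .creds file):
+//
+//	kp, err := ParseDecoratedUserNKey(creds)
+//	if err != nil {
+//		// no user seed found in the file
+//	}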
+func ParseDecoratedUserNKey(contents []byte) (nkeys.KeyPair, error) {
+	nk, err := ParseDecoratedNKey(contents)
+	if err != nil {
+		return nil, err
+	}
+	seed, err := nk.Seed()
+	if err != nil {
+		return nil, err
+	}
+	if !bytes.HasPrefix(seed, []byte("SU")) {
+		return nil, errors.New("doesn't contain a user seed nkey")
+	}
+	kp, err := nkeys.FromSeed(seed)
+	if err != nil {
+		return nil, err
+	}
+	return kp, nil
+}
+
+// IssueUserJWT takes an account scoped signing key, account id, and user public key (and optionally a user's name, an expiration duration and tags) and returns a valid signed JWT.
+// The scopedSigningKey is a mandatory account scoped signing nkey pair used to sign the generated jwt (note that it _must_ be a signing key attached to the account (and a _scoped_ signing key), not the account's private (seed) key).
+// The accountId is a mandatory public account nkey. Will return an error when not set or not an account nkey.
+// The publicUserKey is a mandatory public user nkey. Will return an error when not set or not a user nkey.
+// The name is an optional human-readable name. When absent, defaults to publicUserKey.
+// The expirationDuration is an optional but recommended duration after which the generated jwt expires. If not set, the JWT will not expire.
+// The tags are an optional list of tags to be included in the JWT.
+//
+// Returns:
+// string, the resulting jwt.
+// error, when issues arose.
+func IssueUserJWT(scopedSigningKey nkeys.KeyPair, accountId string, publicUserKey string, name string, expirationDuration time.Duration, tags ...string) (string, error) {
+
+	if !nkeys.IsValidPublicAccountKey(accountId) {
+		return "", errors.New("issueUserJWT requires an account key for the accountId parameter, but got " + nkeys.Prefix(accountId).String())
+	}
+
+	if !nkeys.IsValidPublicUserKey(publicUserKey) {
+		return "", errors.New("issueUserJWT requires a user key for the publicUserKey parameter, but got " + nkeys.Prefix(publicUserKey).String())
+	}
+
+	claim := NewUserClaims(publicUserKey)
+	claim.SetScoped(true)
+
+	if expirationDuration != 0 {
+		claim.Expires = time.Now().Add(expirationDuration).UTC().Unix()
+	}
+
+	claim.IssuerAccount = accountId
+	if name != "" {
+		claim.Name = name
+	} else {
+		claim.Name = publicUserKey
+	}
+
+	claim.Subject = publicUserKey
+	claim.Tags = tags
+
+	encoded, err := claim.Encode(scopedSigningKey)
+	if err != nil {
+		return "", errors.New("err encoding claim " + err.Error())
+	}
+
+	return encoded, nil
+}
diff --git a/vendor/github.com/nats-io/jwt/v2/decoder.go b/vendor/github.com/nats-io/jwt/v2/decoder.go
new file mode 100644
index 00000000..a887efde
--- /dev/null
+++ b/vendor/github.com/nats-io/jwt/v2/decoder.go
@@ -0,0 +1,156 @@
+/*
+ * Copyright 2020 The NATS Authors
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +package jwt + +import ( + "encoding/json" + "errors" + "fmt" + "strings" + + "github.com/nats-io/nkeys" +) + +const libVersion = 2 + +type identifier struct { + Type ClaimType `json:"type,omitempty"` + GenericFields `json:"nats,omitempty"` +} + +func (i *identifier) Kind() ClaimType { + if i.Type != "" { + return i.Type + } + return i.GenericFields.Type +} + +func (i *identifier) Version() int { + if i.Type != "" { + return 1 + } + return i.GenericFields.Version +} + +type v1ClaimsDataDeletedFields struct { + Tags TagList `json:"tags,omitempty"` + Type ClaimType `json:"type,omitempty"` + IssuerAccount string `json:"issuer_account,omitempty"` +} + +// Decode takes a JWT string decodes it and validates it +// and return the embedded Claims. If the token header +// doesn't match the expected algorithm, or the claim is +// not valid or verification fails an error is returned. +func Decode(token string) (Claims, error) { + // must have 3 chunks + chunks := strings.Split(token, ".") + if len(chunks) != 3 { + return nil, errors.New("expected 3 chunks") + } + + // header + if _, err := parseHeaders(chunks[0]); err != nil { + return nil, err + } + // claim + data, err := decodeString(chunks[1]) + if err != nil { + return nil, err + } + ver, claim, err := loadClaims(data) + if err != nil { + return nil, err + } + + // sig + sig, err := decodeString(chunks[2]) + if err != nil { + return nil, err + } + + if ver <= 1 { + if !claim.verify(chunks[1], sig) { + return nil, errors.New("claim failed V1 signature verification") + } + } else { + if !claim.verify(token[:len(chunks[0])+len(chunks[1])+1], sig) { + return nil, errors.New("claim failed V2 signature verification") + } + } + + prefixes := claim.ExpectedPrefixes() + if prefixes != nil { + ok := false + issuer := claim.Claims().Issuer + for _, p := range prefixes { + switch p { + case nkeys.PrefixByteAccount: + if nkeys.IsValidPublicAccountKey(issuer) { + ok = true + } + case nkeys.PrefixByteOperator: + if nkeys.IsValidPublicOperatorKey(issuer) { + ok = true + } + case nkeys.PrefixByteUser: + if nkeys.IsValidPublicUserKey(issuer) { + ok = true + } + } + } + if !ok { + return nil, fmt.Errorf("unable to validate expected prefixes - %v", prefixes) + } + } + return claim, nil +} + +func loadClaims(data []byte) (int, Claims, error) { + var id identifier + if err := json.Unmarshal(data, &id); err != nil { + return -1, nil, err + } + + if id.Version() > libVersion { + return -1, nil, errors.New("JWT was generated by a newer version ") + } + + var claim Claims + var err error + switch id.Kind() { + case OperatorClaim: + claim, err = loadOperator(data, id.Version()) + case AccountClaim: + claim, err = loadAccount(data, id.Version()) + case UserClaim: + claim, err = loadUser(data, id.Version()) + case ActivationClaim: + claim, err = loadActivation(data, id.Version()) + case "cluster": + return -1, nil, errors.New("ClusterClaims are not supported") + case "server": + return -1, nil, errors.New("ServerClaims are not supported") + default: + var gc GenericClaims + if err := json.Unmarshal(data, &gc); err != nil { + return -1, nil, err + } + return -1, &gc, nil + } + + return id.Version(), claim, err +} diff --git a/vendor/github.com/nats-io/jwt/v2/decoder_account.go b/vendor/github.com/nats-io/jwt/v2/decoder_account.go new file mode 100644 index 00000000..025aa0e1 --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/decoder_account.go @@ -0,0 +1,87 @@ +/* + * Copyright 2020 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); 
+ * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "encoding/json" + "fmt" +) + +type v1NatsAccount struct { + Imports Imports `json:"imports,omitempty"` + Exports Exports `json:"exports,omitempty"` + Limits struct { + NatsLimits + AccountLimits + } `json:"limits,omitempty"` + SigningKeys StringList `json:"signing_keys,omitempty"` + Revocations RevocationList `json:"revocations,omitempty"` +} + +func loadAccount(data []byte, version int) (*AccountClaims, error) { + switch version { + case 1: + var v1a v1AccountClaims + if err := json.Unmarshal(data, &v1a); err != nil { + return nil, err + } + return v1a.Migrate() + case 2: + var v2a AccountClaims + v2a.SigningKeys = make(SigningKeys) + if err := json.Unmarshal(data, &v2a); err != nil { + return nil, err + } + if len(v2a.Limits.JetStreamTieredLimits) > 0 { + v2a.Limits.JetStreamLimits = JetStreamLimits{} + } + return &v2a, nil + default: + return nil, fmt.Errorf("library supports version %d or less - received %d", libVersion, version) + } +} + +type v1AccountClaims struct { + ClaimsData + v1ClaimsDataDeletedFields + v1NatsAccount `json:"nats,omitempty"` +} + +func (oa v1AccountClaims) Migrate() (*AccountClaims, error) { + return oa.migrateV1() +} + +func (oa v1AccountClaims) migrateV1() (*AccountClaims, error) { + var a AccountClaims + // copy the base claim + a.ClaimsData = oa.ClaimsData + // move the moved fields + a.Account.Type = oa.v1ClaimsDataDeletedFields.Type + a.Account.Tags = oa.v1ClaimsDataDeletedFields.Tags + // copy the account data + a.Account.Imports = oa.v1NatsAccount.Imports + a.Account.Exports = oa.v1NatsAccount.Exports + a.Account.Limits.AccountLimits = oa.v1NatsAccount.Limits.AccountLimits + a.Account.Limits.NatsLimits = oa.v1NatsAccount.Limits.NatsLimits + a.Account.Limits.JetStreamLimits = JetStreamLimits{} + a.Account.SigningKeys = make(SigningKeys) + for _, v := range oa.SigningKeys { + a.Account.SigningKeys.Add(v) + } + a.Account.Revocations = oa.v1NatsAccount.Revocations + a.Version = 1 + return &a, nil +} diff --git a/vendor/github.com/nats-io/jwt/v2/decoder_activation.go b/vendor/github.com/nats-io/jwt/v2/decoder_activation.go new file mode 100644 index 00000000..d10dbf57 --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/decoder_activation.go @@ -0,0 +1,77 @@ +/* + * Copyright 2020 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package jwt + +import ( + "encoding/json" + "fmt" +) + +// Migration adds GenericFields +type v1NatsActivation struct { + ImportSubject Subject `json:"subject,omitempty"` + ImportType ExportType `json:"type,omitempty"` + // Limit values deprecated inv v2 + Max int64 `json:"max,omitempty"` + Payload int64 `json:"payload,omitempty"` + Src string `json:"src,omitempty"` + Times []TimeRange `json:"times,omitempty"` +} + +type v1ActivationClaims struct { + ClaimsData + v1ClaimsDataDeletedFields + v1NatsActivation `json:"nats,omitempty"` +} + +func loadActivation(data []byte, version int) (*ActivationClaims, error) { + switch version { + case 1: + var v1a v1ActivationClaims + v1a.Max = NoLimit + v1a.Payload = NoLimit + if err := json.Unmarshal(data, &v1a); err != nil { + return nil, err + } + return v1a.Migrate() + case 2: + var v2a ActivationClaims + if err := json.Unmarshal(data, &v2a); err != nil { + return nil, err + } + return &v2a, nil + default: + return nil, fmt.Errorf("library supports version %d or less - received %d", libVersion, version) + } +} + +func (oa v1ActivationClaims) Migrate() (*ActivationClaims, error) { + return oa.migrateV1() +} + +func (oa v1ActivationClaims) migrateV1() (*ActivationClaims, error) { + var a ActivationClaims + // copy the base claim + a.ClaimsData = oa.ClaimsData + // move the moved fields + a.Activation.Type = oa.v1ClaimsDataDeletedFields.Type + a.Activation.Tags = oa.v1ClaimsDataDeletedFields.Tags + a.Activation.IssuerAccount = oa.v1ClaimsDataDeletedFields.IssuerAccount + // copy the activation data + a.ImportSubject = oa.ImportSubject + a.ImportType = oa.ImportType + a.Version = 1 + return &a, nil +} diff --git a/vendor/github.com/nats-io/jwt/v2/decoder_operator.go b/vendor/github.com/nats-io/jwt/v2/decoder_operator.go new file mode 100644 index 00000000..511b204c --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/decoder_operator.go @@ -0,0 +1,73 @@ +/* + * Copyright 2020 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package jwt + +import ( + "encoding/json" + "fmt" +) + +type v1NatsOperator struct { + SigningKeys StringList `json:"signing_keys,omitempty"` + AccountServerURL string `json:"account_server_url,omitempty"` + OperatorServiceURLs StringList `json:"operator_service_urls,omitempty"` + SystemAccount string `json:"system_account,omitempty"` +} + +func loadOperator(data []byte, version int) (*OperatorClaims, error) { + switch version { + case 1: + var v1a v1OperatorClaims + if err := json.Unmarshal(data, &v1a); err != nil { + return nil, err + } + return v1a.Migrate() + case 2: + var v2a OperatorClaims + if err := json.Unmarshal(data, &v2a); err != nil { + return nil, err + } + return &v2a, nil + default: + return nil, fmt.Errorf("library supports version %d or less - received %d", libVersion, version) + } +} + +type v1OperatorClaims struct { + ClaimsData + v1ClaimsDataDeletedFields + v1NatsOperator `json:"nats,omitempty"` +} + +func (oa v1OperatorClaims) Migrate() (*OperatorClaims, error) { + return oa.migrateV1() +} + +func (oa v1OperatorClaims) migrateV1() (*OperatorClaims, error) { + var a OperatorClaims + // copy the base claim + a.ClaimsData = oa.ClaimsData + // move the moved fields + a.Operator.Type = oa.v1ClaimsDataDeletedFields.Type + a.Operator.Tags = oa.v1ClaimsDataDeletedFields.Tags + // copy the account data + a.Operator.SigningKeys = oa.v1NatsOperator.SigningKeys + a.Operator.AccountServerURL = oa.v1NatsOperator.AccountServerURL + a.Operator.OperatorServiceURLs = oa.v1NatsOperator.OperatorServiceURLs + a.Operator.SystemAccount = oa.v1NatsOperator.SystemAccount + a.Version = 1 + return &a, nil +} diff --git a/vendor/github.com/nats-io/jwt/v2/decoder_user.go b/vendor/github.com/nats-io/jwt/v2/decoder_user.go new file mode 100644 index 00000000..e3e53c41 --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/decoder_user.go @@ -0,0 +1,81 @@ +/* + * Copyright 2020 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package jwt + +import ( + "encoding/json" + "fmt" +) + +type v1User struct { + Permissions + Limits + BearerToken bool `json:"bearer_token,omitempty"` + // Limit values deprecated inv v2 + Max int64 `json:"max,omitempty"` +} + +type v1UserClaimsDataDeletedFields struct { + v1ClaimsDataDeletedFields + IssuerAccount string `json:"issuer_account,omitempty"` +} + +type v1UserClaims struct { + ClaimsData + v1UserClaimsDataDeletedFields + v1User `json:"nats,omitempty"` +} + +func loadUser(data []byte, version int) (*UserClaims, error) { + switch version { + case 1: + var v1a v1UserClaims + v1a.Limits = Limits{NatsLimits: NatsLimits{NoLimit, NoLimit, NoLimit}} + v1a.Max = NoLimit + if err := json.Unmarshal(data, &v1a); err != nil { + return nil, err + } + return v1a.Migrate() + case 2: + var v2a UserClaims + if err := json.Unmarshal(data, &v2a); err != nil { + return nil, err + } + return &v2a, nil + default: + return nil, fmt.Errorf("library supports version %d or less - received %d", libVersion, version) + } +} + +func (oa v1UserClaims) Migrate() (*UserClaims, error) { + return oa.migrateV1() +} + +func (oa v1UserClaims) migrateV1() (*UserClaims, error) { + var u UserClaims + // copy the base claim + u.ClaimsData = oa.ClaimsData + // move the moved fields + u.User.Type = oa.v1ClaimsDataDeletedFields.Type + u.User.Tags = oa.v1ClaimsDataDeletedFields.Tags + u.User.IssuerAccount = oa.IssuerAccount + // copy the user data + u.User.Permissions = oa.v1User.Permissions + u.User.Limits = oa.v1User.Limits + u.User.BearerToken = oa.v1User.BearerToken + u.Version = 1 + return &u, nil +} diff --git a/vendor/github.com/nats-io/jwt/v2/exports.go b/vendor/github.com/nats-io/jwt/v2/exports.go new file mode 100644 index 00000000..24715f7c --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/exports.go @@ -0,0 +1,313 @@ +/* + * Copyright 2018-2019 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "encoding/json" + "fmt" + "strings" + "time" +) + +// ResponseType is used to store an export response type +type ResponseType string + +const ( + // ResponseTypeSingleton is used for a service that sends a single response only + ResponseTypeSingleton = "Singleton" + + // ResponseTypeStream is used for a service that will send multiple responses + ResponseTypeStream = "Stream" + + // ResponseTypeChunked is used for a service that sends a single response in chunks (so not quite a stream) + ResponseTypeChunked = "Chunked" +) + +// ServiceLatency is used when observing and exported service for +// latency measurements. +// Sampling 1-100, represents sampling rate, defaults to 100. +// Results is the subject where the latency metrics are published. +// A metric will be defined by the nats-server's ServiceLatency. Time durations +// are in nanoseconds. +// see https://github.com/nats-io/nats-server/blob/main/server/accounts.go#L524 +// e.g. 
+// {
+//  "app": "dlc22",
+//  "start": "2019-09-16T21:46:23.636869585-07:00",
+//  "svc": 219732,
+//  "nats": {
+//    "req": 320415,
+//    "resp": 228268,
+//    "sys": 0
+//  },
+//  "total": 768415
+// }
+//
+type ServiceLatency struct {
+	Sampling SamplingRate `json:"sampling"`
+	Results  Subject      `json:"results"`
+}
+
+type SamplingRate int
+
+const Headers = SamplingRate(0)
+
+// MarshalJSON marshals the field as "headers" or percentages
+func (r *SamplingRate) MarshalJSON() ([]byte, error) {
+	sr := *r
+	if sr == 0 {
+		return []byte(`"headers"`), nil
+	}
+	if sr >= 1 && sr <= 100 {
+		return []byte(fmt.Sprintf("%d", sr)), nil
+	}
+	return nil, fmt.Errorf("unknown sampling rate")
+}
+
+// UnmarshalJSON unmarshals numbers as percentages or "headers"
+func (t *SamplingRate) UnmarshalJSON(b []byte) error {
+	if len(b) == 0 {
+		return fmt.Errorf("empty sampling rate")
+	}
+	if strings.ToLower(string(b)) == `"headers"` {
+		*t = Headers
+		return nil
+	}
+	var j int
+	err := json.Unmarshal(b, &j)
+	if err != nil {
+		return err
+	}
+	*t = SamplingRate(j)
+	return nil
+}
+
+func (sl *ServiceLatency) Validate(vr *ValidationResults) {
+	if sl.Sampling != 0 {
+		if sl.Sampling < 1 || sl.Sampling > 100 {
+			vr.AddError("sampling percentage needs to be between 1-100")
+		}
+	}
+	sl.Results.Validate(vr)
+	if sl.Results.HasWildCards() {
+		vr.AddError("results subject can not contain wildcards")
+	}
+}
+
+// Export represents a single export
+type Export struct {
+	Name                 string          `json:"name,omitempty"`
+	Subject              Subject         `json:"subject,omitempty"`
+	Type                 ExportType      `json:"type,omitempty"`
+	TokenReq             bool            `json:"token_req,omitempty"`
+	Revocations          RevocationList  `json:"revocations,omitempty"`
+	ResponseType         ResponseType    `json:"response_type,omitempty"`
+	ResponseThreshold    time.Duration   `json:"response_threshold,omitempty"`
+	Latency              *ServiceLatency `json:"service_latency,omitempty"`
+	AccountTokenPosition uint            `json:"account_token_position,omitempty"`
+	Advertise            bool            `json:"advertise,omitempty"`
+	Info
+}
+
+// IsService returns true if an export is for a service
+func (e *Export) IsService() bool {
+	return e.Type == Service
+}
+
+// IsStream returns true if an export is for a stream
+func (e *Export) IsStream() bool {
+	return e.Type == Stream
+}
+
+// IsSingleResponse returns true if an export has a single response
+// or no response type is set, also checks that the type is service
+func (e *Export) IsSingleResponse() bool {
+	return e.Type == Service && (e.ResponseType == ResponseTypeSingleton || e.ResponseType == "")
+}
+
+// IsChunkedResponse returns true if an export has a chunked response
+func (e *Export) IsChunkedResponse() bool {
+	return e.Type == Service && e.ResponseType == ResponseTypeChunked
+}
+
+// IsStreamResponse returns true if an export has a stream response
+func (e *Export) IsStreamResponse() bool {
+	return e.Type == Service && e.ResponseType == ResponseTypeStream
+}
+
+// Validate appends validation issues to the passed in results list
+func (e *Export) Validate(vr *ValidationResults) {
+	if e == nil {
+		vr.AddError("null export is not allowed")
+		return
+	}
+	if !e.IsService() && !e.IsStream() {
+		vr.AddError("invalid export type: %q", e.Type)
+	}
+	if e.IsService() && !e.IsSingleResponse() && !e.IsChunkedResponse() && !e.IsStreamResponse() {
+		vr.AddError("invalid response type for service: %q", e.ResponseType)
+	}
+	if e.IsStream() && e.ResponseType != "" {
+		vr.AddError("invalid response type for stream: %q", e.ResponseType)
+	}
+	if e.Latency != nil {
+		if !e.IsService() {
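+			// Latency tracking relies on request/reply semantics, which only
+			// service exports have, so stream exports are rejected here.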
vr.AddError("latency tracking only permitted for services") + } + e.Latency.Validate(vr) + } + if e.ResponseThreshold.Nanoseconds() < 0 { + vr.AddError("negative response threshold is invalid") + } + if e.ResponseThreshold.Nanoseconds() > 0 && !e.IsService() { + vr.AddError("response threshold only valid for services") + } + e.Subject.Validate(vr) + if e.AccountTokenPosition > 0 { + if !e.Subject.HasWildCards() { + vr.AddError("Account Token Position can only be used with wildcard subjects: %s", e.Subject) + } else { + subj := string(e.Subject) + token := strings.Split(subj, ".") + tkCnt := uint(len(token)) + if e.AccountTokenPosition > tkCnt { + vr.AddError("Account Token Position %d exceeds length of subject '%s'", + e.AccountTokenPosition, e.Subject) + } else if tk := token[e.AccountTokenPosition-1]; tk != "*" { + vr.AddError("Account Token Position %d matches '%s' but must match a * in: %s", + e.AccountTokenPosition, tk, e.Subject) + } + } + } + e.Info.Validate(vr) +} + +// Revoke enters a revocation by publickey using time.Now(). +func (e *Export) Revoke(pubKey string) { + e.RevokeAt(pubKey, time.Now()) +} + +// RevokeAt enters a revocation by publickey and timestamp into this export +// If there is already a revocation for this public key that is newer, it is kept. +func (e *Export) RevokeAt(pubKey string, timestamp time.Time) { + if e.Revocations == nil { + e.Revocations = RevocationList{} + } + + e.Revocations.Revoke(pubKey, timestamp) +} + +// ClearRevocation removes any revocation for the public key +func (e *Export) ClearRevocation(pubKey string) { + e.Revocations.ClearRevocation(pubKey) +} + +// isRevoked checks if the public key is in the revoked list with a timestamp later than the one passed in. +// Generally this method is called with the subject and issue time of the jwt to be tested. +// DO NOT pass time.Now(), it will not produce a stable/expected response. +func (e *Export) isRevoked(pubKey string, claimIssuedAt time.Time) bool { + return e.Revocations.IsRevoked(pubKey, claimIssuedAt) +} + +// IsClaimRevoked checks if the activation revoked the claim passed in. +// Invalid claims (nil, no Subject or IssuedAt) will return true. +func (e *Export) IsClaimRevoked(claim *ActivationClaims) bool { + if claim == nil || claim.IssuedAt == 0 || claim.Subject == "" { + return true + } + return e.isRevoked(claim.Subject, time.Unix(claim.IssuedAt, 0)) +} + +// Exports is a slice of exports +type Exports []*Export + +// Add appends exports to the list +func (e *Exports) Add(i ...*Export) { + *e = append(*e, i...) 
+} + +func isContainedIn(kind ExportType, subjects []Subject, vr *ValidationResults) { + m := make(map[string]string) + for i, ns := range subjects { + for j, s := range subjects { + if i == j { + continue + } + if ns.IsContainedIn(s) { + str := string(s) + _, ok := m[str] + if !ok { + m[str] = string(ns) + } + } + } + } + + if len(m) != 0 { + for k, v := range m { + var vi ValidationIssue + vi.Blocking = true + vi.Description = fmt.Sprintf("%s export subject %q already exports %q", kind, k, v) + vr.Add(&vi) + } + } +} + +// Validate calls validate on all of the exports +func (e *Exports) Validate(vr *ValidationResults) error { + var serviceSubjects []Subject + var streamSubjects []Subject + + for _, v := range *e { + if v == nil { + vr.AddError("null export is not allowed") + continue + } + if v.IsService() { + serviceSubjects = append(serviceSubjects, v.Subject) + } else { + streamSubjects = append(streamSubjects, v.Subject) + } + v.Validate(vr) + } + + isContainedIn(Service, serviceSubjects, vr) + isContainedIn(Stream, streamSubjects, vr) + + return nil +} + +// HasExportContainingSubject checks if the export list has an export with the provided subject +func (e *Exports) HasExportContainingSubject(subject Subject) bool { + for _, s := range *e { + if subject.IsContainedIn(s.Subject) { + return true + } + } + return false +} + +func (e Exports) Len() int { + return len(e) +} + +func (e Exports) Swap(i, j int) { + e[i], e[j] = e[j], e[i] +} + +func (e Exports) Less(i, j int) bool { + return e[i].Subject < e[j].Subject +} diff --git a/vendor/github.com/nats-io/jwt/v2/genericlaims.go b/vendor/github.com/nats-io/jwt/v2/genericlaims.go new file mode 100644 index 00000000..6793c9ea --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/genericlaims.go @@ -0,0 +1,157 @@ +/* + * Copyright 2018-2020 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package jwt + +import ( + "encoding/json" + "errors" + "strings" + + "github.com/nats-io/nkeys" +) + +// GenericClaims can be used to read a JWT as a map for any non-generic fields +type GenericClaims struct { + ClaimsData + Data map[string]interface{} `json:"nats,omitempty"` +} + +// NewGenericClaims creates a map-based Claims +func NewGenericClaims(subject string) *GenericClaims { + if subject == "" { + return nil + } + c := GenericClaims{} + c.Subject = subject + c.Data = make(map[string]interface{}) + return &c +} + +// DecodeGeneric takes a JWT string and decodes it into a ClaimsData and map +func DecodeGeneric(token string) (*GenericClaims, error) { + // must have 3 chunks + chunks := strings.Split(token, ".") + if len(chunks) != 3 { + return nil, errors.New("expected 3 chunks") + } + + // header + header, err := parseHeaders(chunks[0]) + if err != nil { + return nil, err + } + // claim + data, err := decodeString(chunks[1]) + if err != nil { + return nil, err + } + + gc := struct { + GenericClaims + GenericFields + }{} + if err := json.Unmarshal(data, &gc); err != nil { + return nil, err + } + + // sig + sig, err := decodeString(chunks[2]) + if err != nil { + return nil, err + } + + if header.Algorithm == AlgorithmNkeyOld { + if !gc.verify(chunks[1], sig) { + return nil, errors.New("claim failed V1 signature verification") + } + if tp := gc.GenericFields.Type; tp != "" { + // the conversion needs to be from a string because + // on custom types the type is not going to be one of + // the constants + gc.GenericClaims.Data["type"] = string(tp) + } + if tp := gc.GenericFields.Tags; len(tp) != 0 { + gc.GenericClaims.Data["tags"] = tp + } + + } else { + if !gc.verify(token[:len(chunks[0])+len(chunks[1])+1], sig) { + return nil, errors.New("claim failed V2 signature verification") + } + } + return &gc.GenericClaims, nil +} + +// Claims returns the standard part of the generic claim +func (gc *GenericClaims) Claims() *ClaimsData { + return &gc.ClaimsData +} + +// Payload returns the custom part of the claims data +func (gc *GenericClaims) Payload() interface{} { + return &gc.Data +} + +// Encode takes a generic claims and creates a JWT string +func (gc *GenericClaims) Encode(pair nkeys.KeyPair) (string, error) { + return gc.ClaimsData.encode(pair, gc) +} + +// Validate checks the generic part of the claims data +func (gc *GenericClaims) Validate(vr *ValidationResults) { + gc.ClaimsData.Validate(vr) +} + +func (gc *GenericClaims) String() string { + return gc.ClaimsData.String(gc) +} + +// ExpectedPrefixes returns the types allowed to encode a generic JWT, which is nil for all +func (gc *GenericClaims) ExpectedPrefixes() []nkeys.PrefixByte { + return nil +} + +func (gc *GenericClaims) ClaimType() ClaimType { + v, ok := gc.Data["type"] + if !ok { + v, ok = gc.Data["nats"] + if ok { + m, ok := v.(map[string]interface{}) + if ok { + v = m["type"] + } + } + } + + switch ct := v.(type) { + case string: + if IsGenericClaimType(ct) { + return GenericClaim + } + return ClaimType(ct) + case ClaimType: + return ct + default: + return "" + } +} + +func (gc *GenericClaims) updateVersion() { + if gc.Data != nil { + // store as float as that is what decoding with json does too + gc.Data["version"] = float64(libVersion) + } +} diff --git a/vendor/github.com/nats-io/jwt/v2/go_test.mod b/vendor/github.com/nats-io/jwt/v2/go_test.mod new file mode 100644 index 00000000..d1b7f95f --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/go_test.mod @@ -0,0 +1,10 @@ +module github.com/nats-io/jwt/v2 + +go 
1.16 + +require ( + github.com/nats-io/jwt v1.2.2 + github.com/nats-io/nkeys v0.3.0 +) + +replace github.com/nats-io/jwt v1.2.2 => ../ diff --git a/vendor/github.com/nats-io/jwt/v2/go_test.sum b/vendor/github.com/nats-io/jwt/v2/go_test.sum new file mode 100644 index 00000000..e2eb2522 --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/go_test.sum @@ -0,0 +1,9 @@ +github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b h1:wSOdpTq0/eI46Ez/LkDwIsAKA71YP2SRKBODiRWM0as= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/vendor/github.com/nats-io/jwt/v2/header.go b/vendor/github.com/nats-io/jwt/v2/header.go new file mode 100644 index 00000000..eadd4eaa --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/header.go @@ -0,0 +1,78 @@ +/* + * Copyright 2018-2019 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "encoding/json" + "fmt" + "strings" +) + +const ( + // Version is semantic version. + Version = "2.3.0" + + // TokenTypeJwt is the JWT token type supported JWT tokens + // encoded and decoded by this library + // from RFC7519 5.1 "typ": + // it is RECOMMENDED that "JWT" always be spelled using uppercase characters for compatibility + TokenTypeJwt = "JWT" + + // AlgorithmNkey is the algorithm supported by JWT tokens + // encoded and decoded by this library + AlgorithmNkeyOld = "ed25519" + AlgorithmNkey = AlgorithmNkeyOld + "-nkey" +) + +// Header is a JWT Jose Header +type Header struct { + Type string `json:"typ"` + Algorithm string `json:"alg"` +} + +// Parses a header JWT token +func parseHeaders(s string) (*Header, error) { + h, err := decodeString(s) + if err != nil { + return nil, err + } + header := Header{} + if err := json.Unmarshal(h, &header); err != nil { + return nil, err + } + + if err := header.Valid(); err != nil { + return nil, err + } + return &header, nil +} + +// Valid validates the Header. It returns nil if the Header is +// a JWT header, and the algorithm used is the NKEY algorithm. 
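+//
+// For reference, a header this library accepts looks like this (the values
+// follow the constants above):
+//
+//	{"typ":"JWT","alg":"ed25519-nkey"}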
+func (h *Header) Valid() error { + if TokenTypeJwt != strings.ToUpper(h.Type) { + return fmt.Errorf("not supported type %q", h.Type) + } + + alg := strings.ToLower(h.Algorithm) + if !strings.HasPrefix(alg, AlgorithmNkeyOld) { + return fmt.Errorf("unexpected %q algorithm", h.Algorithm) + } + if AlgorithmNkeyOld != alg && AlgorithmNkey != alg { + return fmt.Errorf("unexpected %q algorithm", h.Algorithm) + } + return nil +} diff --git a/vendor/github.com/nats-io/jwt/v2/imports.go b/vendor/github.com/nats-io/jwt/v2/imports.go new file mode 100644 index 00000000..4dc2f399 --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/imports.go @@ -0,0 +1,168 @@ +/* + * Copyright 2018-2020 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +// Import describes a mapping from another account into this one +type Import struct { + Name string `json:"name,omitempty"` + // Subject field in an import is always from the perspective of the + // initial publisher - in the case of a stream it is the account owning + // the stream (the exporter), and in the case of a service it is the + // account making the request (the importer). + Subject Subject `json:"subject,omitempty"` + Account string `json:"account,omitempty"` + Token string `json:"token,omitempty"` + // Deprecated: use LocalSubject instead + // To field in an import is always from the perspective of the subscriber + // in the case of a stream it is the client of the stream (the importer), + // from the perspective of a service, it is the subscription waiting for + // requests (the exporter). If the field is empty, it will default to the + // value in the Subject field. + To Subject `json:"to,omitempty"` + // Local subject used to subscribe (for streams) and publish (for services) to. + // This value only needs setting if you want to change the value of Subject. + // If the value of Subject ends in > then LocalSubject needs to end in > as well. + // LocalSubject can contain $ wildcard references where number references the nth wildcard in Subject. + // The sum of wildcard reference and * tokens needs to match the number of * token in Subject. 
+ LocalSubject RenamingSubject `json:"local_subject,omitempty"` + Type ExportType `json:"type,omitempty"` + Share bool `json:"share,omitempty"` +} + +// IsService returns true if the import is of type service +func (i *Import) IsService() bool { + return i.Type == Service +} + +// IsStream returns true if the import is of type stream +func (i *Import) IsStream() bool { + return i.Type == Stream +} + +// Returns the value of To without triggering the deprecation warning for a read +func (i *Import) GetTo() string { + return string(i.To) +} + +// Validate checks if an import is valid for the wrapping account +func (i *Import) Validate(actPubKey string, vr *ValidationResults) { + if i == nil { + vr.AddError("null import is not allowed") + return + } + if !i.IsService() && !i.IsStream() { + vr.AddError("invalid import type: %q", i.Type) + } + + if i.Account == "" { + vr.AddError("account to import from is not specified") + } + + if i.GetTo() != "" { + vr.AddWarning("the field to has been deprecated (use LocalSubject instead)") + } + + i.Subject.Validate(vr) + if i.LocalSubject != "" { + i.LocalSubject.Validate(i.Subject, vr) + if i.To != "" { + vr.AddError("Local Subject replaces To") + } + } + + if i.Share && !i.IsService() { + vr.AddError("sharing information (for latency tracking) is only valid for services: %q", i.Subject) + } + var act *ActivationClaims + + if i.Token != "" { + var err error + act, err = DecodeActivationClaims(i.Token) + if err != nil { + vr.AddError("import %q contains an invalid activation token", i.Subject) + } + } + + if act != nil { + if !(act.Issuer == i.Account || act.IssuerAccount == i.Account) { + vr.AddError("activation token doesn't match account for import %q", i.Subject) + } + if act.ClaimsData.Subject != actPubKey { + vr.AddError("activation token doesn't match account it is being included in, %q", i.Subject) + } + if act.ImportType != i.Type { + vr.AddError("mismatch between token import type %s and type of import %s", act.ImportType, i.Type) + } + act.validateWithTimeChecks(vr, false) + subj := i.Subject + if i.IsService() && i.To != "" { + subj = i.To + } + if !subj.IsContainedIn(act.ImportSubject) { + vr.AddError("activation token import subject %q doesn't match import %q", act.ImportSubject, i.Subject) + } + } +} + +// Imports is a list of import structs +type Imports []*Import + +// Validate checks if an import is valid for the wrapping account +func (i *Imports) Validate(acctPubKey string, vr *ValidationResults) { + toSet := make(map[Subject]struct{}, len(*i)) + for _, v := range *i { + if v == nil { + vr.AddError("null import is not allowed") + continue + } + if v.Type == Service { + sub := v.To + if sub == "" { + sub = v.LocalSubject.ToSubject() + } + if sub == "" { + sub = v.Subject + } + for k := range toSet { + if sub.IsContainedIn(k) || k.IsContainedIn(sub) { + vr.AddError("overlapping subject namespace for %q and %q", sub, k) + } + } + if _, ok := toSet[sub]; ok { + vr.AddError("overlapping subject namespace for %q", v.To) + } + toSet[sub] = struct{}{} + } + v.Validate(acctPubKey, vr) + } +} + +// Add is a simple way to add imports +func (i *Imports) Add(a ...*Import) { + *i = append(*i, a...) 
+} + +func (i Imports) Len() int { + return len(i) +} + +func (i Imports) Swap(j, k int) { + i[j], i[k] = i[k], i[j] +} + +func (i Imports) Less(j, k int) bool { + return i[j].Subject < i[k].Subject +} diff --git a/vendor/github.com/nats-io/jwt/v2/operator_claims.go b/vendor/github.com/nats-io/jwt/v2/operator_claims.go new file mode 100644 index 00000000..f806002d --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/operator_claims.go @@ -0,0 +1,245 @@ +/* + * Copyright 2018 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "errors" + "fmt" + "net/url" + "strconv" + "strings" + + "github.com/nats-io/nkeys" +) + +// Operator specific claims +type Operator struct { + // Slice of other operator NKeys that can be used to sign on behalf of the main + // operator identity. + SigningKeys StringList `json:"signing_keys,omitempty"` + // AccountServerURL is a partial URL like "https://host.domain.org:/jwt/v1" + // tools will use the prefix and build queries by appending /accounts/ + // or /operator to the path provided. Note this assumes that the account server + // can handle requests in a nats-account-server compatible way. See + // https://github.com/nats-io/nats-account-server. + AccountServerURL string `json:"account_server_url,omitempty"` + // A list of NATS urls (tls://host:port) where tools can connect to the server + // using proper credentials. 
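	// For example (hosts are illustrative): "nats://connect.example.com:4222"
	// or "tls://connect.example.com:4222". See ValidateOperatorServiceURL
	// below for the accepted forms.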
+	OperatorServiceURLs StringList `json:"operator_service_urls,omitempty"`
+	// Identity of the system account
+	SystemAccount string `json:"system_account,omitempty"`
+	// Min Server version
+	AssertServerVersion string `json:"assert_server_version,omitempty"`
+	// Signing of subordinate objects will require signing keys
+	StrictSigningKeyUsage bool `json:"strict_signing_key_usage,omitempty"`
+	GenericFields
+}
+
+func ParseServerVersion(version string) (int, int, int, error) {
+	if version == "" {
+		return 0, 0, 0, nil
+	}
+	split := strings.Split(version, ".")
+	if len(split) != 3 {
+		return 0, 0, 0, fmt.Errorf("asserted server version must be of the form <major>.<minor>.<update>")
+	} else if major, err := strconv.Atoi(split[0]); err != nil {
+		return 0, 0, 0, fmt.Errorf("asserted server version can't parse %s to int", split[0])
+	} else if minor, err := strconv.Atoi(split[1]); err != nil {
+		return 0, 0, 0, fmt.Errorf("asserted server version can't parse %s to int", split[1])
+	} else if update, err := strconv.Atoi(split[2]); err != nil {
+		return 0, 0, 0, fmt.Errorf("asserted server version can't parse %s to int", split[2])
+	} else if major < 0 || minor < 0 || update < 0 {
+		return 0, 0, 0, fmt.Errorf("asserted server version can't contain negative values: %s", version)
+	} else {
+		return major, minor, update, nil
+	}
+}
+
+// Validate checks the validity of the operator's contents
+func (o *Operator) Validate(vr *ValidationResults) {
+	if err := o.validateAccountServerURL(); err != nil {
+		vr.AddError(err.Error())
+	}
+
+	for _, v := range o.validateOperatorServiceURLs() {
+		if v != nil {
+			vr.AddError(v.Error())
+		}
+	}
+
+	for _, k := range o.SigningKeys {
+		if !nkeys.IsValidPublicOperatorKey(k) {
+			vr.AddError("%s is not an operator public key", k)
+		}
+	}
+	if o.SystemAccount != "" {
+		if !nkeys.IsValidPublicAccountKey(o.SystemAccount) {
+			vr.AddError("%s is not an account public key", o.SystemAccount)
+		}
+	}
+	if _, _, _, err := ParseServerVersion(o.AssertServerVersion); err != nil {
+		vr.AddError("assert server version error: %s", err)
+	}
+}
+
+func (o *Operator) validateAccountServerURL() error {
+	if o.AccountServerURL != "" {
+		// We don't care what kind of URL it is so long as it parses
+		// and has a protocol. The account server may impose additional
+		// constraints on the type of URLs that it is able to notify to
+		u, err := url.Parse(o.AccountServerURL)
+		if err != nil {
+			return fmt.Errorf("error parsing account server url: %v", err)
+		}
+		if u.Scheme == "" {
+			return fmt.Errorf("account server url %q requires a protocol", o.AccountServerURL)
+		}
+	}
+	return nil
+}
+
+// ValidateOperatorServiceURL returns an error if the URL is not a valid NATS or TLS url.
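+//
+// A quick sketch of accepted and rejected values (hosts are illustrative):
+//
+//	ValidateOperatorServiceURL("nats://connect.example.com:4222") // nil
+//	ValidateOperatorServiceURL("tls://connect.example.com:4222")  // nil
+//	ValidateOperatorServiceURL("http://connect.example.com")      // error: protocol not supported
+//	ValidateOperatorServiceURL("nats://user:pass@host:4222")      // error: credentials are not supported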
+func ValidateOperatorServiceURL(v string) error { + // should be possible for the service url to not be expressed + if v == "" { + return nil + } + u, err := url.Parse(v) + if err != nil { + return fmt.Errorf("error parsing operator service url %q: %v", v, err) + } + + if u.User != nil { + return fmt.Errorf("operator service url %q - credentials are not supported", v) + } + + if u.Path != "" { + return fmt.Errorf("operator service url %q - paths are not supported", v) + } + + lcs := strings.ToLower(u.Scheme) + switch lcs { + case "nats": + return nil + case "tls": + return nil + default: + return fmt.Errorf("operator service url %q - protocol not supported (only 'nats' or 'tls' only)", v) + } +} + +func (o *Operator) validateOperatorServiceURLs() []error { + var errs []error + for _, v := range o.OperatorServiceURLs { + if v != "" { + if err := ValidateOperatorServiceURL(v); err != nil { + errs = append(errs, err) + } + } + } + return errs +} + +// OperatorClaims define the data for an operator JWT +type OperatorClaims struct { + ClaimsData + Operator `json:"nats,omitempty"` +} + +// NewOperatorClaims creates a new operator claim with the specified subject, which should be an operator public key +func NewOperatorClaims(subject string) *OperatorClaims { + if subject == "" { + return nil + } + c := &OperatorClaims{} + c.Subject = subject + c.Issuer = subject + return c +} + +// DidSign checks the claims against the operator's public key and its signing keys +func (oc *OperatorClaims) DidSign(op Claims) bool { + if op == nil { + return false + } + issuer := op.Claims().Issuer + if issuer == oc.Subject { + if !oc.StrictSigningKeyUsage { + return true + } + return op.Claims().Subject == oc.Subject + } + return oc.SigningKeys.Contains(issuer) +} + +// Encode the claims into a JWT string +func (oc *OperatorClaims) Encode(pair nkeys.KeyPair) (string, error) { + if !nkeys.IsValidPublicOperatorKey(oc.Subject) { + return "", errors.New("expected subject to be an operator public key") + } + err := oc.validateAccountServerURL() + if err != nil { + return "", err + } + oc.Type = OperatorClaim + return oc.ClaimsData.encode(pair, oc) +} + +func (oc *OperatorClaims) ClaimType() ClaimType { + return oc.Type +} + +// DecodeOperatorClaims tries to create an operator claims from a JWt string +func DecodeOperatorClaims(token string) (*OperatorClaims, error) { + claims, err := Decode(token) + if err != nil { + return nil, err + } + oc, ok := claims.(*OperatorClaims) + if !ok { + return nil, errors.New("not operator claim") + } + return oc, nil +} + +func (oc *OperatorClaims) String() string { + return oc.ClaimsData.String(oc) +} + +// Payload returns the operator specific data for an operator JWT +func (oc *OperatorClaims) Payload() interface{} { + return &oc.Operator +} + +// Validate the contents of the claims +func (oc *OperatorClaims) Validate(vr *ValidationResults) { + oc.ClaimsData.Validate(vr) + oc.Operator.Validate(vr) +} + +// ExpectedPrefixes defines the nkey types that can sign operator claims, operator +func (oc *OperatorClaims) ExpectedPrefixes() []nkeys.PrefixByte { + return []nkeys.PrefixByte{nkeys.PrefixByteOperator} +} + +// Claims returns the generic claims data +func (oc *OperatorClaims) Claims() *ClaimsData { + return &oc.ClaimsData +} + +func (oc *OperatorClaims) updateVersion() { + oc.GenericFields.Version = libVersion +} diff --git a/vendor/github.com/nats-io/jwt/v2/revocation_list.go b/vendor/github.com/nats-io/jwt/v2/revocation_list.go new file mode 100644 index 00000000..c582896b --- 
/dev/null +++ b/vendor/github.com/nats-io/jwt/v2/revocation_list.go @@ -0,0 +1,84 @@ +/* + * Copyright 2020 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "time" +) + +const All = "*" + +// RevocationList is used to store a mapping of public keys to unix timestamps +type RevocationList map[string]int64 +type RevocationEntry struct { + PublicKey string + TimeStamp int64 +} + +// Revoke enters a revocation by publickey and timestamp into this export +// If there is already a revocation for this public key that is newer, it is kept. +func (r RevocationList) Revoke(pubKey string, timestamp time.Time) { + newTS := timestamp.Unix() + // cannot move a revocation into the future - only into the past + if ts, ok := r[pubKey]; ok && ts > newTS { + return + } + r[pubKey] = newTS +} + +// MaybeCompact will compact the revocation list if jwt.All is found. Any +// revocation that is covered by a jwt.All revocation will be deleted, thus +// reducing the size of the JWT. Returns a slice of entries that were removed +// during the process. +func (r RevocationList) MaybeCompact() []RevocationEntry { + var deleted []RevocationEntry + ats, ok := r[All] + if ok { + for k, ts := range r { + if k != All && ats >= ts { + deleted = append(deleted, RevocationEntry{ + PublicKey: k, + TimeStamp: ts, + }) + delete(r, k) + } + } + } + return deleted +} + +// ClearRevocation removes any revocation for the public key +func (r RevocationList) ClearRevocation(pubKey string) { + delete(r, pubKey) +} + +// IsRevoked checks if the public key is in the revoked list with a timestamp later than +// the one passed in. Generally this method is called with an issue time but other time's can +// be used for testing. +func (r RevocationList) IsRevoked(pubKey string, timestamp time.Time) bool { + if r.allRevoked(timestamp) { + return true + } + ts, ok := r[pubKey] + return ok && ts >= timestamp.Unix() +} + +// allRevoked returns true if All is set and the timestamp is later or same as the +// one passed. This is called by IsRevoked. +func (r RevocationList) allRevoked(timestamp time.Time) bool { + ts, ok := r[All] + return ok && ts >= timestamp.Unix() +} diff --git a/vendor/github.com/nats-io/jwt/v2/signingkeys.go b/vendor/github.com/nats-io/jwt/v2/signingkeys.go new file mode 100644 index 00000000..0d8a5b93 --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/signingkeys.go @@ -0,0 +1,207 @@ +/* + * Copyright 2020 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/nats-io/nkeys" +) + +type Scope interface { + SigningKey() string + ValidateScopedSigner(claim Claims) error + Validate(vr *ValidationResults) +} + +type ScopeType int + +const ( + UserScopeType ScopeType = iota + 1 +) + +func (t ScopeType) String() string { + switch t { + case UserScopeType: + return "user_scope" + } + return "unknown" +} + +func (t *ScopeType) MarshalJSON() ([]byte, error) { + switch *t { + case UserScopeType: + return []byte("\"user_scope\""), nil + } + return nil, fmt.Errorf("unknown scope type %q", t) +} + +func (t *ScopeType) UnmarshalJSON(b []byte) error { + var s string + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + switch s { + case "user_scope": + *t = UserScopeType + return nil + } + return fmt.Errorf("unknown scope type %q", t) +} + +type UserScope struct { + Kind ScopeType `json:"kind"` + Key string `json:"key"` + Role string `json:"role"` + Template UserPermissionLimits `json:"template"` +} + +func NewUserScope() *UserScope { + var s UserScope + s.Kind = UserScopeType + s.Template.NatsLimits = NatsLimits{NoLimit, NoLimit, NoLimit} + return &s +} + +func (us UserScope) SigningKey() string { + return us.Key +} + +func (us UserScope) Validate(vr *ValidationResults) { + if !nkeys.IsValidPublicAccountKey(us.Key) { + vr.AddError("%s is not an account public key", us.Key) + } +} + +func (us UserScope) ValidateScopedSigner(c Claims) error { + uc, ok := c.(*UserClaims) + if !ok { + return fmt.Errorf("not an user claim - scoped signing key requires user claim") + } + if uc.Claims().Issuer != us.Key { + return errors.New("issuer not the scoped signer") + } + if !uc.HasEmptyPermissions() { + return errors.New("scoped users require no permissions or limits set") + } + return nil +} + +// SigningKeys is a map keyed by a public account key +type SigningKeys map[string]Scope + +func (sk SigningKeys) Validate(vr *ValidationResults) { + for k, v := range sk { + // regular signing keys won't have a scope + if v != nil { + v.Validate(vr) + } else { + if !nkeys.IsValidPublicAccountKey(k) { + vr.AddError("%q is not a valid account signing key", k) + } + } + } +} + +// MarshalJSON serializes the scoped signing keys as an array +func (sk *SigningKeys) MarshalJSON() ([]byte, error) { + if sk == nil { + return nil, nil + } + var a []interface{} + for k, v := range *sk { + if v != nil { + a = append(a, v) + } else { + a = append(a, k) + } + } + return json.Marshal(a) +} + +func (sk *SigningKeys) UnmarshalJSON(data []byte) error { + if *sk == nil { + *sk = make(SigningKeys) + } + // read an array - we can have a string or an map + var a []interface{} + if err := json.Unmarshal(data, &a); err != nil { + return err + } + for _, i := range a { + switch v := i.(type) { + case string: + (*sk)[v] = nil + case map[string]interface{}: + d, err := json.Marshal(v) + if err != nil { + return err + } + switch v["kind"] { + case UserScopeType.String(): + us := NewUserScope() + if err := json.Unmarshal(d, &us); err != nil { + return err + } + (*sk)[us.Key] = us + default: + return fmt.Errorf("unknown signing key scope %q", v["type"]) + } + } + } + return nil +} + +func (sk SigningKeys) Keys() []string { + var keys []string + for k := range sk { + keys = append(keys, k) + } + return keys +} + +// GetScope returns nil if the key is not associated +func (sk SigningKeys) GetScope(k string) 
(Scope, bool) { + v, ok := sk[k] + if !ok { + return nil, false + } + return v, true +} + +func (sk SigningKeys) Contains(k string) bool { + _, ok := sk[k] + return ok +} + +func (sk SigningKeys) Add(keys ...string) { + for _, k := range keys { + sk[k] = nil + } +} + +func (sk SigningKeys) AddScopedSigner(s Scope) { + sk[s.SigningKey()] = s +} + +func (sk SigningKeys) Remove(keys ...string) { + for _, k := range keys { + delete(sk, k) + } +} diff --git a/vendor/github.com/nats-io/jwt/v2/types.go b/vendor/github.com/nats-io/jwt/v2/types.go new file mode 100644 index 00000000..92abcf53 --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/types.go @@ -0,0 +1,487 @@ +/* + * Copyright 2018-2019 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "encoding/json" + "fmt" + "net" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +const MaxInfoLength = 8 * 1024 + +type Info struct { + Description string `json:"description,omitempty"` + InfoURL string `json:"info_url,omitempty"` +} + +func (s Info) Validate(vr *ValidationResults) { + if len(s.Description) > MaxInfoLength { + vr.AddError("Description is too long") + } + if s.InfoURL != "" { + if len(s.InfoURL) > MaxInfoLength { + vr.AddError("Info URL is too long") + } + u, err := url.Parse(s.InfoURL) + if err == nil && (u.Hostname() == "" || u.Scheme == "") { + err = fmt.Errorf("no hostname or scheme") + } + if err != nil { + vr.AddError("error parsing info url: %v", err) + } + } +} + +// ExportType defines the type of import/export. 
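+// On the wire it is encoded as the JSON string "stream" or "service"; see the
+// MarshalJSON/UnmarshalJSON methods below.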
+type ExportType int + +const ( + // Unknown is used if we don't know the type + Unknown ExportType = iota + // Stream defines the type field value for a stream "stream" + Stream + // Service defines the type field value for a service "service" + Service +) + +func (t ExportType) String() string { + switch t { + case Stream: + return "stream" + case Service: + return "service" + } + return "unknown" +} + +// MarshalJSON marshals the enum as a quoted json string +func (t *ExportType) MarshalJSON() ([]byte, error) { + switch *t { + case Stream: + return []byte("\"stream\""), nil + case Service: + return []byte("\"service\""), nil + } + return nil, fmt.Errorf("unknown export type") +} + +// UnmarshalJSON unmashals a quoted json string to the enum value +func (t *ExportType) UnmarshalJSON(b []byte) error { + var j string + err := json.Unmarshal(b, &j) + if err != nil { + return err + } + switch j { + case "stream": + *t = Stream + return nil + case "service": + *t = Service + return nil + } + return fmt.Errorf("unknown export type %q", j) +} + +type RenamingSubject Subject + +func (s RenamingSubject) Validate(from Subject, vr *ValidationResults) { + v := Subject(s) + v.Validate(vr) + if from == "" { + vr.AddError("subject cannot be empty") + } + if strings.Contains(string(s), " ") { + vr.AddError("subject %q cannot have spaces", v) + } + matchesSuffix := func(s Subject) bool { + return s == ">" || strings.HasSuffix(string(s), ".>") + } + if matchesSuffix(v) != matchesSuffix(from) { + vr.AddError("both, renaming subject and subject, need to end or not end in >") + } + fromCnt := from.countTokenWildcards() + refCnt := 0 + for _, tk := range strings.Split(string(v), ".") { + if tk == "*" { + refCnt++ + } + if len(tk) < 2 { + continue + } + if tk[0] == '$' { + if idx, err := strconv.Atoi(tk[1:]); err == nil { + if idx > fromCnt { + vr.AddError("Reference $%d in %q reference * in %q that do not exist", idx, s, from) + } else { + refCnt++ + } + } + } + } + if refCnt != fromCnt { + vr.AddError("subject does not contain enough * or reference wildcards $[0-9]") + } +} + +// Replaces reference tokens with * +func (s RenamingSubject) ToSubject() Subject { + if !strings.Contains(string(s), "$") { + return Subject(s) + } + bldr := strings.Builder{} + tokens := strings.Split(string(s), ".") + for i, tk := range tokens { + convert := false + if len(tk) > 1 && tk[0] == '$' { + if _, err := strconv.Atoi(tk[1:]); err == nil { + convert = true + } + } + if convert { + bldr.WriteString("*") + } else { + bldr.WriteString(tk) + } + if i != len(tokens)-1 { + bldr.WriteString(".") + } + } + return Subject(bldr.String()) +} + +// Subject is a string that represents a NATS subject +type Subject string + +// Validate checks that a subject string is valid, ie not empty and without spaces +func (s Subject) Validate(vr *ValidationResults) { + v := string(s) + if v == "" { + vr.AddError("subject cannot be empty") + } + if strings.Contains(v, " ") { + vr.AddError("subject %q cannot have spaces", v) + } +} + +func (s Subject) countTokenWildcards() int { + v := string(s) + if v == "*" { + return 1 + } + cnt := 0 + for _, t := range strings.Split(v, ".") { + if t == "*" { + cnt++ + } + } + return cnt +} + +// HasWildCards is used to check if a subject contains a > or * +func (s Subject) HasWildCards() bool { + v := string(s) + return strings.HasSuffix(v, ".>") || + strings.Contains(v, ".*.") || + strings.HasSuffix(v, ".*") || + strings.HasPrefix(v, "*.") || + v == "*" || + v == ">" +} + +// IsContainedIn does a simple test to 
+func (s Subject) IsContainedIn(other Subject) bool {
+	otherArray := strings.Split(string(other), ".")
+	myArray := strings.Split(string(s), ".")
+
+	if len(myArray) > len(otherArray) && otherArray[len(otherArray)-1] != ">" {
+		return false
+	}
+
+	if len(myArray) < len(otherArray) {
+		return false
+	}
+
+	for ind, tok := range otherArray {
+		myTok := myArray[ind]
+
+		if ind == len(otherArray)-1 && tok == ">" {
+			return true
+		}
+
+		if tok != myTok && tok != "*" {
+			return false
+		}
+	}
+
+	return true
+}
+
+// TimeRange is used to represent a start and end time
+type TimeRange struct {
+	Start string `json:"start,omitempty"`
+	End   string `json:"end,omitempty"`
+}
+
+// Validate checks the values in a time range struct
+func (tr *TimeRange) Validate(vr *ValidationResults) {
+	format := "15:04:05"
+
+	if tr.Start == "" {
+		vr.AddError("time range must contain a start")
+	} else {
+		_, err := time.Parse(format, tr.Start)
+		if err != nil {
+			vr.AddError("start in time range is invalid %q", tr.Start)
+		}
+	}
+
+	if tr.End == "" {
+		vr.AddError("time range must contain an end")
+	} else {
+		_, err := time.Parse(format, tr.End)
+		if err != nil {
+			vr.AddError("end in time range is invalid %q", tr.End)
+		}
+	}
+}
+
+// UserLimits restricts when and from where a user may connect.
+// Src is a comma separated list of CIDR specifications.
+type UserLimits struct {
+	Src    CIDRList    `json:"src,omitempty"`
+	Times  []TimeRange `json:"times,omitempty"`
+	Locale string      `json:"times_location,omitempty"`
+}
+
+func (u *UserLimits) Empty() bool {
+	return reflect.DeepEqual(*u, UserLimits{})
+}
+
+func (u *UserLimits) IsUnlimited() bool {
+	return len(u.Src) == 0 && len(u.Times) == 0
+}
+
+// Limits are used to control access for users and importing accounts
+type Limits struct {
+	UserLimits
+	NatsLimits
+}
+
+func (l *Limits) IsUnlimited() bool {
+	return l.UserLimits.IsUnlimited() && l.NatsLimits.IsUnlimited()
+}
+
+// Validate checks the values in a limit struct
+func (l *Limits) Validate(vr *ValidationResults) {
+	if len(l.Src) != 0 {
+		for _, cidr := range l.Src {
+			_, ipNet, err := net.ParseCIDR(cidr)
+			if err != nil || ipNet == nil {
+				vr.AddError("invalid cidr %q in user src limits", cidr)
+			}
+		}
+	}
+
+	if len(l.Times) > 0 {
+		for _, t := range l.Times {
+			t.Validate(vr)
+		}
+	}
+
+	if l.Locale != "" {
+		if _, err := time.LoadLocation(l.Locale); err != nil {
+			vr.AddError("could not parse iana time zone by name: %v", err)
+		}
+	}
+}
+
+// Permission defines allow/deny subjects
+type Permission struct {
+	Allow StringList `json:"allow,omitempty"`
+	Deny  StringList `json:"deny,omitempty"`
+}
+
+func (p *Permission) Empty() bool {
+	return len(p.Allow) == 0 && len(p.Deny) == 0
+}
+
+func checkPermission(vr *ValidationResults, subj string, permitQueue bool) {
+	tk := strings.Split(subj, " ")
+	switch len(tk) {
+	case 1:
+		Subject(tk[0]).Validate(vr)
+	case 2:
+		Subject(tk[0]).Validate(vr)
+		Subject(tk[1]).Validate(vr)
+		if !permitQueue {
+			vr.AddError(`Permission Subject "%s" is not allowed to contain queue`, subj)
+		}
+	default:
+		vr.AddError(`Permission Subject "%s" contains too many spaces`, subj)
+	}
+}
+
+// Validate the allow, deny elements of a permission
+func (p *Permission) Validate(vr *ValidationResults, permitQueue bool) {
+	for _, subj := range p.Allow {
+		checkPermission(vr, subj, permitQueue)
+	}
+	for _, subj := range p.Deny {
+		checkPermission(vr, subj, permitQueue)
+	}
+}
+
+// ResponsePermission can be used to allow responses to any reply subject
+// that is received
on a valid subscription. +type ResponsePermission struct { + MaxMsgs int `json:"max"` + Expires time.Duration `json:"ttl"` +} + +// Validate the response permission. +func (p *ResponsePermission) Validate(_ *ValidationResults) { + // Any values can be valid for now. +} + +// Permissions are used to restrict subject access, either on a user or for everyone on a server by default +type Permissions struct { + Pub Permission `json:"pub,omitempty"` + Sub Permission `json:"sub,omitempty"` + Resp *ResponsePermission `json:"resp,omitempty"` +} + +// Validate the pub and sub fields in the permissions list +func (p *Permissions) Validate(vr *ValidationResults) { + if p.Resp != nil { + p.Resp.Validate(vr) + } + p.Sub.Validate(vr, true) + p.Pub.Validate(vr, false) +} + +// StringList is a wrapper for an array of strings +type StringList []string + +// Contains returns true if the list contains the string +func (u *StringList) Contains(p string) bool { + for _, t := range *u { + if t == p { + return true + } + } + return false +} + +// Add appends 1 or more strings to a list +func (u *StringList) Add(p ...string) { + for _, v := range p { + if !u.Contains(v) && v != "" { + *u = append(*u, v) + } + } +} + +// Remove removes 1 or more strings from a list +func (u *StringList) Remove(p ...string) { + for _, v := range p { + for i, t := range *u { + if t == v { + a := *u + *u = append(a[:i], a[i+1:]...) + break + } + } + } +} + +// TagList is a unique array of lower case strings +// All tag list methods lower case the strings in the arguments +type TagList []string + +// Contains returns true if the list contains the tags +func (u *TagList) Contains(p string) bool { + p = strings.ToLower(strings.TrimSpace(p)) + for _, t := range *u { + if t == p { + return true + } + } + return false +} + +// Add appends 1 or more tags to a list +func (u *TagList) Add(p ...string) { + for _, v := range p { + v = strings.ToLower(strings.TrimSpace(v)) + if !u.Contains(v) && v != "" { + *u = append(*u, v) + } + } +} + +// Remove removes 1 or more tags from a list +func (u *TagList) Remove(p ...string) { + for _, v := range p { + v = strings.ToLower(strings.TrimSpace(v)) + for i, t := range *u { + if t == v { + a := *u + *u = append(a[:i], a[i+1:]...) + break + } + } + } +} + +type CIDRList TagList + +func (c *CIDRList) Contains(p string) bool { + return (*TagList)(c).Contains(p) +} + +func (c *CIDRList) Add(p ...string) { + (*TagList)(c).Add(p...) +} + +func (c *CIDRList) Remove(p ...string) { + (*TagList)(c).Remove(p...) +} + +func (c *CIDRList) Set(values string) { + *c = CIDRList{} + c.Add(strings.Split(strings.ToLower(values), ",")...) +} + +func (c *CIDRList) UnmarshalJSON(body []byte) (err error) { + // parse either as array of strings or comma separate list + var request []string + var list string + if err := json.Unmarshal(body, &request); err == nil { + *c = request + return nil + } else if err := json.Unmarshal(body, &list); err == nil { + c.Set(list) + return nil + } else { + return err + } +} diff --git a/vendor/github.com/nats-io/jwt/v2/user_claims.go b/vendor/github.com/nats-io/jwt/v2/user_claims.go new file mode 100644 index 00000000..d8a3a6c9 --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/user_claims.go @@ -0,0 +1,153 @@ +/* + * Copyright 2018-2019 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "errors" + "reflect" + + "github.com/nats-io/nkeys" +) + +const ( + ConnectionTypeStandard = "STANDARD" + ConnectionTypeWebsocket = "WEBSOCKET" + ConnectionTypeLeafnode = "LEAFNODE" + ConnectionTypeLeafnodeWS = "LEAFNODE_WS" + ConnectionTypeMqtt = "MQTT" + ConnectionTypeMqttWS = "MQTT_WS" +) + +type UserPermissionLimits struct { + Permissions + Limits + BearerToken bool `json:"bearer_token,omitempty"` + AllowedConnectionTypes StringList `json:"allowed_connection_types,omitempty"` +} + +// User defines the user specific data in a user JWT +type User struct { + UserPermissionLimits + // IssuerAccount stores the public key for the account the issuer represents. + // When set, the claim was issued by a signing key. + IssuerAccount string `json:"issuer_account,omitempty"` + GenericFields +} + +// Validate checks the permissions and limits in a User jwt +func (u *User) Validate(vr *ValidationResults) { + u.Permissions.Validate(vr) + u.Limits.Validate(vr) + // When BearerToken is true server will ignore any nonce-signing verification +} + +// UserClaims defines a user JWT +type UserClaims struct { + ClaimsData + User `json:"nats,omitempty"` +} + +// NewUserClaims creates a user JWT with the specific subject/public key +func NewUserClaims(subject string) *UserClaims { + if subject == "" { + return nil + } + c := &UserClaims{} + c.Subject = subject + c.Limits = Limits{ + UserLimits{CIDRList{}, nil, ""}, + NatsLimits{NoLimit, NoLimit, NoLimit}, + } + return c +} + +func (u *UserClaims) SetScoped(t bool) { + if t { + u.UserPermissionLimits = UserPermissionLimits{} + } else { + u.Limits = Limits{ + UserLimits{CIDRList{}, nil, ""}, + NatsLimits{NoLimit, NoLimit, NoLimit}, + } + } +} + +func (u *UserClaims) HasEmptyPermissions() bool { + return reflect.DeepEqual(u.UserPermissionLimits, UserPermissionLimits{}) +} + +// Encode tries to turn the user claims into a JWT string +func (u *UserClaims) Encode(pair nkeys.KeyPair) (string, error) { + if !nkeys.IsValidPublicUserKey(u.Subject) { + return "", errors.New("expected subject to be user public key") + } + u.Type = UserClaim + return u.ClaimsData.encode(pair, u) +} + +// DecodeUserClaims tries to parse a user claims from a JWT string +func DecodeUserClaims(token string) (*UserClaims, error) { + claims, err := Decode(token) + if err != nil { + return nil, err + } + ac, ok := claims.(*UserClaims) + if !ok { + return nil, errors.New("not user claim") + } + return ac, nil +} + +func (u *UserClaims) ClaimType() ClaimType { + return u.Type +} + +// Validate checks the generic and specific parts of the user jwt +func (u *UserClaims) Validate(vr *ValidationResults) { + u.ClaimsData.Validate(vr) + u.User.Validate(vr) + if u.IssuerAccount != "" && !nkeys.IsValidPublicAccountKey(u.IssuerAccount) { + vr.AddError("account_id is not an account public key") + } +} + +// ExpectedPrefixes defines the types that can encode a user JWT, account +func (u *UserClaims) ExpectedPrefixes() []nkeys.PrefixByte { + return []nkeys.PrefixByte{nkeys.PrefixByteAccount} +} + +// Claims returns the generic data from a user jwt +func (u 
*UserClaims) Claims() *ClaimsData { + return &u.ClaimsData +} + +// Payload returns the user specific data from a user JWT +func (u *UserClaims) Payload() interface{} { + return &u.User +} + +func (u *UserClaims) String() string { + return u.ClaimsData.String(u) +} + +func (u *UserClaims) updateVersion() { + u.GenericFields.Version = libVersion +} + +// IsBearerToken returns true if nonce-signing requirements should be skipped +func (u *UserClaims) IsBearerToken() bool { + return u.BearerToken +} diff --git a/vendor/github.com/nats-io/jwt/v2/validation.go b/vendor/github.com/nats-io/jwt/v2/validation.go new file mode 100644 index 00000000..afaed57f --- /dev/null +++ b/vendor/github.com/nats-io/jwt/v2/validation.go @@ -0,0 +1,118 @@ +/* + * Copyright 2018 The NATS Authors + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package jwt + +import ( + "errors" + "fmt" +) + +// ValidationIssue represents an issue during JWT validation, it may or may not be a blocking error +type ValidationIssue struct { + Description string + Blocking bool + TimeCheck bool +} + +func (ve *ValidationIssue) Error() string { + return ve.Description +} + +// ValidationResults is a list of ValidationIssue pointers +type ValidationResults struct { + Issues []*ValidationIssue +} + +// CreateValidationResults creates an empty list of validation issues +func CreateValidationResults() *ValidationResults { + var issues []*ValidationIssue + return &ValidationResults{ + Issues: issues, + } +} + +//Add appends an issue to the list +func (v *ValidationResults) Add(vi *ValidationIssue) { + v.Issues = append(v.Issues, vi) +} + +// AddError creates a new validation error and adds it to the list +func (v *ValidationResults) AddError(format string, args ...interface{}) { + v.Add(&ValidationIssue{ + Description: fmt.Sprintf(format, args...), + Blocking: true, + TimeCheck: false, + }) +} + +// AddTimeCheck creates a new validation issue related to a time check and adds it to the list +func (v *ValidationResults) AddTimeCheck(format string, args ...interface{}) { + v.Add(&ValidationIssue{ + Description: fmt.Sprintf(format, args...), + Blocking: false, + TimeCheck: true, + }) +} + +// AddWarning creates a new validation warning and adds it to the list +func (v *ValidationResults) AddWarning(format string, args ...interface{}) { + v.Add(&ValidationIssue{ + Description: fmt.Sprintf(format, args...), + Blocking: false, + TimeCheck: false, + }) +} + +// IsBlocking returns true if the list contains a blocking error +func (v *ValidationResults) IsBlocking(includeTimeChecks bool) bool { + for _, i := range v.Issues { + if i.Blocking { + return true + } + + if includeTimeChecks && i.TimeCheck { + return true + } + } + return false +} + +// IsEmpty returns true if the list is empty +func (v *ValidationResults) IsEmpty() bool { + return len(v.Issues) == 0 +} + +// Errors returns only blocking issues as errors +func (v *ValidationResults) Errors() []error { + var errs []error + for _, v := range v.Issues { + if v.Blocking { + errs = 
append(errs, errors.New(v.Description)) + } + } + return errs +} + +// Warnings returns only non blocking issues as strings +func (v *ValidationResults) Warnings() []string { + var errs []string + for _, v := range v.Issues { + if !v.Blocking { + errs = append(errs, v.Description) + } + } + return errs +} diff --git a/vendor/github.com/nats-io/nats.go/.gitignore b/vendor/github.com/nats-io/nats.go/.gitignore new file mode 100644 index 00000000..ae4871f4 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/.gitignore @@ -0,0 +1,45 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +# Emacs +*~ +\#*\# +.\#* + +# vi/vim +.??*.swp + +# Mac +.DS_Store + +# Eclipse +.project +.settings/ + +# bin + +# Goland +.idea + +# VS Code +.vscode \ No newline at end of file diff --git a/vendor/github.com/nats-io/nats.go/.travis.yml b/vendor/github.com/nats-io/nats.go/.travis.yml new file mode 100644 index 00000000..7268558a --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/.travis.yml @@ -0,0 +1,25 @@ +language: go +go: +- "1.20.x" +- "1.19.x" +go_import_path: github.com/nats-io/nats.go +install: +- go get -t ./... +- if [[ "$TRAVIS_GO_VERSION" =~ 1.20 ]]; then + go install github.com/mattn/goveralls@latest; + go install github.com/wadey/gocovmerge@latest; + go install honnef.co/go/tools/cmd/staticcheck@latest; + go install github.com/client9/misspell/cmd/misspell@latest; + fi +before_script: +- $(exit $(go fmt ./... | wc -l)) +- go vet -modfile=go_test.mod ./... +- if [[ "$TRAVIS_GO_VERSION" =~ 1.20 ]]; then + find . -type f -name "*.go" | xargs misspell -error -locale US; + GOFLAGS="-mod=mod -modfile=go_test.mod" staticcheck ./...; + fi +script: +- go test -modfile=go_test.mod -v -run=TestNoRace -p=1 ./... --failfast -vet=off +- if [[ "$TRAVIS_GO_VERSION" =~ 1.20 ]]; then ./scripts/cov.sh TRAVIS; else go test -modfile=go_test.mod -race -v -p=1 ./... 
--failfast -vet=off; fi +after_success: +- if [[ "$TRAVIS_GO_VERSION" =~ 1.20 ]]; then $HOME/gopath/bin/goveralls -coverprofile=acc.out -service travis-ci; fi diff --git a/vendor/github.com/nats-io/nats.go/.words b/vendor/github.com/nats-io/nats.go/.words new file mode 100644 index 00000000..24be7f62 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/.words @@ -0,0 +1,106 @@ +1 + +derek +dlc +ivan + +acknowledgement/SM +arity +deduplication/S +demarshal/SDG +durables +iff +observable/S +redelivery/S +retransmitting +retry/SB + +SlowConsumer + +AppendInt +ReadMIMEHeader + +clientProtoZero +jetstream +v1 +v2 + +ack/SGD +auth +authToken +chans +creds +config/S +cseq +impl +msgh +msgId +mux/S +nack +ptr +puback +scanf +stderr +stdout +structs +tm +todo +unsub/S + +permessage +permessage-deflate +urlA +urlB +websocket +ws +wss + +NKey +pList + +backend/S +backoff/S +decompressor/CGS +inflight +inlined +lookups +reconnection/MS +redeliver/ADGS +responder/S +rewrap/S +rollup/S +unreceive/DRSZGB +variadic +wakeup/S +whitespace +wrap/AS + +omitempty + +apache +html +ietf +www + +sum256 +32bit/S +64bit/S +64k +128k +512k + +hacky +handroll/D + +rfc6455 +rfc7692 +0x00 +0xff +20x +40x +50x + +ErrXXX + +atlanta +eu diff --git a/vendor/github.com/nats-io/nats.go/.words.readme b/vendor/github.com/nats-io/nats.go/.words.readme new file mode 100644 index 00000000..9d9f5cbb --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/.words.readme @@ -0,0 +1,25 @@ +The .words file is used by gospel (v1.2+), which wraps the Hunspell libraries +but populates the dictionary with identifiers from the Go source. + + + +Alas, no comments are allowed in the .words file and newer versions of gospel +error out on seeing them. This is really a hunspell restriction. + +We assume en_US hunspell dictionaries are installed and used. +The /AFFIXRULES are defined in en_US.aff (eg: /usr/share/hunspell/en_US.aff) +Invoke `hunspell -D` to see the actual locations. + +Words which are in the base dictionary can't have extra affix rules added to +them, so we have to start with the affixed variant we want to add. +Thus `creds` rather than `cred/S` and so on. + +So we can't use receive/DRSZGBU, adding 'U', to allow unreceive and variants, +we have to use unreceive as the stem. + +We can't define our own affix or compound rules, +to capture rfc\d{3,} or 0x[0-9A-Fa-f]{2} + +The spelling tokenizer doesn't take "permessage-deflate" as allowing for ... +"permessage-deflate", which is an RFC7692 registered extension for websockets. +We have to explicitly list "permessage". diff --git a/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md b/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md new file mode 100644 index 00000000..b850d49e --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/CODE-OF-CONDUCT.md @@ -0,0 +1,3 @@ +## Community Code of Conduct + +NATS follows the [CNCF Code of Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md). diff --git a/vendor/github.com/nats-io/nats.go/GOVERNANCE.md b/vendor/github.com/nats-io/nats.go/GOVERNANCE.md new file mode 100644 index 00000000..1d5a7be3 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/GOVERNANCE.md @@ -0,0 +1,3 @@ +# NATS Go Client Governance + +NATS Go Client (go-nats) is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md). 
\ No newline at end of file diff --git a/vendor/github.com/nats-io/nats.go/LICENSE b/vendor/github.com/nats-io/nats.go/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/nats-io/nats.go/MAINTAINERS.md b/vendor/github.com/nats-io/nats.go/MAINTAINERS.md new file mode 100644 index 00000000..23214655 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/MAINTAINERS.md @@ -0,0 +1,8 @@ +# Maintainers + +Maintainership is on a per project basis. + +### Maintainers + - Derek Collison [@derekcollison](https://github.com/derekcollison) + - Ivan Kozlovic [@kozlovic](https://github.com/kozlovic) + - Waldemar Quevedo [@wallyqs](https://github.com/wallyqs) diff --git a/vendor/github.com/nats-io/nats.go/README.md b/vendor/github.com/nats-io/nats.go/README.md new file mode 100644 index 00000000..c65ae777 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/README.md @@ -0,0 +1,517 @@ +# NATS - Go Client +A [Go](http://golang.org) client for the [NATS messaging system](https://nats.io). + +[![License Apache 2][License-Image]][License-Url] [![Go Report Card][ReportCard-Image]][ReportCard-Url] [![Build Status][Build-Status-Image]][Build-Status-Url] [![GoDoc][GoDoc-Image]][GoDoc-Url] [![Coverage Status][Coverage-image]][Coverage-Url] + +[License-Url]: https://www.apache.org/licenses/LICENSE-2.0 +[License-Image]: https://img.shields.io/badge/License-Apache2-blue.svg +[ReportCard-Url]: https://goreportcard.com/report/github.com/nats-io/nats.go +[ReportCard-Image]: https://goreportcard.com/badge/github.com/nats-io/nats.go +[Build-Status-Url]: https://travis-ci.com/github/nats-io/nats.go +[Build-Status-Image]: https://travis-ci.com/nats-io/nats.go.svg?branch=main +[GoDoc-Url]: https://pkg.go.dev/github.com/nats-io/nats.go +[GoDoc-Image]: https://img.shields.io/badge/GoDoc-reference-007d9c +[Coverage-Url]: https://coveralls.io/r/nats-io/nats.go?branch=main +[Coverage-image]: https://coveralls.io/repos/github/nats-io/nats.go/badge.svg?branch=main + +## Installation + +```bash +# Go client +go get github.com/nats-io/nats.go/ + +# Server +go get github.com/nats-io/nats-server +``` + +When using or transitioning to Go modules support: + +```bash +# Go client latest or explicit version +go get github.com/nats-io/nats.go/@latest +go get github.com/nats-io/nats.go/@v1.24.0 + +# For latest NATS Server, add /v2 at the end +go get github.com/nats-io/nats-server/v2 + +# NATS Server v1 is installed otherwise +# go get github.com/nats-io/nats-server +``` + +## Basic Usage + +```go +import "github.com/nats-io/nats.go" + +// Connect to a server +nc, _ := nats.Connect(nats.DefaultURL) + +// Simple Publisher +nc.Publish("foo", []byte("Hello World")) + +// Simple Async Subscriber +nc.Subscribe("foo", func(m *nats.Msg) { + fmt.Printf("Received a message: %s\n", string(m.Data)) +}) + +// Responding to a request message +nc.Subscribe("request", func(m *nats.Msg) { + m.Respond([]byte("answer is 42")) +}) + +// Simple Sync Subscriber +sub, err := nc.SubscribeSync("foo") +m, err := sub.NextMsg(timeout) + +// Channel Subscriber +ch := make(chan *nats.Msg, 64) +sub, err := nc.ChanSubscribe("foo", ch) +msg := <- ch + +// Unsubscribe +sub.Unsubscribe() + +// Drain +sub.Drain() + +// Requests +msg, err := nc.Request("help", []byte("help me"), 10*time.Millisecond) + +// Replies 
+nc.Subscribe("help", func(m *nats.Msg) { + nc.Publish(m.Reply, []byte("I can help!")) +}) + +// Drain connection (Preferred for responders) +// Close() not needed if this is called. +nc.Drain() + +// Close connection +nc.Close() +``` + +## JetStream Basic Usage + +```go +import "github.com/nats-io/nats.go" + +// Connect to NATS +nc, _ := nats.Connect(nats.DefaultURL) + +// Create JetStream Context +js, _ := nc.JetStream(nats.PublishAsyncMaxPending(256)) + +// Simple Stream Publisher +js.Publish("ORDERS.scratch", []byte("hello")) + +// Simple Async Stream Publisher +for i := 0; i < 500; i++ { + js.PublishAsync("ORDERS.scratch", []byte("hello")) +} +select { +case <-js.PublishAsyncComplete(): +case <-time.After(5 * time.Second): + fmt.Println("Did not resolve in time") +} + +// Simple Async Ephemeral Consumer +js.Subscribe("ORDERS.*", func(m *nats.Msg) { + fmt.Printf("Received a JetStream message: %s\n", string(m.Data)) +}) + +// Simple Sync Durable Consumer (optional SubOpts at the end) +sub, err := js.SubscribeSync("ORDERS.*", nats.Durable("MONITOR"), nats.MaxDeliver(3)) +m, err := sub.NextMsg(timeout) + +// Simple Pull Consumer +sub, err := js.PullSubscribe("ORDERS.*", "MONITOR") +msgs, err := sub.Fetch(10) + +// Unsubscribe +sub.Unsubscribe() + +// Drain +sub.Drain() +``` + +## JetStream Basic Management + +```go +import "github.com/nats-io/nats.go" + +// Connect to NATS +nc, _ := nats.Connect(nats.DefaultURL) + +// Create JetStream Context +js, _ := nc.JetStream() + +// Create a Stream +js.AddStream(&nats.StreamConfig{ + Name: "ORDERS", + Subjects: []string{"ORDERS.*"}, +}) + +// Update a Stream +js.UpdateStream(&nats.StreamConfig{ + Name: "ORDERS", + MaxBytes: 8, +}) + +// Create a Consumer +js.AddConsumer("ORDERS", &nats.ConsumerConfig{ + Durable: "MONITOR", +}) + +// Delete Consumer +js.DeleteConsumer("ORDERS", "MONITOR") + +// Delete Stream +js.DeleteStream("ORDERS") +``` + +## Encoded Connections + +```go + +nc, _ := nats.Connect(nats.DefaultURL) +c, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER) +defer c.Close() + +// Simple Publisher +c.Publish("foo", "Hello World") + +// Simple Async Subscriber +c.Subscribe("foo", func(s string) { + fmt.Printf("Received a message: %s\n", s) +}) + +// EncodedConn can Publish any raw Go type using the registered Encoder +type person struct { + Name string + Address string + Age int +} + +// Go type Subscriber +c.Subscribe("hello", func(p *person) { + fmt.Printf("Received a person: %+v\n", p) +}) + +me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street, San Francisco, CA"} + +// Go type Publisher +c.Publish("hello", me) + +// Unsubscribe +sub, err := c.Subscribe("foo", nil) +// ... +sub.Unsubscribe() + +// Requests +var response string +err = c.Request("help", "help me", &response, 10*time.Millisecond) +if err != nil { + fmt.Printf("Request failed: %v\n", err) +} + +// Replying +c.Subscribe("help", func(subj, reply string, msg string) { + c.Publish(reply, "I can help!") +}) + +// Close connection +c.Close(); +``` + +## New Authentication (Nkeys and User Credentials) +This requires server with version >= 2.0.0 + +NATS servers have a new security and authentication mechanism to authenticate with user credentials and Nkeys. +The simplest form is to use the helper method UserCredentials(credsFilepath). +```go +nc, err := nats.Connect(url, nats.UserCredentials("user.creds")) +``` + +The helper methods creates two callback handlers to present the user JWT and sign the nonce challenge from the server. 
+
+The helper can also take two files, one for the user JWT and one for the NKey seed.
+```go
+nc, err := nats.Connect(url, nats.UserCredentials("user.jwt", "user.nk"))
+```
+
+As in the sketch above, you can also set the callback handlers directly and manage the challenge signing yourself.
+```go
+nc, err := nats.Connect(url, nats.UserJWT(jwtCB, sigCB))
+```
+
+Bare NKeys are also supported. The nkey seed should be in a read-only file, e.g. seed.txt
+```bash
+> cat seed.txt
+# This is my seed nkey!
+SUAGMJH5XLGZKQQWAWKRZJIGMOU4HPFUYLXJMXOO5NLFEO2OOQJ5LPRDPM
+```
+
+The NkeyOptionFromSeed helper loads and decodes the seed and performs the proper signing of the server nonce.
+It will clear memory in between invocations.
+You can also choose the low-level option and provide the public key and a signature callback on your own.
+
+```go
+opt, err := nats.NkeyOptionFromSeed("seed.txt")
+nc, err := nats.Connect(serverUrl, opt)
+
+// Direct
+nc, err := nats.Connect(serverUrl, nats.Nkey(pubNkey, sigCB))
+```
+
+## TLS
+
+```go
+// tls as a scheme will enable secure connections by default. This will also verify the server name.
+nc, err := nats.Connect("tls://nats.demo.io:4443")
+
+// If you are using a self-signed certificate, you need to have a tls.Config with RootCAs setup.
+// We provide a helper method to make this case easier.
+nc, err = nats.Connect("tls://localhost:4443", nats.RootCAs("./configs/certs/ca.pem"))
+
+// If the server requires a client certificate, there is a helper function for that too:
+cert := nats.ClientCert("./configs/certs/client-cert.pem", "./configs/certs/client-key.pem")
+nc, err = nats.Connect("tls://localhost:4443", cert)
+
+// You can also supply a complete tls.Config
+
+certFile := "./configs/certs/client-cert.pem"
+keyFile := "./configs/certs/client-key.pem"
+cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+if err != nil {
+	t.Fatalf("error parsing X509 certificate/key pair: %v", err)
+}
+
+config := &tls.Config{
+	ServerName:   opts.Host,
+	Certificates: []tls.Certificate{cert},
+	RootCAs:      pool,
+	MinVersion:   tls.VersionTLS12,
+}
+
+nc, err = nats.Connect("nats://localhost:4443", nats.Secure(config))
+if err != nil {
+	t.Fatalf("Got an error on Connect with Secure Options: %+v\n", err)
+}
+
+```
+
+## Using Go Channels (netchan)
+
+```go
+nc, _ := nats.Connect(nats.DefaultURL)
+ec, _ := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
+defer ec.Close()
+
+type person struct {
+	Name    string
+	Address string
+	Age     int
+}
+
+recvCh := make(chan *person)
+ec.BindRecvChan("hello", recvCh)
+
+sendCh := make(chan *person)
+ec.BindSendChan("hello", sendCh)
+
+me := &person{Name: "derek", Age: 22, Address: "140 New Montgomery Street"}
+
+// Send via Go channels
+sendCh <- me
+
+// Receive via Go channels
+who := <-recvCh
+```
+
+## Wildcard Subscriptions
+
+```go
+
+// "*" matches any token, at any level of the subject.
+nc.Subscribe("foo.*.baz", func(m *Msg) {
+	fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data))
+})
+
+nc.Subscribe("foo.bar.*", func(m *Msg) {
+	fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data))
+})
+
+// ">" matches any length of the tail of a subject, and can only be the last token
+// E.g. 'foo.>' will match 'foo.bar', 'foo.bar.baz', 'foo.foo.bar.bax.22'
+nc.Subscribe("foo.>", func(m *Msg) {
+	fmt.Printf("Msg received on [%s] : %s\n", m.Subject, string(m.Data))
+})
+
+// Matches all of the above
+nc.Publish("foo.bar.baz", []byte("Hello World"))
+
+```
+
+## Queue Groups
+
+```go
+// All subscriptions with the same queue name will form a queue group.
+// Each message will be delivered to only one subscriber per queue group,
+// using queuing semantics. You can have as many queue groups as you wish.
+// Normal subscribers will continue to work as expected.
+
+nc.QueueSubscribe("foo", "job_workers", func(_ *Msg) {
+	received += 1
+})
+```
+
+## Advanced Usage
+
+```go
+
+// Normally, the library will return an error when trying to connect and
+// there is no server running. The RetryOnFailedConnect option will set
+// the connection in reconnecting state if it failed to connect right away.
+nc, err := nats.Connect(nats.DefaultURL,
+	nats.RetryOnFailedConnect(true),
+	nats.MaxReconnects(10),
+	nats.ReconnectWait(time.Second),
+	nats.ReconnectHandler(func(_ *nats.Conn) {
+		// Note that this will be invoked for the first asynchronous connect.
+	}))
+if err != nil {
+	// Should not return an error even if it can't connect, but you still
+	// need to check in case there are some configuration errors.
+}
+
+// Flush connection to server, returns when all messages have been processed.
+nc.Flush()
+fmt.Println("All clear!")
+
+// FlushTimeout specifies a timeout value as well.
+err := nc.FlushTimeout(1*time.Second)
+if err == nil {
+	fmt.Println("All clear!")
+} else {
+	fmt.Println("Flush timed out!")
+}
+
+// Auto-unsubscribe after MAX_WANTED messages received
+const MAX_WANTED = 10
+sub, err := nc.Subscribe("foo", func(_ *Msg) {})
+sub.AutoUnsubscribe(MAX_WANTED)
+
+// Multiple connections
+nc1, _ := nats.Connect("nats://host1:4222")
+nc2, _ := nats.Connect("nats://host2:4222")
+
+nc1.Subscribe("foo", func(m *Msg) {
+	fmt.Printf("Received a message: %s\n", string(m.Data))
+})
+
+nc2.Publish("foo", []byte("Hello World!"))
+
+```
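+
+Queue groups pair naturally with the drain calls shown earlier for graceful worker
+shutdown: scale identical workers under one queue name, then drain the subscription
+so in-flight messages finish before the process exits. A minimal, illustrative
+sketch (the subject and queue names are placeholders):
+
+```go
+nc, _ := nats.Connect(nats.DefaultURL)
+
+// Every instance of this process joins the same queue group,
+// so each job is handled by exactly one worker.
+sub, _ := nc.QueueSubscribe("jobs", "job_workers", func(m *nats.Msg) {
+	handleJob(m.Data) // hypothetical helper
+})
+
+// On shutdown, stop receiving new messages but let
+// already-delivered ones complete.
+sub.Drain()
+```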
+
+## Clustered Usage
+
+```go
+
+var servers = "nats://localhost:1222, nats://localhost:1223, nats://localhost:1224"
+
+nc, err := nats.Connect(servers)
+
+// Optionally set ReconnectWait and MaxReconnect attempts.
+// This example means 10 seconds total per backend.
+nc, err = nats.Connect(servers, nats.MaxReconnects(5), nats.ReconnectWait(2 * time.Second))
+
+// You can also add some jitter for the reconnection.
+// This call will add up to 500 milliseconds for non TLS connections and 2 seconds for TLS connections.
+// If not specified, the library defaults to 100 milliseconds and 1 second, respectively.
+nc, err = nats.Connect(servers, nats.ReconnectJitter(500*time.Millisecond, 2*time.Second))
+
+// You can also specify a custom reconnect delay handler. If set, the library will invoke it when it has tried
+// all URLs in its list. The value returned will be used as the total sleep time, so add your own jitter.
+// The library will pass the number of times it went through the whole list.
+nc, err = nats.Connect(servers, nats.CustomReconnectDelay(func(attempts int) time.Duration {
+	return someBackoffFunction(attempts)
+}))
+
+// Optionally disable randomization of the server pool
+nc, err = nats.Connect(servers, nats.DontRandomize())
+
+// Setup callbacks to be notified on disconnects, reconnects and connection closed.
+nc, err = nats.Connect(servers,
+	nats.DisconnectErrHandler(func(nc *nats.Conn, err error) {
+		fmt.Printf("Got disconnected! Reason: %q\n", err)
+	}),
+	nats.ReconnectHandler(func(nc *nats.Conn) {
+		fmt.Printf("Got reconnected to %v!\n", nc.ConnectedUrl())
+	}),
+	nats.ClosedHandler(func(nc *nats.Conn) {
+		fmt.Printf("Connection closed. Reason: %q\n", nc.LastError())
+	}),
+)
+
+// When connecting to a mesh of servers with auto-discovery capabilities,
+// you may need to provide a username/password or token in order to connect
+// to any server in that mesh when authentication is required.
+// Instead of providing the credentials in the initial URL, you will use
+// new option setters:
+nc, err = nats.Connect("nats://localhost:4222", nats.UserInfo("foo", "bar"))
+
+// For token based authentication:
+nc, err = nats.Connect("nats://localhost:4222", nats.Token("S3cretT0ken"))
+
+// You can even pass the two at the same time in case one of the servers
+// in the mesh requires a token instead of a username and password.
+nc, err = nats.Connect("nats://localhost:4222",
+	nats.UserInfo("foo", "bar"),
+	nats.Token("S3cretT0ken"))
+
+// Note that if credentials are specified in the initial URLs, they take
+// precedence over the credentials specified through the options.
+// For instance, in the connect call below, the client library will use
+// the user "my" and password "pwd" to connect to localhost:4222, however,
+// it will use username "foo" and password "bar" when (re)connecting to
+// a different server URL that it got as part of the auto-discovery.
+nc, err = nats.Connect("nats://my:pwd@localhost:4222", nats.UserInfo("foo", "bar"))
+
+```
+
+## Context support (+Go 1.7)
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
+defer cancel()
+
+nc, err := nats.Connect(nats.DefaultURL)
+
+// Request with context
+msg, err := nc.RequestWithContext(ctx, "foo", []byte("bar"))
+
+// Synchronous subscriber with context
+sub, err := nc.SubscribeSync("foo")
+msg, err := sub.NextMsgWithContext(ctx)
+
+// Encoded Request with context
+c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
+type request struct {
+	Message string `json:"message"`
+}
+type response struct {
+	Code int `json:"code"`
+}
+req := &request{Message: "Hello"}
+resp := &response{}
+err = c.RequestWithContext(ctx, "foo", req, resp)
+```
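+
+Contexts compose with the request APIs for deadline-bound retries. An illustrative
+sketch (the subject svc.ping is a placeholder) that keeps retrying a request until
+the context deadline expires:
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+defer cancel()
+
+for {
+	msg, err := nc.RequestWithContext(ctx, "svc.ping", []byte("ping"))
+	if err == nil {
+		fmt.Printf("got reply: %s\n", msg.Data)
+		break
+	}
+	if ctx.Err() != nil {
+		fmt.Println("deadline expired without a reply")
+		break
+	}
+	// Transient failure (e.g. no responders yet); retry shortly.
+	time.Sleep(100 * time.Millisecond)
+}
+```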
+
+## License
+
+Unless otherwise noted, the NATS source files are distributed
+under the Apache Version 2.0 license found in the LICENSE file.
+
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fnats-io%2Fgo-nats.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fnats-io%2Fgo-nats?ref=badge_large)
diff --git a/vendor/github.com/nats-io/nats.go/context.go b/vendor/github.com/nats-io/nats.go/context.go
new file mode 100644
index 00000000..39f8e5d0
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/context.go
@@ -0,0 +1,245 @@
+// Copyright 2016-2022 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nats
+
+import (
+	"context"
+	"reflect"
+)
+
+// RequestMsgWithContext takes a context and a message, sends the request,
+// and expects a single response.
+func (nc *Conn) RequestMsgWithContext(ctx context.Context, msg *Msg) (*Msg, error) {
+	if msg == nil {
+		return nil, ErrInvalidMsg
+	}
+	hdr, err := msg.headerBytes()
+	if err != nil {
+		return nil, err
+	}
+	return nc.requestWithContext(ctx, msg.Subject, hdr, msg.Data)
+}
+
+// RequestWithContext takes a context, a subject and payload in bytes,
+// sends the request, and expects a single response.
+func (nc *Conn) RequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) {
+	return nc.requestWithContext(ctx, subj, nil, data)
+}
+
+func (nc *Conn) requestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) {
+	if ctx == nil {
+		return nil, ErrInvalidContext
+	}
+	if nc == nil {
+		return nil, ErrInvalidConnection
+	}
+	// Check whether the context is done already before making
+	// the request.
+	if ctx.Err() != nil {
+		return nil, ctx.Err()
+	}
+
+	var m *Msg
+	var err error
+
+	// If user wants the old style.
+	if nc.useOldRequestStyle() {
+		m, err = nc.oldRequestWithContext(ctx, subj, hdr, data)
+	} else {
+		mch, token, err := nc.createNewRequestAndSend(subj, hdr, data)
+		if err != nil {
+			return nil, err
+		}
+
+		var ok bool
+
+		select {
+		case m, ok = <-mch:
+			if !ok {
+				return nil, ErrConnectionClosed
+			}
+		case <-ctx.Done():
+			nc.mu.Lock()
+			delete(nc.respMap, token)
+			nc.mu.Unlock()
+			return nil, ctx.Err()
+		}
+	}
+	// Check for no responder status.
+	if err == nil && len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
+		m, err = nil, ErrNoResponders
+	}
+	return m, err
+}
+
+// oldRequestWithContext utilizes inbox and subscription per request.
+func (nc *Conn) oldRequestWithContext(ctx context.Context, subj string, hdr, data []byte) (*Msg, error) {
+	inbox := nc.NewInbox()
+	ch := make(chan *Msg, RequestChanLen)
+
+	s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, true, nil)
+	if err != nil {
+		return nil, err
+	}
+	s.AutoUnsubscribe(1)
+	defer s.Unsubscribe()
+
+	err = nc.publish(subj, inbox, hdr, data)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.NextMsgWithContext(ctx)
+}
+
+func (s *Subscription) nextMsgWithContext(ctx context.Context, pullSubInternal, waitIfNoMsg bool) (*Msg, error) {
+	if ctx == nil {
+		return nil, ErrInvalidContext
+	}
+	if s == nil {
+		return nil, ErrBadSubscription
+	}
+	if ctx.Err() != nil {
+		return nil, ctx.Err()
+	}
+
+	s.mu.Lock()
+	err := s.validateNextMsgState(pullSubInternal)
+	if err != nil {
+		s.mu.Unlock()
+		return nil, err
+	}
+
+	// snapshot
+	mch := s.mch
+	s.mu.Unlock()
+
+	var ok bool
+	var msg *Msg
+
+	// If something is available right away, let's optimize that case.
+	select {
+	case msg, ok = <-mch:
+		if !ok {
+			return nil, s.getNextMsgErr()
+		}
+		if err := s.processNextMsgDelivered(msg); err != nil {
+			return nil, err
+		} else {
+			return msg, nil
+		}
+	default:
+		// If internal and we don't want to wait, signal that there is no
+		// message in the internal queue.
+ if pullSubInternal && !waitIfNoMsg { + return nil, errNoMessages + } + } + + select { + case msg, ok = <-mch: + if !ok { + return nil, s.getNextMsgErr() + } + if err := s.processNextMsgDelivered(msg); err != nil { + return nil, err + } + case <-ctx.Done(): + return nil, ctx.Err() + } + + return msg, nil +} + +// NextMsgWithContext takes a context and returns the next message +// available to a synchronous subscriber, blocking until it is delivered +// or context gets canceled. +func (s *Subscription) NextMsgWithContext(ctx context.Context) (*Msg, error) { + return s.nextMsgWithContext(ctx, false, true) +} + +// FlushWithContext will allow a context to control the duration +// of a Flush() call. This context should be non-nil and should +// have a deadline set. We will return an error if none is present. +func (nc *Conn) FlushWithContext(ctx context.Context) error { + if nc == nil { + return ErrInvalidConnection + } + if ctx == nil { + return ErrInvalidContext + } + _, ok := ctx.Deadline() + if !ok { + return ErrNoDeadlineContext + } + + nc.mu.Lock() + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + // Create a buffered channel to prevent chan send to block + // in processPong() + ch := make(chan struct{}, 1) + nc.sendPing(ch) + nc.mu.Unlock() + + var err error + + select { + case _, ok := <-ch: + if !ok { + err = ErrConnectionClosed + } else { + close(ch) + } + case <-ctx.Done(): + err = ctx.Err() + } + + if err != nil { + nc.removeFlushEntry(ch) + } + + return err +} + +// RequestWithContext will create an Inbox and perform a Request +// using the provided cancellation context with the Inbox reply +// for the data v. A response will be decoded into the vPtr last parameter. +func (c *EncodedConn) RequestWithContext(ctx context.Context, subject string, v interface{}, vPtr interface{}) error { + if ctx == nil { + return ErrInvalidContext + } + + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + m, err := c.Conn.RequestWithContext(ctx, subject, b) + if err != nil { + return err + } + if reflect.TypeOf(vPtr) == emptyMsgType { + mPtr := vPtr.(*Msg) + *mPtr = *m + } else { + err := c.Enc.Decode(m.Subject, m.Data, vPtr) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/nats-io/nats.go/dependencies.md b/vendor/github.com/nats-io/nats.go/dependencies.md new file mode 100644 index 00000000..cc986b27 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/dependencies.md @@ -0,0 +1,13 @@ +# External Dependencies + +This file lists the dependencies used in this repository. + +| Dependency | License | +|-|-| +| Go | BSD 3-Clause "New" or "Revised" License | +| github.com/nats-io/nats.go | Apache License 2.0 | +| github.com/golang/protobuf v1.4.2 | BSD 3-Clause "New" or "Revised" License | +| github.com/nats-io/nats-server/v2 v2.1.8-0.20201115145023-f61fa8529a0f | Apache License 2.0 | +| github.com/nats-io/nkeys v0.2.0 | Apache License 2.0 | +| github.com/nats-io/nuid v1.0.1 | Apache License 2.0 | +| google.golang.org/protobuf v1.23.0 | BSD 3-Clause License | diff --git a/vendor/github.com/nats-io/nats.go/enc.go b/vendor/github.com/nats-io/nats.go/enc.go new file mode 100644 index 00000000..3af20d26 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/enc.go @@ -0,0 +1,269 @@ +// Copyright 2012-2019 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "errors" + "fmt" + "reflect" + "sync" + "time" + + // Default Encoders + "github.com/nats-io/nats.go/encoders/builtin" +) + +// Encoder interface is for all register encoders +type Encoder interface { + Encode(subject string, v interface{}) ([]byte, error) + Decode(subject string, data []byte, vPtr interface{}) error +} + +var encMap map[string]Encoder +var encLock sync.Mutex + +// Indexed names into the Registered Encoders. +const ( + JSON_ENCODER = "json" + GOB_ENCODER = "gob" + DEFAULT_ENCODER = "default" +) + +func init() { + encMap = make(map[string]Encoder) + // Register json, gob and default encoder + RegisterEncoder(JSON_ENCODER, &builtin.JsonEncoder{}) + RegisterEncoder(GOB_ENCODER, &builtin.GobEncoder{}) + RegisterEncoder(DEFAULT_ENCODER, &builtin.DefaultEncoder{}) +} + +// EncodedConn are the preferred way to interface with NATS. They wrap a bare connection to +// a nats server and have an extendable encoder system that will encode and decode messages +// from raw Go types. +type EncodedConn struct { + Conn *Conn + Enc Encoder +} + +// NewEncodedConn will wrap an existing Connection and utilize the appropriate registered +// encoder. +func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) { + if c == nil { + return nil, errors.New("nats: Nil Connection") + } + if c.IsClosed() { + return nil, ErrConnectionClosed + } + ec := &EncodedConn{Conn: c, Enc: EncoderForType(encType)} + if ec.Enc == nil { + return nil, fmt.Errorf("no encoder registered for '%s'", encType) + } + return ec, nil +} + +// RegisterEncoder will register the encType with the given Encoder. Useful for customization. +func RegisterEncoder(encType string, enc Encoder) { + encLock.Lock() + defer encLock.Unlock() + encMap[encType] = enc +} + +// EncoderForType will return the registered Encoder for the encType. +func EncoderForType(encType string) Encoder { + encLock.Lock() + defer encLock.Unlock() + return encMap[encType] +} + +// Publish publishes the data argument to the given subject. The data argument +// will be encoded using the associated encoder. +func (c *EncodedConn) Publish(subject string, v interface{}) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + return c.Conn.publish(subject, _EMPTY_, nil, b) +} + +// PublishRequest will perform a Publish() expecting a response on the +// reply subject. Use Request() for automatically waiting for a response +// inline. +func (c *EncodedConn) PublishRequest(subject, reply string, v interface{}) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + return c.Conn.publish(subject, reply, nil, b) +} + +// Request will create an Inbox and perform a Request() call +// with the Inbox reply for the data v. A response will be +// decoded into the vPtr Response. 
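+//
+// For example (illustrative; request and response are caller-defined,
+// JSON-taggable types):
+//
+//	var resp response
+//	err := ec.Request("help", &request{Message: "help me"}, &resp, time.Second)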
+func (c *EncodedConn) Request(subject string, v interface{}, vPtr interface{}, timeout time.Duration) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + m, err := c.Conn.Request(subject, b, timeout) + if err != nil { + return err + } + if reflect.TypeOf(vPtr) == emptyMsgType { + mPtr := vPtr.(*Msg) + *mPtr = *m + } else { + err = c.Enc.Decode(m.Subject, m.Data, vPtr) + } + return err +} + +// Handler is a specific callback used for Subscribe. It is generalized to +// an interface{}, but we will discover its format and arguments at runtime +// and perform the correct callback, including demarshaling encoded data +// back into the appropriate struct based on the signature of the Handler. +// +// Handlers are expected to have one of four signatures. +// +// type person struct { +// Name string `json:"name,omitempty"` +// Age uint `json:"age,omitempty"` +// } +// +// handler := func(m *Msg) +// handler := func(p *person) +// handler := func(subject string, o *obj) +// handler := func(subject, reply string, o *obj) +// +// These forms allow a callback to request a raw Msg ptr, where the processing +// of the message from the wire is untouched. Process a JSON representation +// and demarshal it into the given struct, e.g. person. +// There are also variants where the callback wants either the subject, or the +// subject and the reply subject. +type Handler interface{} + +// Dissect the cb Handler's signature +func argInfo(cb Handler) (reflect.Type, int) { + cbType := reflect.TypeOf(cb) + if cbType.Kind() != reflect.Func { + panic("nats: Handler needs to be a func") + } + numArgs := cbType.NumIn() + if numArgs == 0 { + return nil, numArgs + } + return cbType.In(numArgs - 1), numArgs +} + +var emptyMsgType = reflect.TypeOf(&Msg{}) + +// Subscribe will create a subscription on the given subject and process incoming +// messages using the specified Handler. The Handler should be a func that matches +// a signature from the description of Handler from above. +func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) { + return c.subscribe(subject, _EMPTY_, cb) +} + +// QueueSubscribe will create a queue subscription on the given subject and process +// incoming messages using the specified Handler. The Handler should be a func that +// matches a signature from the description of Handler from above. +func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) { + return c.subscribe(subject, queue, cb) +} + +// Internal implementation that all public functions will use. 
+func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscription, error) { + if cb == nil { + return nil, errors.New("nats: Handler required for EncodedConn Subscription") + } + argType, numArgs := argInfo(cb) + if argType == nil { + return nil, errors.New("nats: Handler requires at least one argument") + } + + cbValue := reflect.ValueOf(cb) + wantsRaw := (argType == emptyMsgType) + + natsCB := func(m *Msg) { + var oV []reflect.Value + if wantsRaw { + oV = []reflect.Value{reflect.ValueOf(m)} + } else { + var oPtr reflect.Value + if argType.Kind() != reflect.Ptr { + oPtr = reflect.New(argType) + } else { + oPtr = reflect.New(argType.Elem()) + } + if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { + if c.Conn.Opts.AsyncErrorCB != nil { + c.Conn.ach.push(func() { + c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, errors.New("nats: Got an error trying to unmarshal: "+err.Error())) + }) + } + return + } + if argType.Kind() != reflect.Ptr { + oPtr = reflect.Indirect(oPtr) + } + + // Callback Arity + switch numArgs { + case 1: + oV = []reflect.Value{oPtr} + case 2: + subV := reflect.ValueOf(m.Subject) + oV = []reflect.Value{subV, oPtr} + case 3: + subV := reflect.ValueOf(m.Subject) + replyV := reflect.ValueOf(m.Reply) + oV = []reflect.Value{subV, replyV, oPtr} + } + + } + cbValue.Call(oV) + } + + return c.Conn.subscribe(subject, queue, natsCB, nil, false, nil) +} + +// FlushTimeout allows a Flush operation to have an associated timeout. +func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) { + return c.Conn.FlushTimeout(timeout) +} + +// Flush will perform a round trip to the server and return when it +// receives the internal reply. +func (c *EncodedConn) Flush() error { + return c.Conn.Flush() +} + +// Close will close the connection to the server. This call will release +// all blocking calls, such as Flush(), etc. +func (c *EncodedConn) Close() { + c.Conn.Close() +} + +// Drain will put a connection into a drain state. All subscriptions will +// immediately be put into a drain state. Upon completion, the publishers +// will be drained and can not publish any additional messages. Upon draining +// of the publishers, the connection will be closed. Use the ClosedCB() +// option to know when the connection has moved from draining to closed. +func (c *EncodedConn) Drain() error { + return c.Conn.Drain() +} + +// LastError reports the last error encountered via the Connection. +func (c *EncodedConn) LastError() error { + return c.Conn.err +} diff --git a/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go b/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go new file mode 100644 index 00000000..46d918ee --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/encoders/builtin/default_enc.go @@ -0,0 +1,117 @@ +// Copyright 2012-2018 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package builtin
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "unsafe"
+)
+
+// DefaultEncoder implementation for EncodedConn.
+// This encoder will leave []byte and string untouched, but will attempt to
+// turn numbers into appropriate strings that can be decoded. It will also
+// properly encode and decode bools. It will encode a struct, but if you want
+// to properly handle structures you should use JsonEncoder.
+type DefaultEncoder struct {
+ // Empty
+}
+
+var trueB = []byte("true")
+var falseB = []byte("false")
+var nilB = []byte("")
+
+// Encode
+func (je *DefaultEncoder) Encode(subject string, v interface{}) ([]byte, error) {
+ switch arg := v.(type) {
+ case string:
+ bytes := *(*[]byte)(unsafe.Pointer(&arg))
+ return bytes, nil
+ case []byte:
+ return arg, nil
+ case bool:
+ if arg {
+ return trueB, nil
+ } else {
+ return falseB, nil
+ }
+ case nil:
+ return nilB, nil
+ default:
+ var buf bytes.Buffer
+ fmt.Fprintf(&buf, "%+v", arg)
+ return buf.Bytes(), nil
+ }
+}
+
+// Decode
+func (je *DefaultEncoder) Decode(subject string, data []byte, vPtr interface{}) error {
+ // Figure out what it's pointing to...
+ sData := *(*string)(unsafe.Pointer(&data))
+ switch arg := vPtr.(type) {
+ case *string:
+ *arg = sData
+ return nil
+ case *[]byte:
+ *arg = data
+ return nil
+ case *int:
+ n, err := strconv.ParseInt(sData, 10, 64)
+ if err != nil {
+ return err
+ }
+ *arg = int(n)
+ return nil
+ case *int32:
+ n, err := strconv.ParseInt(sData, 10, 64)
+ if err != nil {
+ return err
+ }
+ *arg = int32(n)
+ return nil
+ case *int64:
+ n, err := strconv.ParseInt(sData, 10, 64)
+ if err != nil {
+ return err
+ }
+ *arg = int64(n)
+ return nil
+ case *float32:
+ n, err := strconv.ParseFloat(sData, 32)
+ if err != nil {
+ return err
+ }
+ *arg = float32(n)
+ return nil
+ case *float64:
+ n, err := strconv.ParseFloat(sData, 64)
+ if err != nil {
+ return err
+ }
+ *arg = float64(n)
+ return nil
+ case *bool:
+ b, err := strconv.ParseBool(sData)
+ if err != nil {
+ return err
+ }
+ *arg = b
+ return nil
+ default:
+ vt := reflect.TypeOf(arg).Elem()
+ return fmt.Errorf("nats: Default Encoder can't decode to type %s", vt)
+ }
+}
diff --git a/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go b/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go
new file mode 100644
index 00000000..632bcbd3
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/encoders/builtin/gob_enc.go
@@ -0,0 +1,45 @@
+// Copyright 2013-2018 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package builtin
+
+import (
+ "bytes"
+ "encoding/gob"
+)
+
+// GobEncoder is a Go specific GOB Encoder implementation for EncodedConn.
+// This encoder will use the builtin encoding/gob to Marshal
+// and Unmarshal most types, including structs.
+type GobEncoder struct {
+ // Empty
+}
+
+// FIXME(dlc) - This could probably be more efficient.
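+// A hedged round-trip sketch (editor's addition; values are illustrative):
+//
+//	ge := &GobEncoder{}
+//	b, _ := ge.Encode("subj", map[string]int{"a": 1})
+//	var out map[string]int
+//	_ = ge.Decode("subj", b, &out) // out now equals map[string]int{"a": 1}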
+ +// Encode +func (ge *GobEncoder) Encode(subject string, v interface{}) ([]byte, error) { + b := new(bytes.Buffer) + enc := gob.NewEncoder(b) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Decode +func (ge *GobEncoder) Decode(subject string, data []byte, vPtr interface{}) (err error) { + dec := gob.NewDecoder(bytes.NewBuffer(data)) + err = dec.Decode(vPtr) + return +} diff --git a/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go b/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go new file mode 100644 index 00000000..c9670f31 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/encoders/builtin/json_enc.go @@ -0,0 +1,56 @@ +// Copyright 2012-2018 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package builtin + +import ( + "encoding/json" + "strings" +) + +// JsonEncoder is a JSON Encoder implementation for EncodedConn. +// This encoder will use the builtin encoding/json to Marshal +// and Unmarshal most types, including structs. +type JsonEncoder struct { + // Empty +} + +// Encode +func (je *JsonEncoder) Encode(subject string, v interface{}) ([]byte, error) { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + return b, nil +} + +// Decode +func (je *JsonEncoder) Decode(subject string, data []byte, vPtr interface{}) (err error) { + switch arg := vPtr.(type) { + case *string: + // If they want a string and it is a JSON string, strip quotes + // This allows someone to send a struct but receive as a plain string + // This cast should be efficient for Go 1.3 and beyond. 
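+ // Editor's note: e.g. data of `"hello"` decodes into *string as `hello`
+ // (quotes stripped), while data of `hello` is passed through unchanged.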
+ str := string(data) + if strings.HasPrefix(str, `"`) && strings.HasSuffix(str, `"`) { + *arg = str[1 : len(str)-1] + } else { + *arg = str + } + case *[]byte: + *arg = data + default: + err = json.Unmarshal(data, arg) + } + return +} diff --git a/vendor/github.com/nats-io/nats.go/go_test.mod b/vendor/github.com/nats-io/nats.go/go_test.mod new file mode 100644 index 00000000..b8470360 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/go_test.mod @@ -0,0 +1,20 @@ +module github.com/nats-io/nats.go + +go 1.19 + +require ( + github.com/golang/protobuf v1.4.2 + github.com/nats-io/nats-server/v2 v2.9.6 + github.com/nats-io/nkeys v0.3.0 + github.com/nats-io/nuid v1.0.1 + google.golang.org/protobuf v1.23.0 +) + +require ( + github.com/klauspost/compress v1.15.11 // indirect + github.com/minio/highwayhash v1.0.2 // indirect + github.com/nats-io/jwt/v2 v2.3.0 // indirect + golang.org/x/crypto v0.5.0 // indirect + golang.org/x/sys v0.4.0 // indirect + golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect +) diff --git a/vendor/github.com/nats-io/nats.go/go_test.sum b/vendor/github.com/nats-io/nats.go/go_test.sum new file mode 100644 index 00000000..1662e18a --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/go_test.sum @@ -0,0 +1,45 @@ +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI= +github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= +github.com/nats-io/nats-server/v2 v2.9.6 h1:RTtK+rv/4CcliOuqGsy58g7MuWkBaWmF5TUNwuUo9Uw= +github.com/nats-io/nats-server/v2 v2.9.6/go.mod h1:AB6hAnGZDlYfqb7CTAm66ZKMZy9DpfierY1/PbpvI2g= +github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= 
+golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= diff --git a/vendor/github.com/nats-io/nats.go/js.go b/vendor/github.com/nats-io/nats.go/js.go new file mode 100644 index 00000000..c1be7627 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/js.go @@ -0,0 +1,3634 @@ +// Copyright 2020-2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "math/rand" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/nats-io/nuid" +) + +// JetStream allows persistent messaging through JetStream. +type JetStream interface { + // Publish publishes a message to JetStream. + Publish(subj string, data []byte, opts ...PubOpt) (*PubAck, error) + + // PublishMsg publishes a Msg to JetStream. + PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) + + // PublishAsync publishes a message to JetStream and returns a PubAckFuture. + // The data should not be changed until the PubAckFuture has been processed. 
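+ //
+ // A hedged usage sketch (editor's addition; the subject is hypothetical):
+ //
+ // paf, _ := js.PublishAsync("ORDERS.new", data)
+ // select {
+ // case <-paf.Ok():
+ // case err := <-paf.Err():
+ // // handle the failed publish
+ // }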
+ PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFuture, error) + + // PublishMsgAsync publishes a Msg to JetStream and returns a PubAckFuture. + // The message should not be changed until the PubAckFuture has been processed. + PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) + + // PublishAsyncPending returns the number of async publishes outstanding for this context. + PublishAsyncPending() int + + // PublishAsyncComplete returns a channel that will be closed when all outstanding messages are ack'd. + PublishAsyncComplete() <-chan struct{} + + // Subscribe creates an async Subscription for JetStream. + // The stream and consumer names can be provided with the nats.Bind() option. + // For creating an ephemeral (where the consumer name is picked by the server), + // you can provide the stream name with nats.BindStream(). + // If no stream name is specified, the library will attempt to figure out which + // stream the subscription is for. See important notes below for more details. + // + // IMPORTANT NOTES: + // * If none of the options Bind() nor Durable() are specified, the library will + // send a request to the server to create an ephemeral JetStream consumer, + // which will be deleted after an Unsubscribe() or Drain(), or automatically + // by the server after a short period of time after the NATS subscription is + // gone. + // * If Durable() option is specified, the library will attempt to lookup a JetStream + // consumer with this name, and if found, will bind to it and not attempt to + // delete it. However, if not found, the library will send a request to + // create such durable JetStream consumer. Note that the library will delete + // the JetStream consumer after an Unsubscribe() or Drain() only if it + // created the durable consumer while subscribing. If the durable consumer + // already existed prior to subscribing it won't be deleted. + // * If Bind() option is provided, the library will attempt to lookup the + // consumer with the given name, and if successful, bind to it. If the lookup fails, + // then the Subscribe() call will return an error. + Subscribe(subj string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) + + // SubscribeSync creates a Subscription that can be used to process messages synchronously. + // See important note in Subscribe() + SubscribeSync(subj string, opts ...SubOpt) (*Subscription, error) + + // ChanSubscribe creates channel based Subscription. + // See important note in Subscribe() + ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) + + // ChanQueueSubscribe creates channel based Subscription with a queue group. + // See important note in QueueSubscribe() + ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) + + // QueueSubscribe creates a Subscription with a queue group. + // If no optional durable name nor binding options are specified, the queue name will be used as a durable name. + // See important note in Subscribe() + QueueSubscribe(subj, queue string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) + + // QueueSubscribeSync creates a Subscription with a queue group that can be used to process messages synchronously. + // See important note in QueueSubscribe() + QueueSubscribeSync(subj, queue string, opts ...SubOpt) (*Subscription, error) + + // PullSubscribe creates a Subscription that can fetch messages. + // See important note in Subscribe(). 
 Additionally, for an ephemeral pull consumer, the "durable" value must be
+ // set to an empty string.
+ PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error)
+}
+
+// JetStreamContext allows JetStream messaging and stream management.
+type JetStreamContext interface {
+ JetStream
+ JetStreamManager
+ KeyValueManager
+ ObjectStoreManager
+}
+
+// Request API subjects for JetStream.
+const (
+ // defaultAPIPrefix is the default prefix for the JetStream API.
+ defaultAPIPrefix = "$JS.API."
+
+ // jsDomainT is used to create JetStream API prefix by specifying only Domain
+ jsDomainT = "$JS.%s.API."
+
+ // jsExtDomainT is used to create a StreamSource External APIPrefix
+ jsExtDomainT = "$JS.%s.API"
+
+ // apiAccountInfo is for obtaining general information about JetStream.
+ apiAccountInfo = "INFO"
+
+ // apiConsumerCreateT is used to create consumers.
+ // it accepts stream name and consumer name.
+ apiConsumerCreateT = "CONSUMER.CREATE.%s.%s"
+
+ // apiConsumerCreateWithFilterSubjectT is used to create consumers.
+ // it accepts stream name, consumer name and filter subject
+ apiConsumerCreateWithFilterSubjectT = "CONSUMER.CREATE.%s.%s.%s"
+
+ // apiLegacyConsumerCreateT is used to create consumers.
+ // this is a legacy endpoint to support creating ephemerals before nats-server v2.9.0.
+ apiLegacyConsumerCreateT = "CONSUMER.CREATE.%s"
+
+ // apiDurableCreateT is used to create durable consumers.
+ // this is a legacy endpoint to support creating durable consumers before nats-server v2.9.0.
+ apiDurableCreateT = "CONSUMER.DURABLE.CREATE.%s.%s"
+
+ // apiConsumerInfoT is used to get information about consumers.
+ apiConsumerInfoT = "CONSUMER.INFO.%s.%s"
+
+ // apiRequestNextT is the prefix for the request next message(s) for a consumer in worker/pull mode.
+ apiRequestNextT = "CONSUMER.MSG.NEXT.%s.%s"
+
+ // apiConsumerDeleteT is used to delete consumers.
+ apiConsumerDeleteT = "CONSUMER.DELETE.%s.%s"
+
+ // apiConsumerListT is used to return all detailed consumer information
+ apiConsumerListT = "CONSUMER.LIST.%s"
+
+ // apiConsumerNamesT is used to return a list with all consumer names for the stream.
+ apiConsumerNamesT = "CONSUMER.NAMES.%s"
+
+ // apiStreams can lookup a stream by subject.
+ apiStreams = "STREAM.NAMES"
+
+ // apiStreamCreateT is the endpoint to create new streams.
+ apiStreamCreateT = "STREAM.CREATE.%s"
+
+ // apiStreamInfoT is the endpoint to get information on a stream.
+ apiStreamInfoT = "STREAM.INFO.%s"
+
+ // apiStreamUpdateT is the endpoint to update existing streams.
+ apiStreamUpdateT = "STREAM.UPDATE.%s"
+
+ // apiStreamDeleteT is the endpoint to delete streams.
+ apiStreamDeleteT = "STREAM.DELETE.%s"
+
+ // apiStreamPurgeT is the endpoint to purge streams.
+ apiStreamPurgeT = "STREAM.PURGE.%s"
+
+ // apiStreamListT is the endpoint that will return all detailed stream information
+ apiStreamListT = "STREAM.LIST"
+
+ // apiMsgGetT is the endpoint to get a message.
+ apiMsgGetT = "STREAM.MSG.GET.%s"
+
+ // apiDirectMsgGetT is the endpoint to perform a direct get of a message.
+ apiDirectMsgGetT = "DIRECT.GET.%s"
+
+ // apiDirectMsgGetLastBySubjectT is the endpoint to perform a direct get of a message by subject.
+ apiDirectMsgGetLastBySubjectT = "DIRECT.GET.%s.%s"
+
+ // apiMsgDeleteT is the endpoint to remove a message.
+ apiMsgDeleteT = "STREAM.MSG.DELETE.%s"
+
+ // orderedHeartbeatsInterval is how fast we want HBs from the server during idle.
+ orderedHeartbeatsInterval = 5 * time.Second
+
+ // Scale for threshold of missed HBs or lack of activity.
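+ // (Editor's note: hedged reading of the constant below: activity appears
+ // to be considered lost after about hbcThresh heartbeat intervals, e.g.
+ // ~10s with the 5s interval above.)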
+ hbcThresh = 2
+
+ // For ChanSubscription, we can't update sub.delivered as we do for other
+ // type of subscriptions, since the channel is user provided.
+ // With flow control in play, we will check for flow control on incoming
+ // messages (as opposed to when they are delivered), but also from a go
+ // routine. Without this, the subscription would possibly stall until
+ // a new message or heartbeat/fc are received.
+ chanSubFCCheckInterval = 250 * time.Millisecond
+
+ // Default time wait between retries on Publish iff err is NoResponders.
+ DefaultPubRetryWait = 250 * time.Millisecond
+
+ // Default number of retries
+ DefaultPubRetryAttempts = 2
+
+ // defaultAsyncPubAckInflight is the number of async pub acks inflight.
+ defaultAsyncPubAckInflight = 4000
+)
+
+// Types of control messages, so far heartbeat and flow control
+const (
+ jsCtrlHB = 1
+ jsCtrlFC = 2
+)
+
+// js is an internal struct from a JetStreamContext.
+type js struct {
+ nc *Conn
+ opts *jsOpts
+
+ // For async publish context.
+ mu sync.RWMutex
+ rpre string
+ rsub *Subscription
+ pafs map[string]*pubAckFuture
+ stc chan struct{}
+ dch chan struct{}
+ rr *rand.Rand
+}
+
+type jsOpts struct {
+ ctx context.Context
+ // For importing JetStream from other accounts.
+ pre string
+ // Amount of time to wait for API requests.
+ wait time.Duration
+ // For async publish error handling.
+ aecb MsgErrHandler
+ // Max async pub ack in flight
+ maxpa int
+ // the domain that produced the pre
+ domain string
+ // enables protocol tracing
+ ctrace ClientTrace
+ shouldTrace bool
+ // purgeOpts contains optional stream purge options
+ purgeOpts *StreamPurgeRequest
+ // streamInfoOpts contains optional stream info options
+ streamInfoOpts *StreamInfoRequest
+ // streamListSubject is used for subject filtering when listing streams / stream names
+ streamListSubject string
+ // For direct get message requests
+ directGet bool
+ // For direct get next message
+ directNextFor string
+
+ // featureFlags are used to enable/disable specific JetStream features
+ featureFlags featureFlags
+}
+
+const (
+ defaultRequestWait = 5 * time.Second
+ defaultAccountCheck = 20 * time.Second
+)
+
+// JetStream returns a JetStreamContext for messaging and stream management.
+// Errors are only returned if inconsistent options are provided.
+func (nc *Conn) JetStream(opts ...JSOpt) (JetStreamContext, error) {
+ js := &js{
+ nc: nc,
+ opts: &jsOpts{
+ pre: defaultAPIPrefix,
+ wait: defaultRequestWait,
+ maxpa: defaultAsyncPubAckInflight,
+ },
+ }
+
+ for _, opt := range opts {
+ if err := opt.configureJSContext(js.opts); err != nil {
+ return nil, err
+ }
+ }
+ return js, nil
+}
+
+// JSOpt configures a JetStreamContext.
+type JSOpt interface {
+ configureJSContext(opts *jsOpts) error
+}
+
+// jsOptFn configures an option for the JetStreamContext.
+type jsOptFn func(opts *jsOpts) error
+
+func (opt jsOptFn) configureJSContext(opts *jsOpts) error {
+ return opt(opts)
+}
+
+type featureFlags struct {
+ useDurableConsumerCreate bool
+}
+
+// UseLegacyDurableConsumers makes JetStream use the legacy (pre nats-server v2.9.0) subjects for consumer creation.
+// If this option is used when creating JetStreamContext, $JS.API.CONSUMER.DURABLE.CREATE.. will be used
+// to create a consumer with Durable provided, rather than $JS.API.CONSUMER.CREATE...
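+//
+// A hedged usage sketch (editor's addition):
+//
+//	js, err := nc.JetStream(nats.UseLegacyDurableConsumers())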
+func UseLegacyDurableConsumers() JSOpt {
+ return jsOptFn(func(opts *jsOpts) error {
+ opts.featureFlags.useDurableConsumerCreate = true
+ return nil
+ })
+}
+
+// ClientTrace can be used to trace API interactions for the JetStream Context.
+type ClientTrace struct {
+ RequestSent func(subj string, payload []byte)
+ ResponseReceived func(subj string, payload []byte, hdr Header)
+}
+
+func (ct ClientTrace) configureJSContext(js *jsOpts) error {
+ js.ctrace = ct
+ js.shouldTrace = true
+ return nil
+}
+
+// Domain changes the domain part of JetStream API prefix.
+func Domain(domain string) JSOpt {
+ if domain == _EMPTY_ {
+ return APIPrefix(_EMPTY_)
+ }
+
+ return jsOptFn(func(js *jsOpts) error {
+ js.domain = domain
+ js.pre = fmt.Sprintf(jsDomainT, domain)
+
+ return nil
+ })
+
+}
+
+func (s *StreamPurgeRequest) configureJSContext(js *jsOpts) error {
+ js.purgeOpts = s
+ return nil
+}
+
+func (s *StreamInfoRequest) configureJSContext(js *jsOpts) error {
+ js.streamInfoOpts = s
+ return nil
+}
+
+// APIPrefix changes the default prefix used for the JetStream API.
+func APIPrefix(pre string) JSOpt {
+ return jsOptFn(func(js *jsOpts) error {
+ if pre == _EMPTY_ {
+ return nil
+ }
+
+ js.pre = pre
+ if !strings.HasSuffix(js.pre, ".") {
+ js.pre = js.pre + "."
+ }
+
+ return nil
+ })
+}
+
+// DirectGet is an option that can be used to make GetMsg() or GetLastMsg()
+// retrieve message directly from a group of servers (leader and replicas)
+// if the stream was created with the AllowDirect option.
+func DirectGet() JSOpt {
+ return jsOptFn(func(js *jsOpts) error {
+ js.directGet = true
+ return nil
+ })
+}
+
+// DirectGetNext is an option that can be used to make GetMsg() retrieve message
+// directly from a group of servers (leader and replicas) if the stream was
+// created with the AllowDirect option.
+// The server will find the next message matching the filter `subject` starting
+// at the start sequence (argument in GetMsg()). The filter `subject` can be a
+// wildcard.
+func DirectGetNext(subject string) JSOpt {
+ return jsOptFn(func(js *jsOpts) error {
+ js.directGet = true
+ js.directNextFor = subject
+ return nil
+ })
+}
+
+// StreamListFilter is an option that can be used to configure `StreamsInfo()` and `StreamNames()` requests.
+// It allows filtering the returned streams by subject associated with each stream.
+// Wildcards can be used. For example, `StreamListFilter(FOO.*.A)` will return
+// all streams which have at least one subject matching the provided pattern (e.g. FOO.TEST.A).
+func StreamListFilter(subject string) JSOpt {
+ return jsOptFn(func(opts *jsOpts) error {
+ opts.streamListSubject = subject
+ return nil
+ })
+}
+
+func (js *js) apiSubj(subj string) string {
+ if js.opts.pre == _EMPTY_ {
+ return subj
+ }
+ var b strings.Builder
+ b.WriteString(js.opts.pre)
+ b.WriteString(subj)
+ return b.String()
+}
+
+// PubOpt configures options for publishing JetStream messages.
+type PubOpt interface {
+ configurePublish(opts *pubOpts) error
+}
+
+// pubOptFn is a function option used to configure JetStream Publish.
+type pubOptFn func(opts *pubOpts) error
+
+func (opt pubOptFn) configurePublish(opts *pubOpts) error {
+ return opt(opts)
+}
+
+type pubOpts struct {
+ ctx context.Context
+ ttl time.Duration
+ id string
+ lid string // Expected last msgId
+ str string // Expected stream name
+ seq *uint64 // Expected last sequence
+ lss *uint64 // Expected last sequence per subject
+
+ // Publish retries for NoResponders err.
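+ // (Editor's note: per the retry loop in PublishMsg below, a negative
+ // rnum retries indefinitely.)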
+ rwait time.Duration // Retry wait between attempts
+ rnum int // Retry attempts
+
+ // stallWait is the max wait of an async pub ack.
+ stallWait time.Duration
+}
+
+// pubAckResponse is the ack response from the JetStream API when publishing a message.
+type pubAckResponse struct {
+ apiResponse
+ *PubAck
+}
+
+// PubAck is an ack received after successfully publishing a message.
+type PubAck struct {
+ Stream string `json:"stream"`
+ Sequence uint64 `json:"seq"`
+ Duplicate bool `json:"duplicate,omitempty"`
+ Domain string `json:"domain,omitempty"`
+}
+
+// Headers for published messages.
+const (
+ MsgIdHdr = "Nats-Msg-Id"
+ ExpectedStreamHdr = "Nats-Expected-Stream"
+ ExpectedLastSeqHdr = "Nats-Expected-Last-Sequence"
+ ExpectedLastSubjSeqHdr = "Nats-Expected-Last-Subject-Sequence"
+ ExpectedLastMsgIdHdr = "Nats-Expected-Last-Msg-Id"
+ MsgRollup = "Nats-Rollup"
+)
+
+// Headers for republished messages and direct gets.
+const (
+ JSStream = "Nats-Stream"
+ JSSequence = "Nats-Sequence"
+ JSTimeStamp = "Nats-Time-Stamp"
+ JSSubject = "Nats-Subject"
+ JSLastSequence = "Nats-Last-Sequence"
+)
+
+// MsgSize is a header that will be part of a consumer's delivered message if HeadersOnly requested.
+const MsgSize = "Nats-Msg-Size"
+
+// Rollups, can be subject only or all messages.
+const (
+ MsgRollupSubject = "sub"
+ MsgRollupAll = "all"
+)
+
+// PublishMsg publishes a Msg to a stream from JetStream.
+func (js *js) PublishMsg(m *Msg, opts ...PubOpt) (*PubAck, error) {
+ var o = pubOpts{rwait: DefaultPubRetryWait, rnum: DefaultPubRetryAttempts}
+ if len(opts) > 0 {
+ if m.Header == nil {
+ m.Header = Header{}
+ }
+ for _, opt := range opts {
+ if err := opt.configurePublish(&o); err != nil {
+ return nil, err
+ }
+ }
+ }
+ // Check for option collisions. Right now just timeout and context.
+ if o.ctx != nil && o.ttl != 0 {
+ return nil, ErrContextAndTimeout
+ }
+ if o.ttl == 0 && o.ctx == nil {
+ o.ttl = js.opts.wait
+ }
+ if o.stallWait > 0 {
+ return nil, fmt.Errorf("nats: stall wait cannot be set to sync publish")
+ }
+
+ if o.id != _EMPTY_ {
+ m.Header.Set(MsgIdHdr, o.id)
+ }
+ if o.lid != _EMPTY_ {
+ m.Header.Set(ExpectedLastMsgIdHdr, o.lid)
+ }
+ if o.str != _EMPTY_ {
+ m.Header.Set(ExpectedStreamHdr, o.str)
+ }
+ if o.seq != nil {
+ m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10))
+ }
+ if o.lss != nil {
+ m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10))
+ }
+
+ var resp *Msg
+ var err error
+
+ if o.ttl > 0 {
+ resp, err = js.nc.RequestMsg(m, time.Duration(o.ttl))
+ } else {
+ resp, err = js.nc.RequestMsgWithContext(o.ctx, m)
+ }
+
+ if err != nil {
+ for r, ttl := 0, o.ttl; err == ErrNoResponders && (r < o.rnum || o.rnum < 0); r++ {
+ // To protect against small blips in leadership changes etc, if we get a no responders here, retry.
+ if o.ctx != nil { + select { + case <-o.ctx.Done(): + case <-time.After(o.rwait): + } + } else { + time.Sleep(o.rwait) + } + if o.ttl > 0 { + ttl -= o.rwait + if ttl <= 0 { + err = ErrTimeout + break + } + resp, err = js.nc.RequestMsg(m, time.Duration(ttl)) + } else { + resp, err = js.nc.RequestMsgWithContext(o.ctx, m) + } + } + if err != nil { + if err == ErrNoResponders { + err = ErrNoStreamResponse + } + return nil, err + } + } + + var pa pubAckResponse + if err := json.Unmarshal(resp.Data, &pa); err != nil { + return nil, ErrInvalidJSAck + } + if pa.Error != nil { + return nil, pa.Error + } + if pa.PubAck == nil || pa.PubAck.Stream == _EMPTY_ { + return nil, ErrInvalidJSAck + } + return pa.PubAck, nil +} + +// Publish publishes a message to a stream from JetStream. +func (js *js) Publish(subj string, data []byte, opts ...PubOpt) (*PubAck, error) { + return js.PublishMsg(&Msg{Subject: subj, Data: data}, opts...) +} + +// PubAckFuture is a future for a PubAck. +type PubAckFuture interface { + // Ok returns a receive only channel that can be used to get a PubAck. + Ok() <-chan *PubAck + + // Err returns a receive only channel that can be used to get the error from an async publish. + Err() <-chan error + + // Msg returns the message that was sent to the server. + Msg() *Msg +} + +type pubAckFuture struct { + js *js + msg *Msg + pa *PubAck + st time.Time + err error + errCh chan error + doneCh chan *PubAck +} + +func (paf *pubAckFuture) Ok() <-chan *PubAck { + paf.js.mu.Lock() + defer paf.js.mu.Unlock() + + if paf.doneCh == nil { + paf.doneCh = make(chan *PubAck, 1) + if paf.pa != nil { + paf.doneCh <- paf.pa + } + } + + return paf.doneCh +} + +func (paf *pubAckFuture) Err() <-chan error { + paf.js.mu.Lock() + defer paf.js.mu.Unlock() + + if paf.errCh == nil { + paf.errCh = make(chan error, 1) + if paf.err != nil { + paf.errCh <- paf.err + } + } + + return paf.errCh +} + +func (paf *pubAckFuture) Msg() *Msg { + paf.js.mu.RLock() + defer paf.js.mu.RUnlock() + return paf.msg +} + +// For quick token lookup etc. +const aReplyPreLen = 14 +const aReplyTokensize = 6 + +func (js *js) newAsyncReply() string { + js.mu.Lock() + if js.rsub == nil { + // Create our wildcard reply subject. + sha := sha256.New() + sha.Write([]byte(nuid.Next())) + b := sha.Sum(nil) + for i := 0; i < aReplyTokensize; i++ { + b[i] = rdigits[int(b[i]%base)] + } + inboxPrefix := InboxPrefix + if js.nc.Opts.InboxPrefix != _EMPTY_ { + inboxPrefix = js.nc.Opts.InboxPrefix + "." + } + js.rpre = fmt.Sprintf("%s%s.", inboxPrefix, b[:aReplyTokensize]) + sub, err := js.nc.Subscribe(fmt.Sprintf("%s*", js.rpre), js.handleAsyncReply) + if err != nil { + js.mu.Unlock() + return _EMPTY_ + } + js.rsub = sub + js.rr = rand.New(rand.NewSource(time.Now().UnixNano())) + } + var sb strings.Builder + sb.WriteString(js.rpre) + rn := js.rr.Int63() + var b [aReplyTokensize]byte + for i, l := 0, rn; i < len(b); i++ { + b[i] = rdigits[l%base] + l /= base + } + sb.Write(b[:]) + js.mu.Unlock() + return sb.String() +} + +// registerPAF will register for a PubAckFuture. +func (js *js) registerPAF(id string, paf *pubAckFuture) (int, int) { + js.mu.Lock() + if js.pafs == nil { + js.pafs = make(map[string]*pubAckFuture) + } + paf.js = js + js.pafs[id] = paf + np := len(js.pafs) + maxpa := js.opts.maxpa + js.mu.Unlock() + return np, maxpa +} + +// Lock should be held. +func (js *js) getPAF(id string) *pubAckFuture { + if js.pafs == nil { + return nil + } + return js.pafs[id] +} + +// clearPAF will remove a PubAckFuture that was registered. 
+func (js *js) clearPAF(id string) {
+ js.mu.Lock()
+ delete(js.pafs, id)
+ js.mu.Unlock()
+}
+
+// PublishAsyncPending returns how many PubAckFutures are pending.
+func (js *js) PublishAsyncPending() int {
+ js.mu.RLock()
+ defer js.mu.RUnlock()
+ return len(js.pafs)
+}
+
+func (js *js) asyncStall() <-chan struct{} {
+ js.mu.Lock()
+ if js.stc == nil {
+ js.stc = make(chan struct{})
+ }
+ stc := js.stc
+ js.mu.Unlock()
+ return stc
+}
+
+// Handle an async reply from PublishAsync.
+func (js *js) handleAsyncReply(m *Msg) {
+ if len(m.Subject) <= aReplyPreLen {
+ return
+ }
+ id := m.Subject[aReplyPreLen:]
+
+ js.mu.Lock()
+ paf := js.getPAF(id)
+ if paf == nil {
+ js.mu.Unlock()
+ return
+ }
+ // Remove
+ delete(js.pafs, id)
+
+ // Check on anyone stalled and waiting.
+ if js.stc != nil && len(js.pafs) < js.opts.maxpa {
+ close(js.stc)
+ js.stc = nil
+ }
+ // Check on anyone waiting on done status.
+ if js.dch != nil && len(js.pafs) == 0 {
+ dch := js.dch
+ js.dch = nil
+ // Defer here so error is processed and can be checked.
+ defer close(dch)
+ }
+
+ doErr := func(err error) {
+ paf.err = err
+ if paf.errCh != nil {
+ paf.errCh <- paf.err
+ }
+ cb := js.opts.aecb
+ js.mu.Unlock()
+ if cb != nil {
+ cb(paf.js, paf.msg, err)
+ }
+ }
+
+ // Process no responders etc.
+ if len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders {
+ doErr(ErrNoResponders)
+ return
+ }
+
+ var pa pubAckResponse
+ if err := json.Unmarshal(m.Data, &pa); err != nil {
+ doErr(ErrInvalidJSAck)
+ return
+ }
+ if pa.Error != nil {
+ doErr(pa.Error)
+ return
+ }
+ if pa.PubAck == nil || pa.PubAck.Stream == _EMPTY_ {
+ doErr(ErrInvalidJSAck)
+ return
+ }
+
+ // So here we have received a proper puback.
+ paf.pa = pa.PubAck
+ if paf.doneCh != nil {
+ paf.doneCh <- paf.pa
+ }
+ js.mu.Unlock()
+}
+
+// MsgErrHandler is used to process asynchronous errors from
+// JetStream PublishAsync. It will return the original
+// message sent to the server for possible retransmission and the error encountered.
+type MsgErrHandler func(JetStream, *Msg, error)
+
+// PublishAsyncErrHandler sets the error handler for async publishes in JetStream.
+func PublishAsyncErrHandler(cb MsgErrHandler) JSOpt {
+ return jsOptFn(func(js *jsOpts) error {
+ js.aecb = cb
+ return nil
+ })
+}
+
+// PublishAsyncMaxPending sets the maximum outstanding async publishes that can be inflight at one time.
+func PublishAsyncMaxPending(max int) JSOpt {
+ return jsOptFn(func(js *jsOpts) error {
+ if max < 1 {
+ return errors.New("nats: max ack pending should be >= 1")
+ }
+ js.maxpa = max
+ return nil
+ })
+}
+
+// PublishAsync publishes a message to JetStream and returns a PubAckFuture
+func (js *js) PublishAsync(subj string, data []byte, opts ...PubOpt) (PubAckFuture, error) {
+ return js.PublishMsgAsync(&Msg{Subject: subj, Data: data}, opts...)
+}
+
+const defaultStallWait = 200 * time.Millisecond
+
+func (js *js) PublishMsgAsync(m *Msg, opts ...PubOpt) (PubAckFuture, error) {
+ var o pubOpts
+ if len(opts) > 0 {
+ if m.Header == nil {
+ m.Header = Header{}
+ }
+ for _, opt := range opts {
+ if err := opt.configurePublish(&o); err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ // Timeouts and contexts do not make sense for these.
+ if o.ttl != 0 || o.ctx != nil {
+ return nil, ErrContextAndTimeout
+ }
+ stallWait := defaultStallWait
+ if o.stallWait > 0 {
+ stallWait = o.stallWait
+ }
+
+ // FIXME(dlc) - Make common.
+ if o.id != _EMPTY_ {
+ m.Header.Set(MsgIdHdr, o.id)
+ }
+ if o.lid != _EMPTY_ {
+ m.Header.Set(ExpectedLastMsgIdHdr, o.lid)
+ }
+ if o.str != _EMPTY_ {
+ m.Header.Set(ExpectedStreamHdr, o.str)
+ }
+ if o.seq != nil {
+ m.Header.Set(ExpectedLastSeqHdr, strconv.FormatUint(*o.seq, 10))
+ }
+ if o.lss != nil {
+ m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(*o.lss, 10))
+ }
+
+ // Reply
+ if m.Reply != _EMPTY_ {
+ return nil, errors.New("nats: reply subject should be empty")
+ }
+ reply := m.Reply
+ m.Reply = js.newAsyncReply()
+ defer func() { m.Reply = reply }()
+
+ if m.Reply == _EMPTY_ {
+ return nil, errors.New("nats: error creating async reply handler")
+ }
+
+ id := m.Reply[aReplyPreLen:]
+ paf := &pubAckFuture{msg: m, st: time.Now()}
+ numPending, maxPending := js.registerPAF(id, paf)
+
+ if maxPending > 0 && numPending >= maxPending {
+ select {
+ case <-js.asyncStall():
+ case <-time.After(stallWait):
+ js.clearPAF(id)
+ return nil, errors.New("nats: stalled with too many outstanding async published messages")
+ }
+ }
+ if err := js.nc.PublishMsg(m); err != nil {
+ js.clearPAF(id)
+ return nil, err
+ }
+
+ return paf, nil
+}
+
+// PublishAsyncComplete returns a channel that will be closed when all outstanding messages have been ack'd.
+func (js *js) PublishAsyncComplete() <-chan struct{} {
+ js.mu.Lock()
+ defer js.mu.Unlock()
+ if js.dch == nil {
+ js.dch = make(chan struct{})
+ }
+ dch := js.dch
+ if len(js.pafs) == 0 {
+ close(js.dch)
+ js.dch = nil
+ }
+ return dch
+}
+
+// MsgId sets the message ID used for deduplication.
+func MsgId(id string) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ opts.id = id
+ return nil
+ })
+}
+
+// ExpectStream sets the expected stream to respond from the publish.
+func ExpectStream(stream string) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ opts.str = stream
+ return nil
+ })
+}
+
+// ExpectLastSequence sets the expected sequence in the response from the publish.
+func ExpectLastSequence(seq uint64) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ opts.seq = &seq
+ return nil
+ })
+}
+
+// ExpectLastSequencePerSubject sets the expected sequence per subject in the response from the publish.
+func ExpectLastSequencePerSubject(seq uint64) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ opts.lss = &seq
+ return nil
+ })
+}
+
+// ExpectLastMsgId sets the expected last msgId in the response from the publish.
+func ExpectLastMsgId(id string) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ opts.lid = id
+ return nil
+ })
+}
+
+// RetryWait sets the retry wait time when ErrNoResponders is encountered.
+func RetryWait(dur time.Duration) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ opts.rwait = dur
+ return nil
+ })
+}
+
+// RetryAttempts sets the number of retry attempts when ErrNoResponders is encountered.
+func RetryAttempts(num int) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ opts.rnum = num
+ return nil
+ })
+}
+
+// StallWait sets the max wait when the producer becomes stalled while producing messages.
+func StallWait(ttl time.Duration) PubOpt {
+ return pubOptFn(func(opts *pubOpts) error {
+ if ttl <= 0 {
+ return fmt.Errorf("nats: stall wait should be more than 0")
+ }
+ opts.stallWait = ttl
+ return nil
+ })
+}
+
+type ackOpts struct {
+ ttl time.Duration
+ ctx context.Context
+ nakDelay time.Duration
+}
+
+// AckOpt are the options that can be passed when acknowledging a message.
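+//
+// A hedged usage sketch (editor's addition; msg is a received JetStream message):
+//
+//	err := msg.AckSync(nats.Context(ctx))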
+type AckOpt interface { + configureAck(opts *ackOpts) error +} + +// MaxWait sets the maximum amount of time we will wait for a response. +type MaxWait time.Duration + +func (ttl MaxWait) configureJSContext(js *jsOpts) error { + js.wait = time.Duration(ttl) + return nil +} + +func (ttl MaxWait) configurePull(opts *pullOpts) error { + opts.ttl = time.Duration(ttl) + return nil +} + +// AckWait sets the maximum amount of time we will wait for an ack. +type AckWait time.Duration + +func (ttl AckWait) configurePublish(opts *pubOpts) error { + opts.ttl = time.Duration(ttl) + return nil +} + +func (ttl AckWait) configureSubscribe(opts *subOpts) error { + opts.cfg.AckWait = time.Duration(ttl) + return nil +} + +func (ttl AckWait) configureAck(opts *ackOpts) error { + opts.ttl = time.Duration(ttl) + return nil +} + +// ContextOpt is an option used to set a context.Context. +type ContextOpt struct { + context.Context +} + +func (ctx ContextOpt) configureJSContext(opts *jsOpts) error { + opts.ctx = ctx + return nil +} + +func (ctx ContextOpt) configurePublish(opts *pubOpts) error { + opts.ctx = ctx + return nil +} + +func (ctx ContextOpt) configureSubscribe(opts *subOpts) error { + opts.ctx = ctx + return nil +} + +func (ctx ContextOpt) configurePull(opts *pullOpts) error { + opts.ctx = ctx + return nil +} + +func (ctx ContextOpt) configureAck(opts *ackOpts) error { + opts.ctx = ctx + return nil +} + +// Context returns an option that can be used to configure a context for APIs +// that are context aware such as those part of the JetStream interface. +func Context(ctx context.Context) ContextOpt { + return ContextOpt{ctx} +} + +type nakDelay time.Duration + +func (d nakDelay) configureAck(opts *ackOpts) error { + opts.nakDelay = time.Duration(d) + return nil +} + +// Subscribe + +// ConsumerConfig is the configuration of a JetStream consumer. +type ConsumerConfig struct { + Durable string `json:"durable_name,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + DeliverPolicy DeliverPolicy `json:"deliver_policy"` + OptStartSeq uint64 `json:"opt_start_seq,omitempty"` + OptStartTime *time.Time `json:"opt_start_time,omitempty"` + AckPolicy AckPolicy `json:"ack_policy"` + AckWait time.Duration `json:"ack_wait,omitempty"` + MaxDeliver int `json:"max_deliver,omitempty"` + BackOff []time.Duration `json:"backoff,omitempty"` + FilterSubject string `json:"filter_subject,omitempty"` + ReplayPolicy ReplayPolicy `json:"replay_policy"` + RateLimit uint64 `json:"rate_limit_bps,omitempty"` // Bits per sec + SampleFrequency string `json:"sample_freq,omitempty"` + MaxWaiting int `json:"max_waiting,omitempty"` + MaxAckPending int `json:"max_ack_pending,omitempty"` + FlowControl bool `json:"flow_control,omitempty"` + Heartbeat time.Duration `json:"idle_heartbeat,omitempty"` + HeadersOnly bool `json:"headers_only,omitempty"` + + // Pull based options. + MaxRequestBatch int `json:"max_batch,omitempty"` + MaxRequestExpires time.Duration `json:"max_expires,omitempty"` + MaxRequestMaxBytes int `json:"max_bytes,omitempty"` + + // Push based consumers. + DeliverSubject string `json:"deliver_subject,omitempty"` + DeliverGroup string `json:"deliver_group,omitempty"` + + // Inactivity threshold. + InactiveThreshold time.Duration `json:"inactive_threshold,omitempty"` + + // Generally inherited by parent stream and other markers, now can be configured directly. + Replicas int `json:"num_replicas"` + // Force memory storage. 
+ MemoryStorage bool `json:"mem_storage,omitempty"` +} + +// ConsumerInfo is the info from a JetStream consumer. +type ConsumerInfo struct { + Stream string `json:"stream_name"` + Name string `json:"name"` + Created time.Time `json:"created"` + Config ConsumerConfig `json:"config"` + Delivered SequenceInfo `json:"delivered"` + AckFloor SequenceInfo `json:"ack_floor"` + NumAckPending int `json:"num_ack_pending"` + NumRedelivered int `json:"num_redelivered"` + NumWaiting int `json:"num_waiting"` + NumPending uint64 `json:"num_pending"` + Cluster *ClusterInfo `json:"cluster,omitempty"` + PushBound bool `json:"push_bound,omitempty"` +} + +// SequenceInfo has both the consumer and the stream sequence and last activity. +type SequenceInfo struct { + Consumer uint64 `json:"consumer_seq"` + Stream uint64 `json:"stream_seq"` + Last *time.Time `json:"last_active,omitempty"` +} + +// SequencePair includes the consumer and stream sequence info from a JetStream consumer. +type SequencePair struct { + Consumer uint64 `json:"consumer_seq"` + Stream uint64 `json:"stream_seq"` +} + +// nextRequest is for getting next messages for pull based consumers from JetStream. +type nextRequest struct { + Expires time.Duration `json:"expires,omitempty"` + Batch int `json:"batch,omitempty"` + NoWait bool `json:"no_wait,omitempty"` + MaxBytes int `json:"max_bytes,omitempty"` +} + +// jsSub includes JetStream subscription info. +type jsSub struct { + js *js + + // For pull subscribers, this is the next message subject to send requests to. + nms string + + psubj string // the subject that was passed by user to the subscribe calls + consumer string + stream string + deliver string + pull bool + dc bool // Delete JS consumer + ackNone bool + + // This is ConsumerInfo's Pending+Consumer.Delivered that we get from the + // add consumer response. Note that some versions of the server gather the + // consumer info *after* the creation of the consumer, which means that + // some messages may have been already delivered. So the sum of the two + // is a more accurate representation of the number of messages pending or + // in the process of being delivered to the subscription when created. + pending uint64 + + // Ordered consumers + ordered bool + dseq uint64 + sseq uint64 + ccreq *createConsumerRequest + + // Heartbeats and Flow Control handling from push consumers. + hbc *time.Timer + hbi time.Duration + active bool + cmeta string + fcr string + fcd uint64 + fciseq uint64 + csfct *time.Timer + + // Cancellation function to cancel context on drain/unsubscribe. + cancel func() +} + +// Deletes the JS Consumer. +// No connection nor subscription lock must be held on entry. +func (sub *Subscription) deleteConsumer() error { + sub.mu.Lock() + jsi := sub.jsi + if jsi == nil { + sub.mu.Unlock() + return nil + } + stream, consumer := jsi.stream, jsi.consumer + js := jsi.js + sub.mu.Unlock() + + return js.DeleteConsumer(stream, consumer) +} + +// SubOpt configures options for subscribing to JetStream consumers. +type SubOpt interface { + configureSubscribe(opts *subOpts) error +} + +// subOptFn is a function option used to configure a JetStream Subscribe. +type subOptFn func(opts *subOpts) error + +func (opt subOptFn) configureSubscribe(opts *subOpts) error { + return opt(opts) +} + +// Subscribe creates an async Subscription for JetStream. +// The stream and consumer names can be provided with the nats.Bind() option. 
+// For creating an ephemeral (where the consumer name is picked by the server), +// you can provide the stream name with nats.BindStream(). +// If no stream name is specified, the library will attempt to figure out which +// stream the subscription is for. See important notes below for more details. +// +// IMPORTANT NOTES: +// * If none of the options Bind() nor Durable() are specified, the library will +// send a request to the server to create an ephemeral JetStream consumer, +// which will be deleted after an Unsubscribe() or Drain(), or automatically +// by the server after a short period of time after the NATS subscription is +// gone. +// * If Durable() option is specified, the library will attempt to lookup a JetStream +// consumer with this name, and if found, will bind to it and not attempt to +// delete it. However, if not found, the library will send a request to create +// such durable JetStream consumer. The library will delete the JetStream consumer +// after an Unsubscribe() or Drain(). +// * If Bind() option is provided, the library will attempt to lookup the +// consumer with the given name, and if successful, bind to it. If the lookup fails, +// then the Subscribe() call will return an error. +func (js *js) Subscribe(subj string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) { + if cb == nil { + return nil, ErrBadSubscription + } + return js.subscribe(subj, _EMPTY_, cb, nil, false, false, opts) +} + +// SubscribeSync creates a Subscription that can be used to process messages synchronously. +// See important note in Subscribe() +func (js *js) SubscribeSync(subj string, opts ...SubOpt) (*Subscription, error) { + mch := make(chan *Msg, js.nc.Opts.SubChanLen) + return js.subscribe(subj, _EMPTY_, nil, mch, true, false, opts) +} + +// QueueSubscribe creates a Subscription with a queue group. +// If no optional durable name nor binding options are specified, the queue name will be used as a durable name. +// See important note in Subscribe() +func (js *js) QueueSubscribe(subj, queue string, cb MsgHandler, opts ...SubOpt) (*Subscription, error) { + if cb == nil { + return nil, ErrBadSubscription + } + return js.subscribe(subj, queue, cb, nil, false, false, opts) +} + +// QueueSubscribeSync creates a Subscription with a queue group that can be used to process messages synchronously. +// See important note in QueueSubscribe() +func (js *js) QueueSubscribeSync(subj, queue string, opts ...SubOpt) (*Subscription, error) { + mch := make(chan *Msg, js.nc.Opts.SubChanLen) + return js.subscribe(subj, queue, nil, mch, true, false, opts) +} + +// ChanSubscribe creates channel based Subscription. +// Using ChanSubscribe without buffered capacity is not recommended since +// it will be prone to dropping messages with a slow consumer error. Make sure to give the channel enough +// capacity to handle bursts in traffic, for example other Subscribe APIs use a default of 512k capacity in comparison. +// See important note in Subscribe() +func (js *js) ChanSubscribe(subj string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) { + return js.subscribe(subj, _EMPTY_, nil, ch, false, false, opts) +} + +// ChanQueueSubscribe creates channel based Subscription with a queue group. +// See important note in QueueSubscribe() +func (js *js) ChanQueueSubscribe(subj, queue string, ch chan *Msg, opts ...SubOpt) (*Subscription, error) { + return js.subscribe(subj, queue, nil, ch, false, false, opts) +} + +// PullSubscribe creates a Subscription that can fetch messages. 
+// See important note in Subscribe()
+func (js *js) PullSubscribe(subj, durable string, opts ...SubOpt) (*Subscription, error) {
+ mch := make(chan *Msg, js.nc.Opts.SubChanLen)
+ if durable != "" {
+ opts = append(opts, Durable(durable))
+ }
+ return js.subscribe(subj, _EMPTY_, nil, mch, true, true, opts)
+}
+
+func processConsInfo(info *ConsumerInfo, userCfg *ConsumerConfig, isPullMode bool, subj, queue string) (string, error) {
+ ccfg := &info.Config
+
+ // Make sure this new subject matches or is a subset.
+ if ccfg.FilterSubject != _EMPTY_ && subj != ccfg.FilterSubject {
+ return _EMPTY_, ErrSubjectMismatch
+ }
+
+ // Prevent binding a subscription against incompatible consumer types.
+ if isPullMode && ccfg.DeliverSubject != _EMPTY_ {
+ return _EMPTY_, ErrPullSubscribeToPushConsumer
+ } else if !isPullMode && ccfg.DeliverSubject == _EMPTY_ {
+ return _EMPTY_, ErrPullSubscribeRequired
+ }
+
+ // If pull mode, nothing else to check here.
+ if isPullMode {
+ return _EMPTY_, checkConfig(ccfg, userCfg)
+ }
+
+ // At this point, we know the user wants push mode, and the JS consumer is
+ // really push mode.
+
+ dg := info.Config.DeliverGroup
+ if dg == _EMPTY_ {
+ // Prevent a user from attempting to create a queue subscription on
+ // a JS consumer that was not created with a deliver group.
+ if queue != _EMPTY_ {
+ return _EMPTY_, fmt.Errorf("cannot create a queue subscription for a consumer without a deliver group")
+ } else if info.PushBound {
+ // Need to reject a non queue subscription to a non queue consumer
+ // if the consumer is already bound.
+ return _EMPTY_, fmt.Errorf("consumer is already bound to a subscription")
+ }
+ } else {
+ // If the JS consumer has a deliver group, we need to fail a non queue
+ // subscription attempt:
+ if queue == _EMPTY_ {
+ return _EMPTY_, fmt.Errorf("cannot create a subscription for a consumer with a deliver group %q", dg)
+ } else if queue != dg {
+ // Here the user's queue group name does not match the one associated
+ // with the JS consumer.
+ return _EMPTY_, fmt.Errorf("cannot create a queue subscription %q for a consumer with a deliver group %q", + queue, dg) + } + } + if err := checkConfig(ccfg, userCfg); err != nil { + return _EMPTY_, err + } + return ccfg.DeliverSubject, nil +} + +func checkConfig(s, u *ConsumerConfig) error { + makeErr := func(fieldName string, usrVal, srvVal interface{}) error { + return fmt.Errorf("configuration requests %s to be %v, but consumer's value is %v", fieldName, usrVal, srvVal) + } + + if u.Durable != _EMPTY_ && u.Durable != s.Durable { + return makeErr("durable", u.Durable, s.Durable) + } + if u.Description != _EMPTY_ && u.Description != s.Description { + return makeErr("description", u.Description, s.Description) + } + if u.DeliverPolicy != deliverPolicyNotSet && u.DeliverPolicy != s.DeliverPolicy { + return makeErr("deliver policy", u.DeliverPolicy, s.DeliverPolicy) + } + if u.OptStartSeq > 0 && u.OptStartSeq != s.OptStartSeq { + return makeErr("optional start sequence", u.OptStartSeq, s.OptStartSeq) + } + if u.OptStartTime != nil && !u.OptStartTime.IsZero() && !(*u.OptStartTime).Equal(*s.OptStartTime) { + return makeErr("optional start time", u.OptStartTime, s.OptStartTime) + } + if u.AckPolicy != ackPolicyNotSet && u.AckPolicy != s.AckPolicy { + return makeErr("ack policy", u.AckPolicy, s.AckPolicy) + } + if u.AckWait > 0 && u.AckWait != s.AckWait { + return makeErr("ack wait", u.AckWait, s.AckWait) + } + if u.MaxDeliver > 0 && u.MaxDeliver != s.MaxDeliver { + return makeErr("max deliver", u.MaxDeliver, s.MaxDeliver) + } + if u.ReplayPolicy != replayPolicyNotSet && u.ReplayPolicy != s.ReplayPolicy { + return makeErr("replay policy", u.ReplayPolicy, s.ReplayPolicy) + } + if u.RateLimit > 0 && u.RateLimit != s.RateLimit { + return makeErr("rate limit", u.RateLimit, s.RateLimit) + } + if u.SampleFrequency != _EMPTY_ && u.SampleFrequency != s.SampleFrequency { + return makeErr("sample frequency", u.SampleFrequency, s.SampleFrequency) + } + if u.MaxWaiting > 0 && u.MaxWaiting != s.MaxWaiting { + return makeErr("max waiting", u.MaxWaiting, s.MaxWaiting) + } + if u.MaxAckPending > 0 && u.MaxAckPending != s.MaxAckPending { + return makeErr("max ack pending", u.MaxAckPending, s.MaxAckPending) + } + // For flow control, we want to fail if the user explicit wanted it, but + // it is not set in the existing consumer. If it is not asked by the user, + // the library still handles it and so no reason to fail. + if u.FlowControl && !s.FlowControl { + return makeErr("flow control", u.FlowControl, s.FlowControl) + } + if u.Heartbeat > 0 && u.Heartbeat != s.Heartbeat { + return makeErr("heartbeat", u.Heartbeat, s.Heartbeat) + } + if u.Replicas > 0 && u.Replicas != s.Replicas { + return makeErr("replicas", u.Replicas, s.Replicas) + } + if u.MemoryStorage && !s.MemoryStorage { + return makeErr("memory storage", u.MemoryStorage, s.MemoryStorage) + } + return nil +} + +func (js *js) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync, isPullMode bool, opts []SubOpt) (*Subscription, error) { + cfg := ConsumerConfig{ + DeliverPolicy: deliverPolicyNotSet, + AckPolicy: ackPolicyNotSet, + ReplayPolicy: replayPolicyNotSet, + } + o := subOpts{cfg: &cfg} + if len(opts) > 0 { + for _, opt := range opts { + if opt == nil { + continue + } + if err := opt.configureSubscribe(&o); err != nil { + return nil, err + } + } + } + + // If no stream name is specified, the subject cannot be empty. 
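+	// As an illustrative aside (stream and subject names hypothetical), the
+	// stream lookup performed later in this function can be avoided entirely
+	// by naming the stream up front:
+	//
+	//	sub, err := js.SubscribeSync("ORDERS.new", nats.BindStream("ORDERS"))
+	//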
+	if subj == _EMPTY_ && o.stream == _EMPTY_ {
+		return nil, fmt.Errorf("nats: subject required")
+	}
+
+	// Note that these may change based on the consumer info response we may get.
+	hasHeartbeats := o.cfg.Heartbeat > 0
+	hasFC := o.cfg.FlowControl
+
+	// Some checks for pull subscribers
+	if isPullMode {
+		// No deliver subject should be provided
+		if o.cfg.DeliverSubject != _EMPTY_ {
+			return nil, ErrPullSubscribeToPushConsumer
+		}
+	}
+
+	// Some check/setting specific to queue subs
+	if queue != _EMPTY_ {
+		// Queue subscriber cannot have HB or FC (since messages will be randomly dispatched
+		// to members). We may in the future have a separate NATS subscription that all members
+		// would subscribe to and server would send on.
+		if o.cfg.Heartbeat > 0 || o.cfg.FlowControl {
+			// Not making this a public ErrXXX in case we allow in the future.
+			return nil, fmt.Errorf("nats: queue subscription doesn't support idle heartbeat nor flow control")
+		}
+
+		// If this is a queue subscription and neither a consumer nor a durable name was specified,
+		// then we will use the queue name as a durable name.
+		if o.consumer == _EMPTY_ && o.cfg.Durable == _EMPTY_ {
+			if err := checkConsumerName(queue); err != nil {
+				return nil, err
+			}
+			o.cfg.Durable = queue
+		}
+	}
+
+	var (
+		err error
+		shouldCreate bool
+		info *ConsumerInfo
+		deliver string
+		stream = o.stream
+		consumer = o.consumer
+		isDurable = o.cfg.Durable != _EMPTY_
+		consumerBound = o.bound
+		ctx = o.ctx
+		notFoundErr bool
+		lookupErr bool
+		nc = js.nc
+		nms string
+		hbi time.Duration
+		ccreq *createConsumerRequest // In case we need to hold onto it for ordered consumers.
+		maxap int
+	)
+
+	// Do some quick checks here for ordered consumers. We do these here instead of spread out
+	// in the individual SubOpts.
+	if o.ordered {
+		// Make sure we are not durable.
+		if isDurable {
+			return nil, fmt.Errorf("nats: durable can not be set for an ordered consumer")
+		}
+		// Check ack policy.
+		if o.cfg.AckPolicy != ackPolicyNotSet {
+			return nil, fmt.Errorf("nats: ack policy can not be set for an ordered consumer")
+		}
+		// Check max deliver.
+		if o.cfg.MaxDeliver != 1 && o.cfg.MaxDeliver != 0 {
+			return nil, fmt.Errorf("nats: max deliver can not be set for an ordered consumer")
+		}
+		// No deliver subject, we pick our own.
+		if o.cfg.DeliverSubject != _EMPTY_ {
+			return nil, fmt.Errorf("nats: deliver subject can not be set for an ordered consumer")
+		}
+		// Queue groups not allowed.
+		if queue != _EMPTY_ {
+			return nil, fmt.Errorf("nats: queues can not be set for an ordered consumer")
+		}
+		// Check for bound consumers.
+		if consumer != _EMPTY_ {
+			return nil, fmt.Errorf("nats: can not bind existing consumer for an ordered consumer")
+		}
+		// Check for pull mode.
+		if isPullMode {
+			return nil, fmt.Errorf("nats: can not use pull mode for an ordered consumer")
+		}
+		// Setup how we need it to be here.
+		o.cfg.FlowControl = true
+		o.cfg.AckPolicy = AckNonePolicy
+		o.cfg.MaxDeliver = 1
+		o.cfg.AckWait = 22 * time.Hour // Just set to something known, not utilized.
+		// Force R1 and MemoryStorage for these.
+		o.cfg.Replicas = 1
+		o.cfg.MemoryStorage = true
+
+		if !hasHeartbeats {
+			o.cfg.Heartbeat = orderedHeartbeatsInterval
+		}
+		hasFC, hasHeartbeats = true, true
+		o.mack = true // To avoid auto-ack wrapping call below.
+		hbi = o.cfg.Heartbeat
+	}
+
+	// In case a consumer has not been set explicitly, then the
+	// durable name will be used as the consumer name.
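+	// (Illustrative aside, hypothetical subject: for the ordered consumer path
+	// configured above, a caller typically only writes
+	//
+	//	sub, err := js.SubscribeSync("ORDERS.*", nats.OrderedConsumer())
+	//
+	// and the library manages flow control, heartbeats and the ephemeral name.)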
+ if consumer == _EMPTY_ { + consumer = o.cfg.Durable + } + + // Find the stream mapped to the subject if not bound to a stream already. + if stream == _EMPTY_ { + stream, err = js.StreamNameBySubject(subj) + if err != nil { + return nil, err + } + } + + // With an explicit durable name, we can lookup the consumer first + // to which it should be attaching to. + // If bind to ordered consumer is true, skip the lookup. + if consumer != _EMPTY_ { + info, err = js.ConsumerInfo(stream, consumer) + notFoundErr = errors.Is(err, ErrConsumerNotFound) + lookupErr = err == ErrJetStreamNotEnabled || err == ErrTimeout || err == context.DeadlineExceeded + } + + switch { + case info != nil: + deliver, err = processConsInfo(info, o.cfg, isPullMode, subj, queue) + if err != nil { + return nil, err + } + icfg := &info.Config + hasFC, hbi = icfg.FlowControl, icfg.Heartbeat + hasHeartbeats = hbi > 0 + maxap = icfg.MaxAckPending + case (err != nil && !notFoundErr) || (notFoundErr && consumerBound): + // If the consumer is being bound and we got an error on pull subscribe then allow the error. + if !(isPullMode && lookupErr && consumerBound) { + return nil, err + } + default: + // Attempt to create consumer if not found nor using Bind. + shouldCreate = true + if o.cfg.DeliverSubject != _EMPTY_ { + deliver = o.cfg.DeliverSubject + } else if !isPullMode { + deliver = nc.NewInbox() + cfg.DeliverSubject = deliver + } + + // Do filtering always, server will clear as needed. + cfg.FilterSubject = subj + + // Pass the queue to the consumer config + if queue != _EMPTY_ { + cfg.DeliverGroup = queue + } + + // If not set, default to deliver all + if cfg.DeliverPolicy == deliverPolicyNotSet { + cfg.DeliverPolicy = DeliverAllPolicy + } + // If not set, default to ack explicit. + if cfg.AckPolicy == ackPolicyNotSet { + cfg.AckPolicy = AckExplicitPolicy + } + // If not set, default to instant + if cfg.ReplayPolicy == replayPolicyNotSet { + cfg.ReplayPolicy = ReplayInstantPolicy + } + + // If we have acks at all and the MaxAckPending is not set go ahead + // and set to the internal max for channel based consumers + if cfg.MaxAckPending == 0 && ch != nil && cfg.AckPolicy != AckNonePolicy { + cfg.MaxAckPending = cap(ch) + } + // Create request here. + ccreq = &createConsumerRequest{ + Stream: stream, + Config: &cfg, + } + hbi = cfg.Heartbeat + } + + if isPullMode { + nms = fmt.Sprintf(js.apiSubj(apiRequestNextT), stream, consumer) + deliver = nc.NewInbox() + } + + // In case this has a context, then create a child context that + // is possible to cancel via unsubscribe / drain. + var cancel func() + if ctx != nil { + ctx, cancel = context.WithCancel(ctx) + } + + jsi := &jsSub{ + js: js, + stream: stream, + consumer: consumer, + deliver: deliver, + hbi: hbi, + ordered: o.ordered, + ccreq: ccreq, + dseq: 1, + pull: isPullMode, + nms: nms, + psubj: subj, + cancel: cancel, + ackNone: o.cfg.AckPolicy == AckNonePolicy, + } + + // Auto acknowledge unless manual ack is set or policy is set to AckNonePolicy + if cb != nil && !o.mack && o.cfg.AckPolicy != AckNonePolicy { + ocb := cb + cb = func(m *Msg) { ocb(m); m.Ack() } + } + sub, err := nc.subscribe(deliver, queue, cb, ch, isSync, jsi) + if err != nil { + return nil, err + } + + // If we fail and we had the sub we need to cleanup, but can't just do a straight Unsubscribe or Drain. + // We need to clear the jsi so we do not remove any durables etc. 
+ cleanUpSub := func() { + if sub != nil { + sub.mu.Lock() + sub.jsi = nil + sub.mu.Unlock() + sub.Unsubscribe() + } + } + + // If we are creating or updating let's process that request. + if shouldCreate { + info, err := js.upsertConsumer(stream, cfg.Durable, ccreq.Config) + if err != nil { + var apiErr *APIError + if ok := errors.As(err, &apiErr); !ok { + cleanUpSub() + return nil, err + } + if consumer == _EMPTY_ || + (apiErr.ErrorCode != JSErrCodeConsumerAlreadyExists && apiErr.ErrorCode != JSErrCodeConsumerNameExists) { + cleanUpSub() + if errors.Is(apiErr, ErrStreamNotFound) { + return nil, ErrStreamNotFound + } + return nil, err + } + // We will not be using this sub here if we were push based. + if !isPullMode { + cleanUpSub() + } + + info, err = js.ConsumerInfo(stream, consumer) + if err != nil { + return nil, err + } + deliver, err = processConsInfo(info, o.cfg, isPullMode, subj, queue) + if err != nil { + return nil, err + } + + if !isPullMode { + // We can't reuse the channel, so if one was passed, we need to create a new one. + if isSync { + ch = make(chan *Msg, cap(ch)) + } else if ch != nil { + // User provided (ChanSubscription), simply try to drain it. + for done := false; !done; { + select { + case <-ch: + default: + done = true + } + } + } + jsi.deliver = deliver + jsi.hbi = info.Config.Heartbeat + + // Recreate the subscription here. + sub, err = nc.subscribe(jsi.deliver, queue, cb, ch, isSync, jsi) + if err != nil { + return nil, err + } + hasFC = info.Config.FlowControl + hasHeartbeats = info.Config.Heartbeat > 0 + } + } else { + // Since the library created the JS consumer, it will delete it on Unsubscribe()/Drain() + sub.mu.Lock() + sub.jsi.dc = true + sub.jsi.pending = info.NumPending + info.Delivered.Consumer + // If this is an ephemeral, we did not have a consumer name, we get it from the info + // after the AddConsumer returns. + if consumer == _EMPTY_ { + sub.jsi.consumer = info.Name + if isPullMode { + sub.jsi.nms = fmt.Sprintf(js.apiSubj(apiRequestNextT), stream, info.Name) + } + } + sub.mu.Unlock() + } + // Capture max ack pending from the info response here which covers both + // success and failure followed by consumer lookup. + maxap = info.Config.MaxAckPending + } + + // If maxap is greater than the default sub's pending limit, use that. + if maxap > DefaultSubPendingMsgsLimit { + // For bytes limit, use the min of maxp*1MB or DefaultSubPendingBytesLimit + bl := maxap * 1024 * 1024 + if bl < DefaultSubPendingBytesLimit { + bl = DefaultSubPendingBytesLimit + } + sub.SetPendingLimits(maxap, bl) + } + + // Do heartbeats last if needed. + if hasHeartbeats { + sub.scheduleHeartbeatCheck() + } + // For ChanSubscriptions, if we know that there is flow control, we will + // start a go routine that evaluates the number of delivered messages + // and process flow control. + if sub.Type() == ChanSubscription && hasFC { + sub.chanSubcheckForFlowControlResponse() + } + + // Wait for context to get canceled if there is one. + if ctx != nil { + go func() { + <-ctx.Done() + sub.Unsubscribe() + }() + } + + return sub, nil +} + +// This long-lived routine is used per ChanSubscription to check +// on the number of delivered messages and check for flow control response. +func (sub *Subscription) chanSubcheckForFlowControlResponse() { + sub.mu.Lock() + // We don't use defer since if we need to send an RC reply, we need + // to do it outside the sub's lock. So doing explicit unlock... 
+ if sub.closed { + sub.mu.Unlock() + return + } + var fcReply string + var nc *Conn + + jsi := sub.jsi + if jsi.csfct == nil { + jsi.csfct = time.AfterFunc(chanSubFCCheckInterval, sub.chanSubcheckForFlowControlResponse) + } else { + fcReply = sub.checkForFlowControlResponse() + nc = sub.conn + // Do the reset here under the lock, it's ok... + jsi.csfct.Reset(chanSubFCCheckInterval) + } + sub.mu.Unlock() + // This call will return an error (which we don't care here) + // if nc is nil or fcReply is empty. + nc.Publish(fcReply, nil) +} + +// ErrConsumerSequenceMismatch represents an error from a consumer +// that received a Heartbeat including sequence different to the +// one expected from the view of the client. +type ErrConsumerSequenceMismatch struct { + // StreamResumeSequence is the stream sequence from where the consumer + // should resume consuming from the stream. + StreamResumeSequence uint64 + + // ConsumerSequence is the sequence of the consumer that is behind. + ConsumerSequence uint64 + + // LastConsumerSequence is the sequence of the consumer when the heartbeat + // was received. + LastConsumerSequence uint64 +} + +func (ecs *ErrConsumerSequenceMismatch) Error() string { + return fmt.Sprintf("nats: sequence mismatch for consumer at sequence %d (%d sequences behind), should restart consumer from stream sequence %d", + ecs.ConsumerSequence, + ecs.LastConsumerSequence-ecs.ConsumerSequence, + ecs.StreamResumeSequence, + ) +} + +// isJSControlMessage will return true if this is an empty control status message +// and indicate what type of control message it is, say jsCtrlHB or jsCtrlFC +func isJSControlMessage(msg *Msg) (bool, int) { + if len(msg.Data) > 0 || msg.Header.Get(statusHdr) != controlMsg { + return false, 0 + } + val := msg.Header.Get(descrHdr) + if strings.HasPrefix(val, "Idle") { + return true, jsCtrlHB + } + if strings.HasPrefix(val, "Flow") { + return true, jsCtrlFC + } + return true, 0 +} + +// Keeps track of the incoming message's reply subject so that the consumer's +// state (deliver sequence, etc..) can be checked against heartbeats. +// We will also bump the incoming data message sequence that is used in FC cases. +// Runs under the subscription lock +func (sub *Subscription) trackSequences(reply string) { + // For flow control, keep track of incoming message sequence. + sub.jsi.fciseq++ + sub.jsi.cmeta = reply +} + +// Check to make sure messages are arriving in order. +// Returns true if the sub had to be replaced. Will cause upper layers to return. +// The caller has verified that sub.jsi != nil and that this is not a control message. +// Lock should be held. +func (sub *Subscription) checkOrderedMsgs(m *Msg) bool { + // Ignore msgs with no reply like HBs and flow control, they are handled elsewhere. + if m.Reply == _EMPTY_ { + return false + } + + // Normal message here. + tokens, err := getMetadataFields(m.Reply) + if err != nil { + return false + } + sseq, dseq := uint64(parseNum(tokens[ackStreamSeqTokenPos])), uint64(parseNum(tokens[ackConsumerSeqTokenPos])) + + jsi := sub.jsi + if dseq != jsi.dseq { + sub.resetOrderedConsumer(jsi.sseq + 1) + return true + } + // Update our tracking here. + jsi.dseq, jsi.sseq = dseq+1, sseq + return false +} + +// Update and replace sid. +// Lock should be held on entry but will be unlocked to prevent lock inversion. +func (sub *Subscription) applyNewSID() (osid int64) { + nc := sub.conn + sub.mu.Unlock() + + nc.subsMu.Lock() + osid = sub.sid + delete(nc.subs, osid) + // Place new one. 
+ nc.ssid++ + nsid := nc.ssid + nc.subs[nsid] = sub + nc.subsMu.Unlock() + + sub.mu.Lock() + sub.sid = nsid + return osid +} + +// We are here if we have detected a gap with an ordered consumer. +// We will create a new consumer and rewire the low level subscription. +// Lock should be held. +func (sub *Subscription) resetOrderedConsumer(sseq uint64) { + nc := sub.conn + if sub.jsi == nil || nc == nil || sub.closed { + return + } + + var maxStr string + // If there was an AUTO_UNSUB done, we need to adjust the new value + // to send after the SUB for the new sid. + if sub.max > 0 { + if sub.jsi.fciseq < sub.max { + adjustedMax := sub.max - sub.jsi.fciseq + maxStr = strconv.Itoa(int(adjustedMax)) + } else { + // We are already at the max, so we should just unsub the + // existing sub and be done + go func(sid int64) { + nc.mu.Lock() + nc.bw.appendString(fmt.Sprintf(unsubProto, sid, _EMPTY_)) + nc.kickFlusher() + nc.mu.Unlock() + }(sub.sid) + return + } + } + + // Quick unsubscribe. Since we know this is a simple push subscriber we do in place. + osid := sub.applyNewSID() + + // Grab new inbox. + newDeliver := nc.NewInbox() + sub.Subject = newDeliver + + // Snapshot the new sid under sub lock. + nsid := sub.sid + + // We are still in the low level readLoop for the connection so we need + // to spin a go routine to try to create the new consumer. + go func() { + // Unsubscribe and subscribe with new inbox and sid. + // Remap a new low level sub into this sub since its client accessible. + // This is done here in this go routine to prevent lock inversion. + nc.mu.Lock() + nc.bw.appendString(fmt.Sprintf(unsubProto, osid, _EMPTY_)) + nc.bw.appendString(fmt.Sprintf(subProto, newDeliver, _EMPTY_, nsid)) + if maxStr != _EMPTY_ { + nc.bw.appendString(fmt.Sprintf(unsubProto, nsid, maxStr)) + } + nc.kickFlusher() + nc.mu.Unlock() + + pushErr := func(err error) { + nc.handleConsumerSequenceMismatch(sub, fmt.Errorf("%w: recreating ordered consumer", err)) + nc.unsubscribe(sub, 0, true) + } + + sub.mu.Lock() + jsi := sub.jsi + // Reset some items in jsi. + jsi.dseq = 1 + jsi.cmeta = _EMPTY_ + jsi.fcr, jsi.fcd = _EMPTY_, 0 + jsi.deliver = newDeliver + // Reset consumer request for starting policy. + cfg := jsi.ccreq.Config + cfg.DeliverSubject = newDeliver + cfg.DeliverPolicy = DeliverByStartSequencePolicy + cfg.OptStartSeq = sseq + + ccSubj := fmt.Sprintf(apiLegacyConsumerCreateT, jsi.stream) + j, err := json.Marshal(jsi.ccreq) + js := jsi.js + sub.mu.Unlock() + + if err != nil { + pushErr(err) + return + } + + resp, err := nc.Request(js.apiSubj(ccSubj), j, js.opts.wait) + if err != nil { + if errors.Is(err, ErrNoResponders) || errors.Is(err, ErrTimeout) { + // if creating consumer failed, retry + return + } + pushErr(err) + return + } + + var cinfo consumerResponse + err = json.Unmarshal(resp.Data, &cinfo) + if err != nil { + pushErr(err) + return + } + + if cinfo.Error != nil { + if cinfo.Error.ErrorCode == JSErrCodeInsufficientResourcesErr { + // retry for insufficient resources, as it may mean that client is connected to a running + // server in cluster while the server hosting R1 JetStream resources is restarting + return + } + pushErr(cinfo.Error) + return + } + + sub.mu.Lock() + jsi.consumer = cinfo.Name + sub.mu.Unlock() + }() +} + +// For jetstream subscriptions, returns the number of delivered messages. +// For ChanSubscription, this value is computed based on the known number +// of messages added to the channel minus the current size of that channel. 
+// Lock held on entry +func (sub *Subscription) getJSDelivered() uint64 { + if sub.typ == ChanSubscription { + return sub.jsi.fciseq - uint64(len(sub.mch)) + } + return sub.delivered +} + +// checkForFlowControlResponse will check to see if we should send a flow control response +// based on the subscription current delivered index and the target. +// Runs under subscription lock +func (sub *Subscription) checkForFlowControlResponse() string { + // Caller has verified that there is a sub.jsi and fc + jsi := sub.jsi + jsi.active = true + if sub.getJSDelivered() >= jsi.fcd { + fcr := jsi.fcr + jsi.fcr, jsi.fcd = _EMPTY_, 0 + return fcr + } + return _EMPTY_ +} + +// Record an inbound flow control message. +// Runs under subscription lock +func (sub *Subscription) scheduleFlowControlResponse(reply string) { + sub.jsi.fcr, sub.jsi.fcd = reply, sub.jsi.fciseq +} + +// Checks for activity from our consumer. +// If we do not think we are active send an async error. +func (sub *Subscription) activityCheck() { + sub.mu.Lock() + jsi := sub.jsi + if jsi == nil || sub.closed { + sub.mu.Unlock() + return + } + + active := jsi.active + jsi.hbc.Reset(jsi.hbi * hbcThresh) + jsi.active = false + nc := sub.conn + sub.mu.Unlock() + + if !active { + if !jsi.ordered || nc.Status() != CONNECTED { + nc.mu.Lock() + if errCB := nc.Opts.AsyncErrorCB; errCB != nil { + nc.ach.push(func() { errCB(nc, sub, ErrConsumerNotActive) }) + } + nc.mu.Unlock() + return + } + sub.mu.Lock() + sub.resetOrderedConsumer(jsi.sseq + 1) + sub.mu.Unlock() + } +} + +// scheduleHeartbeatCheck sets up the timer check to make sure we are active +// or receiving idle heartbeats.. +func (sub *Subscription) scheduleHeartbeatCheck() { + sub.mu.Lock() + defer sub.mu.Unlock() + + jsi := sub.jsi + if jsi == nil { + return + } + + if jsi.hbc == nil { + jsi.hbc = time.AfterFunc(jsi.hbi*hbcThresh, sub.activityCheck) + } else { + jsi.hbc.Reset(jsi.hbi * hbcThresh) + } +} + +// handleConsumerSequenceMismatch will send an async error that can be used to restart a push based consumer. +func (nc *Conn) handleConsumerSequenceMismatch(sub *Subscription, err error) { + nc.mu.Lock() + errCB := nc.Opts.AsyncErrorCB + if errCB != nil { + nc.ach.push(func() { errCB(nc, sub, err) }) + } + nc.mu.Unlock() +} + +// checkForSequenceMismatch will make sure we have not missed any messages since last seen. +func (nc *Conn) checkForSequenceMismatch(msg *Msg, s *Subscription, jsi *jsSub) { + // Process heartbeat received, get latest control metadata if present. + s.mu.Lock() + ctrl, ordered := jsi.cmeta, jsi.ordered + jsi.active = true + s.mu.Unlock() + + if ctrl == _EMPTY_ { + return + } + + tokens, err := getMetadataFields(ctrl) + if err != nil { + return + } + + // Consumer sequence. + var ldseq string + dseq := tokens[ackConsumerSeqTokenPos] + hdr := msg.Header[lastConsumerSeqHdr] + if len(hdr) == 1 { + ldseq = hdr[0] + } + + // Detect consumer sequence mismatch and whether + // should restart the consumer. + if ldseq != dseq { + // Dispatch async error including details such as + // from where the consumer could be restarted. 
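+		// Applications can observe this through the connection's async error
+		// callback; an illustrative sketch (handler body hypothetical):
+		//
+		//	nats.ErrorHandler(func(_ *nats.Conn, _ *nats.Subscription, err error) {
+		//		var ecs *nats.ErrConsumerSequenceMismatch
+		//		if errors.As(err, &ecs) {
+		//			log.Printf("restart from stream seq %d", ecs.StreamResumeSequence)
+		//		}
+		//	})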
+		sseq := parseNum(tokens[ackStreamSeqTokenPos])
+		if ordered {
+			s.mu.Lock()
+			s.resetOrderedConsumer(jsi.sseq + 1)
+			s.mu.Unlock()
+		} else {
+			ecs := &ErrConsumerSequenceMismatch{
+				StreamResumeSequence: uint64(sseq),
+				ConsumerSequence: uint64(parseNum(dseq)),
+				LastConsumerSequence: uint64(parseNum(ldseq)),
+			}
+			nc.handleConsumerSequenceMismatch(s, ecs)
+		}
+	}
+}
+
+type streamRequest struct {
+	Subject string `json:"subject,omitempty"`
+}
+
+type streamNamesResponse struct {
+	apiResponse
+	apiPaged
+	Streams []string `json:"streams"`
+}
+
+type subOpts struct {
+	// For attaching.
+	stream, consumer string
+	// For creating or updating.
+	cfg *ConsumerConfig
+	// For binding a subscription to a consumer without creating it.
+	bound bool
+	// For manual ack
+	mack bool
+	// For an ordered consumer.
+	ordered bool
+	ctx context.Context
+}
+
+// OrderedConsumer will create a FIFO direct/ephemeral consumer for in order delivery of messages.
+// There are no redeliveries and no acks, and flow control and heartbeats will be added but
+// will be taken care of without additional client code.
+func OrderedConsumer() SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.ordered = true
+		return nil
+	})
+}
+
+// ManualAck disables auto ack functionality for async subscriptions.
+func ManualAck() SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.mack = true
+		return nil
+	})
+}
+
+// Description will set the description for the created consumer.
+func Description(description string) SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.cfg.Description = description
+		return nil
+	})
+}
+
+// Durable defines the consumer name for JetStream durable subscribers.
+// This function will return ErrInvalidConsumerName if the name contains
+// any dot ".".
+func Durable(consumer string) SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		if opts.cfg.Durable != _EMPTY_ {
+			return fmt.Errorf("nats: option Durable set more than once")
+		}
+		if opts.consumer != _EMPTY_ && opts.consumer != consumer {
+			return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.consumer, consumer)
+		}
+		if err := checkConsumerName(consumer); err != nil {
+			return err
+		}
+
+		opts.cfg.Durable = consumer
+		return nil
+	})
+}
+
+// DeliverAll will configure a Consumer to receive all the
+// messages from a Stream.
+func DeliverAll() SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.cfg.DeliverPolicy = DeliverAllPolicy
+		return nil
+	})
+}
+
+// DeliverLast configures a Consumer to receive messages
+// starting with the latest one.
+func DeliverLast() SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.cfg.DeliverPolicy = DeliverLastPolicy
+		return nil
+	})
+}
+
+// DeliverLastPerSubject configures a Consumer to receive messages
+// starting with the latest one for each filtered subject.
+func DeliverLastPerSubject() SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.cfg.DeliverPolicy = DeliverLastPerSubjectPolicy
+		return nil
+	})
+}
+
+// DeliverNew configures a Consumer to receive messages
+// published after the subscription.
+func DeliverNew() SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.cfg.DeliverPolicy = DeliverNewPolicy
+		return nil
+	})
+}
+
+// StartSequence configures a Consumer to receive
+// messages from a start sequence.
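+// For example (illustrative, names hypothetical), to replay a stream starting
+// at sequence 42:
+//
+//	sub, err := js.SubscribeSync("ORDERS.*", nats.StartSequence(42))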
+func StartSequence(seq uint64) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.DeliverPolicy = DeliverByStartSequencePolicy + opts.cfg.OptStartSeq = seq + return nil + }) +} + +// StartTime configures a Consumer to receive +// messages from a start time. +func StartTime(startTime time.Time) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.DeliverPolicy = DeliverByStartTimePolicy + opts.cfg.OptStartTime = &startTime + return nil + }) +} + +// AckNone requires no acks for delivered messages. +func AckNone() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.AckPolicy = AckNonePolicy + return nil + }) +} + +// AckAll when acking a sequence number, this implicitly acks all sequences +// below this one as well. +func AckAll() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.AckPolicy = AckAllPolicy + return nil + }) +} + +// AckExplicit requires ack or nack for all messages. +func AckExplicit() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.AckPolicy = AckExplicitPolicy + return nil + }) +} + +// MaxDeliver sets the number of redeliveries for a message. +func MaxDeliver(n int) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.MaxDeliver = n + return nil + }) +} + +// MaxAckPending sets the number of outstanding acks that are allowed before +// message delivery is halted. +func MaxAckPending(n int) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.MaxAckPending = n + return nil + }) +} + +// ReplayOriginal replays the messages at the original speed. +func ReplayOriginal() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.ReplayPolicy = ReplayOriginalPolicy + return nil + }) +} + +// ReplayInstant replays the messages as fast as possible. +func ReplayInstant() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.ReplayPolicy = ReplayInstantPolicy + return nil + }) +} + +// RateLimit is the Bits per sec rate limit applied to a push consumer. +func RateLimit(n uint64) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.RateLimit = n + return nil + }) +} + +// BackOff is an array of time durations that represent the time to delay based on delivery count. +func BackOff(backOff []time.Duration) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.BackOff = backOff + return nil + }) +} + +// BindStream binds a consumer to a stream explicitly based on a name. +// When a stream name is not specified, the library uses the subscribe +// subject as a way to find the stream name. It is done by making a request +// to the server to get list of stream names that have a filter for this +// subject. If the returned list contains a single stream, then this +// stream name will be used, otherwise the `ErrNoMatchingStream` is returned. +// To avoid the stream lookup, provide the stream name with this function. +// See also `Bind()`. +func BindStream(stream string) SubOpt { + return subOptFn(func(opts *subOpts) error { + if opts.stream != _EMPTY_ && opts.stream != stream { + return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream) + } + + opts.stream = stream + return nil + }) +} + +// Bind binds a subscription to an existing consumer from a stream without attempting to create. +// The first argument is the stream name and the second argument will be the consumer name. 
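+// For example (illustrative, assuming a pre-created consumer "MONITOR" on
+// stream "ORDERS" whose filter subject matches):
+//
+//	sub, err := js.SubscribeSync("ORDERS.*", nats.Bind("ORDERS", "MONITOR"))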
+func Bind(stream, consumer string) SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		if stream == _EMPTY_ {
+			return ErrStreamNameRequired
+		}
+		if consumer == _EMPTY_ {
+			return ErrConsumerNameRequired
+		}
+
+		// In case of pull subscribers, the durable name is a required parameter
+		// so check that they are not different.
+		if opts.cfg.Durable != _EMPTY_ && opts.cfg.Durable != consumer {
+			return fmt.Errorf("nats: duplicate consumer names (%s and %s)", opts.cfg.Durable, consumer)
+		}
+		if opts.stream != _EMPTY_ && opts.stream != stream {
+			return fmt.Errorf("nats: duplicate stream name (%s and %s)", opts.stream, stream)
+		}
+		opts.stream = stream
+		opts.consumer = consumer
+		opts.bound = true
+		return nil
+	})
+}
+
+// EnableFlowControl enables flow control for a push based consumer.
+func EnableFlowControl() SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.cfg.FlowControl = true
+		return nil
+	})
+}
+
+// IdleHeartbeat enables push based consumers to have idle heartbeats delivered.
+func IdleHeartbeat(duration time.Duration) SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.cfg.Heartbeat = duration
+		return nil
+	})
+}
+
+// DeliverSubject specifies the JetStream consumer deliver subject.
+//
+// This option is used only in situations where the consumer does not exist
+// and a creation request is sent to the server. If not provided, an inbox
+// will be selected.
+// If a consumer exists, then the NATS subscription will be created on
+// the JetStream consumer's DeliverSubject, not necessarily this subject.
+func DeliverSubject(subject string) SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.cfg.DeliverSubject = subject
+		return nil
+	})
+}
+
+// HeadersOnly() will instruct the consumer to only deliver headers and no payloads.
+func HeadersOnly() SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.cfg.HeadersOnly = true
+		return nil
+	})
+}
+
+// MaxRequestBatch sets the maximum pull consumer batch size that a Fetch()
+// can request.
+func MaxRequestBatch(max int) SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.cfg.MaxRequestBatch = max
+		return nil
+	})
+}
+
+// MaxRequestExpires sets the maximum pull consumer request expiration that a
+// Fetch() can request (using the Fetch's timeout value).
+func MaxRequestExpires(max time.Duration) SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.cfg.MaxRequestExpires = max
+		return nil
+	})
+}
+
+// MaxRequestMaxBytes sets the maximum pull consumer request bytes that a
+// Fetch() can receive.
+func MaxRequestMaxBytes(bytes int) SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		opts.cfg.MaxRequestMaxBytes = bytes
+		return nil
+	})
+}
+
+// InactiveThreshold indicates how long the server should keep a consumer
+// after detecting a lack of activity. In NATS Server 2.8.4 and earlier, this
+// option only applies to ephemeral consumers. In NATS Server 2.9.0 and later,
+// this option applies to both ephemeral and durable consumers, allowing durable
+// consumers to also be deleted automatically after the inactivity threshold has
+// passed.
+func InactiveThreshold(threshold time.Duration) SubOpt {
+	return subOptFn(func(opts *subOpts) error {
+		if threshold < 0 {
+			return fmt.Errorf("invalid InactiveThreshold value (%v), needs to be greater or equal to 0", threshold)
+		}
+		opts.cfg.InactiveThreshold = threshold
+		return nil
+	})
+}
+
+// ConsumerReplicas sets the replica count for a consumer.
+func ConsumerReplicas(replicas int) SubOpt { + return subOptFn(func(opts *subOpts) error { + if replicas < 1 { + return fmt.Errorf("invalid ConsumerReplicas value (%v), needs to be greater than 0", replicas) + } + opts.cfg.Replicas = replicas + return nil + }) +} + +// ConsumerMemoryStorage sets the memory storage to true for a consumer. +func ConsumerMemoryStorage() SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.MemoryStorage = true + return nil + }) +} + +func (sub *Subscription) ConsumerInfo() (*ConsumerInfo, error) { + sub.mu.Lock() + // TODO(dlc) - Better way to mark especially if we attach. + if sub.jsi == nil || sub.jsi.consumer == _EMPTY_ { + sub.mu.Unlock() + return nil, ErrTypeSubscription + } + + // Consumer info lookup should fail if in direct mode. + js := sub.jsi.js + stream, consumer := sub.jsi.stream, sub.jsi.consumer + sub.mu.Unlock() + + return js.getConsumerInfo(stream, consumer) +} + +type pullOpts struct { + maxBytes int + ttl time.Duration + ctx context.Context +} + +// PullOpt are the options that can be passed when pulling a batch of messages. +type PullOpt interface { + configurePull(opts *pullOpts) error +} + +// PullMaxWaiting defines the max inflight pull requests. +func PullMaxWaiting(n int) SubOpt { + return subOptFn(func(opts *subOpts) error { + opts.cfg.MaxWaiting = n + return nil + }) +} + +// PullMaxBytes defines the max bytes allowed for a fetch request. +type PullMaxBytes int + +func (n PullMaxBytes) configurePull(opts *pullOpts) error { + opts.maxBytes = int(n) + return nil +} + +var ( + // errNoMessages is an error that a Fetch request using no_wait can receive to signal + // that there are no more messages available. + errNoMessages = errors.New("nats: no messages") + + // errRequestsPending is an error that represents a sub.Fetch requests that was using + // no_wait and expires time got discarded by the server. + errRequestsPending = errors.New("nats: requests pending") +) + +// Returns if the given message is a user message or not, and if +// `checkSts` is true, returns appropriate error based on the +// content of the status (404, etc..) +func checkMsg(msg *Msg, checkSts, isNoWait bool) (usrMsg bool, err error) { + // Assume user message + usrMsg = true + + // If payload or no header, consider this a user message + if len(msg.Data) > 0 || len(msg.Header) == 0 { + return + } + // Look for status header + val := msg.Header.Get(statusHdr) + // If not present, then this is considered a user message + if val == _EMPTY_ { + return + } + // At this point, this is not a user message since there is + // no payload and a "Status" header. + usrMsg = false + + // If we don't care about status, we are done. + if !checkSts { + return + } + switch val { + case noResponders: + err = ErrNoResponders + case noMessagesSts: + // 404 indicates that there are no messages. + err = errNoMessages + case reqTimeoutSts: + // In case of a fetch request with no wait request and expires time, + // need to skip 408 errors and retry. + if isNoWait { + err = errRequestsPending + } else { + // Older servers may send a 408 when a request in the server was expired + // and interest is still found, which will be the case for our + // implementation. Regardless, ignore 408 errors until receiving at least + // one message when making requests without no_wait. 
+ err = ErrTimeout + } + case jetStream409Sts: + if strings.Contains(strings.ToLower(string(msg.Header.Get(descrHdr))), "consumer deleted") { + err = ErrConsumerDeleted + break + } + + if strings.Contains(strings.ToLower(string(msg.Header.Get(descrHdr))), "leadership change") { + err = ErrConsumerLeadershipChanged + break + } + fallthrough + default: + err = fmt.Errorf("nats: %s", msg.Header.Get(descrHdr)) + } + return +} + +// Fetch pulls a batch of messages from a stream for a pull consumer. +func (sub *Subscription) Fetch(batch int, opts ...PullOpt) ([]*Msg, error) { + if sub == nil { + return nil, ErrBadSubscription + } + if batch < 1 { + return nil, ErrInvalidArg + } + + var o pullOpts + for _, opt := range opts { + if err := opt.configurePull(&o); err != nil { + return nil, err + } + } + if o.ctx != nil && o.ttl != 0 { + return nil, ErrContextAndTimeout + } + + sub.mu.Lock() + jsi := sub.jsi + // Reject if this is not a pull subscription. Note that sub.typ is SyncSubscription, + // so check for jsi.pull boolean instead. + if jsi == nil || !jsi.pull { + sub.mu.Unlock() + return nil, ErrTypeSubscription + } + + nc := sub.conn + nms := sub.jsi.nms + rply := sub.jsi.deliver + js := sub.jsi.js + pmc := len(sub.mch) > 0 + + // All fetch requests have an expiration, in case of no explicit expiration + // then the default timeout of the JetStream context is used. + ttl := o.ttl + if ttl == 0 { + ttl = js.opts.wait + } + sub.mu.Unlock() + + // Use the given context or setup a default one for the span + // of the pull batch request. + var ( + ctx = o.ctx + err error + cancel context.CancelFunc + ) + if ctx == nil { + ctx, cancel = context.WithTimeout(context.Background(), ttl) + defer cancel() + } else if _, hasDeadline := ctx.Deadline(); !hasDeadline { + // Prevent from passing the background context which will just block + // and cannot be canceled either. + if octx, ok := ctx.(ContextOpt); ok && octx.Context == context.Background() { + return nil, ErrNoDeadlineContext + } + + // If the context did not have a deadline, then create a new child context + // that will use the default timeout from the JS context. + ctx, cancel = context.WithTimeout(ctx, ttl) + defer cancel() + } + + // Check if context not done already before making the request. + select { + case <-ctx.Done(): + if o.ctx != nil { // Timeout or Cancel triggered by context object option + err = ctx.Err() + } else { // Timeout triggered by timeout option + err = ErrTimeout + } + default: + } + if err != nil { + return nil, err + } + + var ( + msgs = make([]*Msg, 0, batch) + msg *Msg + ) + for pmc && len(msgs) < batch { + // Check next msg with booleans that say that this is an internal call + // for a pull subscribe (so don't reject it) and don't wait if there + // are no messages. + msg, err = sub.nextMsgWithContext(ctx, true, false) + if err != nil { + if err == errNoMessages { + err = nil + } + break + } + // Check msg but just to determine if this is a user message + // or status message, however, we don't care about values of status + // messages at this point in the Fetch() call, so checkMsg can't + // return an error. + if usrMsg, _ := checkMsg(msg, false, false); usrMsg { + msgs = append(msgs, msg) + } + } + if err == nil && len(msgs) < batch { + // For batch real size of 1, it does not make sense to set no_wait in + // the request. + noWait := batch-len(msgs) > 1 + + var nr nextRequest + + sendReq := func() error { + // The current deadline for the context will be used + // to set the expires TTL for a fetch request. 
+ deadline, _ := ctx.Deadline() + ttl = time.Until(deadline) + + // Check if context has already been canceled or expired. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + // Make our request expiration a bit shorter than the current timeout. + expires := ttl + if ttl >= 20*time.Millisecond { + expires = ttl - 10*time.Millisecond + } + + nr.Batch = batch - len(msgs) + nr.Expires = expires + nr.NoWait = noWait + nr.MaxBytes = o.maxBytes + req, _ := json.Marshal(nr) + return nc.PublishRequest(nms, rply, req) + } + + err = sendReq() + for err == nil && len(msgs) < batch { + // Ask for next message and wait if there are no messages + msg, err = sub.nextMsgWithContext(ctx, true, true) + if err == nil { + var usrMsg bool + + usrMsg, err = checkMsg(msg, true, noWait) + if err == nil && usrMsg { + msgs = append(msgs, msg) + } else if noWait && (err == errNoMessages || err == errRequestsPending) && len(msgs) == 0 { + // If we have a 404/408 for our "no_wait" request and have + // not collected any message, then resend request to + // wait this time. + noWait = false + err = sendReq() + } else if err == ErrTimeout && len(msgs) == 0 { + // If we get a 408, we will bail if we already collected some + // messages, otherwise ignore and go back calling NextMsg. + err = nil + } + } + } + } + // If there is at least a message added to msgs, then need to return OK and no error + if err != nil && len(msgs) == 0 { + return nil, o.checkCtxErr(err) + } + return msgs, nil +} + +// MessageBatch provides methods to retrieve messages consumed using [Subscribe.FetchBatch]. +type MessageBatch interface { + // Messages returns a channel on which messages will be published. + Messages() <-chan *Msg + + // Error returns an error encountered when fetching messages. + Error() error + + // Done signals end of execution. + Done() <-chan struct{} +} + +type messageBatch struct { + msgs chan *Msg + err error + done chan struct{} +} + +func (mb *messageBatch) Messages() <-chan *Msg { + return mb.msgs +} + +func (mb *messageBatch) Error() error { + return mb.err +} + +func (mb *messageBatch) Done() <-chan struct{} { + return mb.done +} + +// FetchBatch pulls a batch of messages from a stream for a pull consumer. +// Unlike [Subscription.Fetch], it is non blocking and returns [MessageBatch], +// allowing to retrieve incoming messages from a channel. +// The returned channel is always closed after all messages for a batch have been +// delivered by the server - it is safe to iterate over it using range. +// +// To avoid using default JetStream timeout as fetch expiry time, use [nats.MaxWait] +// or [nats.Context] (with deadline set). +// +// This method will not return error in case of pull request expiry (even if there are no messages). +// Any other error encountered when receiving messages will cause FetchBatch to stop receiving new messages. +func (sub *Subscription) FetchBatch(batch int, opts ...PullOpt) (MessageBatch, error) { + if sub == nil { + return nil, ErrBadSubscription + } + if batch < 1 { + return nil, ErrInvalidArg + } + + var o pullOpts + for _, opt := range opts { + if err := opt.configurePull(&o); err != nil { + return nil, err + } + } + if o.ctx != nil && o.ttl != 0 { + return nil, ErrContextAndTimeout + } + sub.mu.Lock() + jsi := sub.jsi + // Reject if this is not a pull subscription. Note that sub.typ is SyncSubscription, + // so check for jsi.pull boolean instead. 
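+	// (Caller-side usage, an illustrative sketch with hypothetical names;
+	// as with Fetch(), nats.MaxWait bounds the request:
+	//
+	//	batch, _ := sub.FetchBatch(100, nats.MaxWait(5*time.Second))
+	//	for msg := range batch.Messages() {
+	//		msg.Ack()
+	//	}
+	//	if err := batch.Error(); err != nil {
+	//		log.Printf("batch interrupted: %v", err)
+	//	}
+	//
+	// Messages() is closed once the batch is complete or an error occurs.)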
+ if jsi == nil || !jsi.pull { + sub.mu.Unlock() + return nil, ErrTypeSubscription + } + + nc := sub.conn + nms := sub.jsi.nms + rply := sub.jsi.deliver + js := sub.jsi.js + pmc := len(sub.mch) > 0 + + // All fetch requests have an expiration, in case of no explicit expiration + // then the default timeout of the JetStream context is used. + ttl := o.ttl + if ttl == 0 { + ttl = js.opts.wait + } + sub.mu.Unlock() + + // Use the given context or setup a default one for the span + // of the pull batch request. + var ( + ctx = o.ctx + cancel context.CancelFunc + cancelContext = true + ) + if ctx == nil { + ctx, cancel = context.WithTimeout(context.Background(), ttl) + } else if _, hasDeadline := ctx.Deadline(); !hasDeadline { + // Prevent from passing the background context which will just block + // and cannot be canceled either. + if octx, ok := ctx.(ContextOpt); ok && octx.Context == context.Background() { + return nil, ErrNoDeadlineContext + } + + // If the context did not have a deadline, then create a new child context + // that will use the default timeout from the JS context. + ctx, cancel = context.WithTimeout(ctx, ttl) + } + defer func() { + // only cancel the context here if we are sure the fetching goroutine has not been started yet + if cancel != nil && cancelContext { + cancel() + } + }() + + // Check if context not done already before making the request. + select { + case <-ctx.Done(): + if o.ctx != nil { // Timeout or Cancel triggered by context object option + return nil, ctx.Err() + } else { // Timeout triggered by timeout option + return nil, ErrTimeout + } + default: + } + + result := &messageBatch{ + msgs: make(chan *Msg, batch), + done: make(chan struct{}, 1), + } + var msg *Msg + for pmc && len(result.msgs) < batch { + // Check next msg with booleans that say that this is an internal call + // for a pull subscribe (so don't reject it) and don't wait if there + // are no messages. + msg, err := sub.nextMsgWithContext(ctx, true, false) + if err != nil { + if err == errNoMessages { + err = nil + } + result.err = err + break + } + // Check msg but just to determine if this is a user message + // or status message, however, we don't care about values of status + // messages at this point in the Fetch() call, so checkMsg can't + // return an error. + if usrMsg, _ := checkMsg(msg, false, false); usrMsg { + result.msgs <- msg + } + } + if len(result.msgs) == batch || result.err != nil { + close(result.msgs) + result.done <- struct{}{} + return result, nil + } + + deadline, _ := ctx.Deadline() + ttl = time.Until(deadline) + + // Make our request expiration a bit shorter than the current timeout. 
+ expires := ttl + if ttl >= 20*time.Millisecond { + expires = ttl - 10*time.Millisecond + } + + requestBatch := batch - len(result.msgs) + req := nextRequest{ + Expires: expires, + Batch: requestBatch, + MaxBytes: o.maxBytes, + } + reqJSON, err := json.Marshal(req) + if err != nil { + close(result.msgs) + result.done <- struct{}{} + result.err = err + return result, nil + } + if err := nc.PublishRequest(nms, rply, reqJSON); err != nil { + if len(result.msgs) == 0 { + return nil, err + } + close(result.msgs) + result.done <- struct{}{} + result.err = err + return result, nil + } + cancelContext = false + go func() { + if cancel != nil { + defer cancel() + } + var requestMsgs int + for requestMsgs < requestBatch { + // Ask for next message and wait if there are no messages + msg, err = sub.nextMsgWithContext(ctx, true, true) + if err != nil { + break + } + var usrMsg bool + + usrMsg, err = checkMsg(msg, true, false) + if err != nil { + if err == ErrTimeout { + err = nil + } + break + } + if usrMsg { + result.msgs <- msg + requestMsgs++ + } + } + if err != nil { + result.err = o.checkCtxErr(err) + } + close(result.msgs) + result.done <- struct{}{} + }() + return result, nil +} + +// checkCtxErr is used to determine whether ErrTimeout should be returned in case of context timeout +func (o *pullOpts) checkCtxErr(err error) error { + if o.ctx == nil && err == context.DeadlineExceeded { + return ErrTimeout + } + return err +} + +func (js *js) getConsumerInfo(stream, consumer string) (*ConsumerInfo, error) { + ctx, cancel := context.WithTimeout(context.Background(), js.opts.wait) + defer cancel() + return js.getConsumerInfoContext(ctx, stream, consumer) +} + +func (js *js) getConsumerInfoContext(ctx context.Context, stream, consumer string) (*ConsumerInfo, error) { + ccInfoSubj := fmt.Sprintf(apiConsumerInfoT, stream, consumer) + resp, err := js.apiRequestWithContext(ctx, js.apiSubj(ccInfoSubj), nil) + if err != nil { + if err == ErrNoResponders { + err = ErrJetStreamNotEnabled + } + return nil, err + } + + var info consumerResponse + if err := json.Unmarshal(resp.Data, &info); err != nil { + return nil, err + } + if info.Error != nil { + if errors.Is(info.Error, ErrConsumerNotFound) { + return nil, ErrConsumerNotFound + } + if errors.Is(info.Error, ErrStreamNotFound) { + return nil, ErrStreamNotFound + } + return nil, info.Error + } + return info.ConsumerInfo, nil +} + +// a RequestWithContext with tracing via TraceCB +func (js *js) apiRequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) { + if js.opts.shouldTrace { + ctrace := js.opts.ctrace + if ctrace.RequestSent != nil { + ctrace.RequestSent(subj, data) + } + } + resp, err := js.nc.RequestWithContext(ctx, subj, data) + if err != nil { + return nil, err + } + if js.opts.shouldTrace { + ctrace := js.opts.ctrace + if ctrace.RequestSent != nil { + ctrace.ResponseReceived(subj, resp.Data, resp.Header) + } + } + + return resp, nil +} + +func (m *Msg) checkReply() error { + if m == nil || m.Sub == nil { + return ErrMsgNotBound + } + if m.Reply == _EMPTY_ { + return ErrMsgNoReply + } + return nil +} + +// ackReply handles all acks. Will do the right thing for pull and sync mode. +// It ensures that an ack is only sent a single time, regardless of +// how many times it is being called to avoid duplicated acks. 
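+//
+// The public helpers below all route through this function; an illustrative
+// sketch of their use inside a message handler:
+//
+//	m.InProgress()              // still working, resets the redelivery timer
+//	m.Ack()                     // processed successfully, or:
+//	m.NakWithDelay(time.Minute) // ask for redelivery later, or:
+//	m.Term()                    // never redeliver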
+func (m *Msg) ackReply(ackType []byte, sync bool, opts ...AckOpt) error {
+	var o ackOpts
+	for _, opt := range opts {
+		if err := opt.configureAck(&o); err != nil {
+			return err
+		}
+	}
+
+	if err := m.checkReply(); err != nil {
+		return err
+	}
+
+	var ackNone bool
+	var js *js
+
+	sub := m.Sub
+	sub.mu.Lock()
+	nc := sub.conn
+	if jsi := sub.jsi; jsi != nil {
+		js = jsi.js
+		ackNone = jsi.ackNone
+	}
+	sub.mu.Unlock()
+
+	// Skip if already acked.
+	if atomic.LoadUint32(&m.ackd) == 1 {
+		return ErrMsgAlreadyAckd
+	}
+	if ackNone {
+		return ErrCantAckIfConsumerAckNone
+	}
+
+	usesCtx := o.ctx != nil
+	usesWait := o.ttl > 0
+
+	// Only allow either AckWait or Context option to set the timeout.
+	if usesWait && usesCtx {
+		return ErrContextAndTimeout
+	}
+
+	sync = sync || usesCtx || usesWait
+	ctx := o.ctx
+	wait := defaultRequestWait
+	if usesWait {
+		wait = o.ttl
+	} else if js != nil {
+		wait = js.opts.wait
+	}
+
+	var body []byte
+	var err error
+	// This will be > 0 only when called from NakWithDelay()
+	if o.nakDelay > 0 {
+		body = []byte(fmt.Sprintf("%s {\"delay\": %d}", ackType, o.nakDelay.Nanoseconds()))
+	} else {
+		body = ackType
+	}
+
+	if sync {
+		if usesCtx {
+			_, err = nc.RequestWithContext(ctx, m.Reply, body)
+		} else {
+			_, err = nc.Request(m.Reply, body, wait)
+		}
+	} else {
+		err = nc.Publish(m.Reply, body)
+	}
+
+	// Mark that the message has been acked unless it is ackProgress
+	// which can be sent many times.
+	if err == nil && !bytes.Equal(ackType, ackProgress) {
+		atomic.StoreUint32(&m.ackd, 1)
+	}
+
+	return err
+}
+
+// Ack acknowledges a message. This tells the server that the message was
+// successfully processed and it can move on to the next message.
+func (m *Msg) Ack(opts ...AckOpt) error {
+	return m.ackReply(ackAck, false, opts...)
+}
+
+// AckSync is the synchronous version of Ack. This indicates successful message
+// processing.
+func (m *Msg) AckSync(opts ...AckOpt) error {
+	return m.ackReply(ackAck, true, opts...)
+}
+
+// Nak negatively acknowledges a message. This tells the server to redeliver
+// the message. You can configure the number of redeliveries by passing
+// nats.MaxDeliver when you Subscribe. The default is infinite redeliveries.
+func (m *Msg) Nak(opts ...AckOpt) error {
+	return m.ackReply(ackNak, false, opts...)
+}
+
+// NakWithDelay negatively acknowledges a message. This tells the server to redeliver
+// the message after the given `delay` duration. You can configure the number
+// of redeliveries by passing nats.MaxDeliver when you Subscribe.
+// The default is infinite redeliveries.
+func (m *Msg) NakWithDelay(delay time.Duration, opts ...AckOpt) error {
+	if delay > 0 {
+		opts = append(opts, nakDelay(delay))
+	}
+	return m.ackReply(ackNak, false, opts...)
+}
+
+// Term tells the server to not redeliver this message, regardless of the value
+// of nats.MaxDeliver.
+func (m *Msg) Term(opts ...AckOpt) error {
+	return m.ackReply(ackTerm, false, opts...)
+}
+
+// InProgress tells the server that this message is being worked on. It resets
+// the redelivery timer on the server.
+func (m *Msg) InProgress(opts ...AckOpt) error {
+	return m.ackReply(ackProgress, false, opts...)
+}
+
+// MsgMetadata is the JetStream metadata associated with received messages.
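+// It can be obtained from a received message; an illustrative sketch:
+//
+//	meta, err := msg.Metadata()
+//	if err == nil {
+//		fmt.Printf("stream %s seq %d, delivered %d time(s)\n",
+//			meta.Stream, meta.Sequence.Stream, meta.NumDelivered)
+//	}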
+type MsgMetadata struct {
+	Sequence SequencePair
+	NumDelivered uint64
+	NumPending uint64
+	Timestamp time.Time
+	Stream string
+	Consumer string
+	Domain string
+}
+
+const (
+	ackDomainTokenPos = 2
+	ackAccHashTokenPos = 3
+	ackStreamTokenPos = 4
+	ackConsumerTokenPos = 5
+	ackNumDeliveredTokenPos = 6
+	ackStreamSeqTokenPos = 7
+	ackConsumerSeqTokenPos = 8
+	ackTimestampSeqTokenPos = 9
+	ackNumPendingTokenPos = 10
+)
+
+func getMetadataFields(subject string) ([]string, error) {
+	const v1TokenCounts = 9
+	const v2TokenCounts = 12
+	const noDomainName = "_"
+
+	const btsep = '.'
+	tsa := [v2TokenCounts]string{}
+	start, tokens := 0, tsa[:0]
+	for i := 0; i < len(subject); i++ {
+		if subject[i] == btsep {
+			tokens = append(tokens, subject[start:i])
+			start = i + 1
+		}
+	}
+	tokens = append(tokens, subject[start:])
+	//
+	// Newer server will include the domain name and account hash in the subject,
+	// and a token at the end.
+	//
+	// Old subject was:
+	// $JS.ACK.<stream>.<consumer>.<deliveredCount>.<sseq>.<cseq>.<tm>.<pending>
+	//
+	// New subject would be:
+	// $JS.ACK.<domain>.<account hash>.<stream>.<consumer>.<deliveredCount>.<sseq>.<cseq>.<tm>.<pending>.<a token with a random value>
+	//
+	// v1 has 9 tokens, v2 has 12, but we must not be strict on the 12th since
+	// it may be removed in the future. Also, the library has no use for it.
+	// The point is that a v2 ACK subject is valid if it has at least 11 tokens.
+	//
+	l := len(tokens)
+	// If lower than 9 or more than 9 but less than 11, report an error
+	if l < v1TokenCounts || (l > v1TokenCounts && l < v2TokenCounts-1) {
+		return nil, ErrNotJSMessage
+	}
+	if tokens[0] != "$JS" || tokens[1] != "ACK" {
+		return nil, ErrNotJSMessage
+	}
+	// For v1 style, we insert 2 empty tokens (domain and hash) so that the
+	// rest of the library references known fields at a constant location.
+	if l == 9 {
+		// Extend the array (we know the backend is big enough)
+		tokens = append(tokens, _EMPTY_, _EMPTY_)
+		// Move to the right anything that is after "ACK" token.
+		copy(tokens[ackDomainTokenPos+2:], tokens[ackDomainTokenPos:])
+		// Clear the domain and hash tokens
+		tokens[ackDomainTokenPos], tokens[ackAccHashTokenPos] = _EMPTY_, _EMPTY_
+
+	} else if tokens[ackDomainTokenPos] == noDomainName {
+		// If domain is "_", replace with empty value.
+		tokens[ackDomainTokenPos] = _EMPTY_
+	}
+	return tokens, nil
+}
+
+// Metadata retrieves the metadata from a JetStream message. This method will
+// return an error for non-JetStream Msgs.
+func (m *Msg) Metadata() (*MsgMetadata, error) {
+	if err := m.checkReply(); err != nil {
+		return nil, err
+	}
+
+	tokens, err := getMetadataFields(m.Reply)
+	if err != nil {
+		return nil, err
+	}
+
+	meta := &MsgMetadata{
+		Domain: tokens[ackDomainTokenPos],
+		NumDelivered: uint64(parseNum(tokens[ackNumDeliveredTokenPos])),
+		NumPending: uint64(parseNum(tokens[ackNumPendingTokenPos])),
+		Timestamp: time.Unix(0, parseNum(tokens[ackTimestampSeqTokenPos])),
+		Stream: tokens[ackStreamTokenPos],
+		Consumer: tokens[ackConsumerTokenPos],
+	}
+	meta.Sequence.Stream = uint64(parseNum(tokens[ackStreamSeqTokenPos]))
+	meta.Sequence.Consumer = uint64(parseNum(tokens[ackConsumerSeqTokenPos]))
+	return meta, nil
+}
+
+// Quick parser for positive numbers in ack reply encoding.
+func parseNum(d string) (n int64) {
+	if len(d) == 0 {
+		return -1
+	}
+
+	// ASCII numbers 0-9
+	const (
+		asciiZero = 48
+		asciiNine = 57
+	)
+
+	for _, dec := range d {
+		if dec < asciiZero || dec > asciiNine {
+			return -1
+		}
+		n = n*10 + (int64(dec) - asciiZero)
+	}
+	return n
+}
+
+// AckPolicy determines how the consumer should acknowledge delivered messages.
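+// For example (illustrative), the policies marshal to the JSON strings
+// "none", "all" and "explicit":
+//
+//	b, _ := json.Marshal(nats.AckExplicitPolicy)
+//	fmt.Println(string(b)) // "explicit"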
+// AckPolicy determines how the consumer should acknowledge delivered messages.
+type AckPolicy int
+
+const (
+	// AckNonePolicy requires no acks for delivered messages.
+	AckNonePolicy AckPolicy = iota
+
+	// AckAllPolicy when acking a sequence number, this implicitly acks all
+	// sequences below this one as well.
+	AckAllPolicy
+
+	// AckExplicitPolicy requires ack or nack for all messages.
+	AckExplicitPolicy
+
+	// For configuration mismatch check
+	ackPolicyNotSet = 99
+)
+
+func jsonString(s string) string {
+	return "\"" + s + "\""
+}
+
+func (p *AckPolicy) UnmarshalJSON(data []byte) error {
+	switch string(data) {
+	case jsonString("none"):
+		*p = AckNonePolicy
+	case jsonString("all"):
+		*p = AckAllPolicy
+	case jsonString("explicit"):
+		*p = AckExplicitPolicy
+	default:
+		return fmt.Errorf("nats: can not unmarshal %q", data)
+	}
+
+	return nil
+}
+
+func (p AckPolicy) MarshalJSON() ([]byte, error) {
+	switch p {
+	case AckNonePolicy:
+		return json.Marshal("none")
+	case AckAllPolicy:
+		return json.Marshal("all")
+	case AckExplicitPolicy:
+		return json.Marshal("explicit")
+	default:
+		return nil, fmt.Errorf("nats: unknown acknowledgement policy %v", p)
+	}
+}
+
+func (p AckPolicy) String() string {
+	switch p {
+	case AckNonePolicy:
+		return "AckNone"
+	case AckAllPolicy:
+		return "AckAll"
+	case AckExplicitPolicy:
+		return "AckExplicit"
+	case ackPolicyNotSet:
+		return "Not Initialized"
+	default:
+		return "Unknown AckPolicy"
+	}
+}
+
+// ReplayPolicy determines how the consumer should replay messages it already has queued in the stream.
+type ReplayPolicy int
+
+const (
+	// ReplayInstantPolicy will replay messages as fast as possible.
+	ReplayInstantPolicy ReplayPolicy = iota
+
+	// ReplayOriginalPolicy will maintain the same timing as the messages were received.
+	ReplayOriginalPolicy
+
+	// For configuration mismatch check
+	replayPolicyNotSet = 99
+)
+
+func (p *ReplayPolicy) UnmarshalJSON(data []byte) error {
+	switch string(data) {
+	case jsonString("instant"):
+		*p = ReplayInstantPolicy
+	case jsonString("original"):
+		*p = ReplayOriginalPolicy
+	default:
+		return fmt.Errorf("nats: can not unmarshal %q", data)
+	}
+
+	return nil
+}
+
+func (p ReplayPolicy) MarshalJSON() ([]byte, error) {
+	switch p {
+	case ReplayOriginalPolicy:
+		return json.Marshal("original")
+	case ReplayInstantPolicy:
+		return json.Marshal("instant")
+	default:
+		return nil, fmt.Errorf("nats: unknown replay policy %v", p)
+	}
+}
+
+var (
+	ackAck      = []byte("+ACK")
+	ackNak      = []byte("-NAK")
+	ackProgress = []byte("+WPI")
+	ackTerm     = []byte("+TERM")
+)
+
+// DeliverPolicy determines how the consumer should select the first message to deliver.
+type DeliverPolicy int
+
+const (
+	// DeliverAllPolicy starts delivering messages from the very beginning of a
+	// stream. This is the default.
+	DeliverAllPolicy DeliverPolicy = iota
+
+	// DeliverLastPolicy will start the consumer with the last sequence
+	// received.
+	DeliverLastPolicy
+
+	// DeliverNewPolicy will only deliver new messages that are sent after the
+	// consumer is created.
+	DeliverNewPolicy
+
+	// DeliverByStartSequencePolicy will deliver messages starting from a given
+	// sequence.
+	DeliverByStartSequencePolicy
+
+	// DeliverByStartTimePolicy will deliver messages starting from a given
+	// time.
+	DeliverByStartTimePolicy
+
+	// DeliverLastPerSubjectPolicy will start the consumer with the last message
+	// for all subjects received.
+	DeliverLastPerSubjectPolicy
+
+	// For configuration mismatch check
+	deliverPolicyNotSet = 99
+)
+
+func (p *DeliverPolicy) UnmarshalJSON(data []byte) error {
+	switch string(data) {
+	case jsonString("all"), jsonString("undefined"):
+		*p = DeliverAllPolicy
+	case jsonString("last"):
+		*p = DeliverLastPolicy
+	case jsonString("new"):
+		*p = DeliverNewPolicy
+	case jsonString("by_start_sequence"):
+		*p = DeliverByStartSequencePolicy
+	case jsonString("by_start_time"):
+		*p = DeliverByStartTimePolicy
+	case jsonString("last_per_subject"):
+		*p = DeliverLastPerSubjectPolicy
+	}
+
+	return nil
+}
+
+func (p DeliverPolicy) MarshalJSON() ([]byte, error) {
+	switch p {
+	case DeliverAllPolicy:
+		return json.Marshal("all")
+	case DeliverLastPolicy:
+		return json.Marshal("last")
+	case DeliverNewPolicy:
+		return json.Marshal("new")
+	case DeliverByStartSequencePolicy:
+		return json.Marshal("by_start_sequence")
+	case DeliverByStartTimePolicy:
+		return json.Marshal("by_start_time")
+	case DeliverLastPerSubjectPolicy:
+		return json.Marshal("last_per_subject")
+	default:
+		return nil, fmt.Errorf("nats: unknown deliver policy %v", p)
+	}
+}
+
+// RetentionPolicy determines how messages in a set are retained.
+type RetentionPolicy int
+
+const (
+	// LimitsPolicy (default) means that messages are retained until any given limit is reached.
+	// This could be one of MaxMsgs, MaxBytes, or MaxAge.
+	LimitsPolicy RetentionPolicy = iota
+	// InterestPolicy specifies that when all known observables have acknowledged a message it can be removed.
+	InterestPolicy
+	// WorkQueuePolicy specifies that when the first worker or subscriber acknowledges the message it can be removed.
+	WorkQueuePolicy
+)
+
+// DiscardPolicy determines how to proceed when limits of messages or bytes are
+// reached.
+type DiscardPolicy int
+
+const (
+	// DiscardOld will remove older messages to return to the limits. This is
+	// the default.
+	DiscardOld DiscardPolicy = iota
+	// DiscardNew will fail to store new messages.
+ DiscardNew +) + +const ( + limitsPolicyString = "limits" + interestPolicyString = "interest" + workQueuePolicyString = "workqueue" +) + +func (rp RetentionPolicy) String() string { + switch rp { + case LimitsPolicy: + return "Limits" + case InterestPolicy: + return "Interest" + case WorkQueuePolicy: + return "WorkQueue" + default: + return "Unknown Retention Policy" + } +} + +func (rp RetentionPolicy) MarshalJSON() ([]byte, error) { + switch rp { + case LimitsPolicy: + return json.Marshal(limitsPolicyString) + case InterestPolicy: + return json.Marshal(interestPolicyString) + case WorkQueuePolicy: + return json.Marshal(workQueuePolicyString) + default: + return nil, fmt.Errorf("nats: can not marshal %v", rp) + } +} + +func (rp *RetentionPolicy) UnmarshalJSON(data []byte) error { + switch string(data) { + case jsonString(limitsPolicyString): + *rp = LimitsPolicy + case jsonString(interestPolicyString): + *rp = InterestPolicy + case jsonString(workQueuePolicyString): + *rp = WorkQueuePolicy + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + return nil +} + +func (dp DiscardPolicy) String() string { + switch dp { + case DiscardOld: + return "DiscardOld" + case DiscardNew: + return "DiscardNew" + default: + return "Unknown Discard Policy" + } +} + +func (dp DiscardPolicy) MarshalJSON() ([]byte, error) { + switch dp { + case DiscardOld: + return json.Marshal("old") + case DiscardNew: + return json.Marshal("new") + default: + return nil, fmt.Errorf("nats: can not marshal %v", dp) + } +} + +func (dp *DiscardPolicy) UnmarshalJSON(data []byte) error { + switch strings.ToLower(string(data)) { + case jsonString("old"): + *dp = DiscardOld + case jsonString("new"): + *dp = DiscardNew + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + return nil +} + +// StorageType determines how messages are stored for retention. +type StorageType int + +const ( + // FileStorage specifies on disk storage. It's the default. + FileStorage StorageType = iota + // MemoryStorage specifies in memory only. + MemoryStorage +) + +const ( + memoryStorageString = "memory" + fileStorageString = "file" +) + +func (st StorageType) String() string { + switch st { + case MemoryStorage: + return "Memory" + case FileStorage: + return "File" + default: + return "Unknown Storage Type" + } +} + +func (st StorageType) MarshalJSON() ([]byte, error) { + switch st { + case MemoryStorage: + return json.Marshal(memoryStorageString) + case FileStorage: + return json.Marshal(fileStorageString) + default: + return nil, fmt.Errorf("nats: can not marshal %v", st) + } +} + +func (st *StorageType) UnmarshalJSON(data []byte) error { + switch string(data) { + case jsonString(memoryStorageString): + *st = MemoryStorage + case jsonString(fileStorageString): + *st = FileStorage + default: + return fmt.Errorf("nats: can not unmarshal %q", data) + } + return nil +} diff --git a/vendor/github.com/nats-io/nats.go/jserrors.go b/vendor/github.com/nats-io/nats.go/jserrors.go new file mode 100644 index 00000000..d7959ca8 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/jserrors.go @@ -0,0 +1,194 @@ +// Copyright 2020-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nats
+
+import (
+	"errors"
+	"fmt"
+)
+
+var (
+	// API errors
+
+	// ErrJetStreamNotEnabled is an error returned when JetStream is not enabled for an account.
+	ErrJetStreamNotEnabled JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabled, Description: "jetstream not enabled", Code: 503}}
+
+	// ErrJetStreamNotEnabledForAccount is an error returned when JetStream is not enabled for an account.
+	ErrJetStreamNotEnabledForAccount JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeJetStreamNotEnabledForAccount, Description: "jetstream not enabled for account", Code: 503}}
+
+	// ErrStreamNotFound is an error returned when a stream with the given name does not exist.
+	ErrStreamNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNotFound, Description: "stream not found", Code: 404}}
+
+	// ErrStreamNameAlreadyInUse is returned when a stream with the given name already exists and has a different configuration.
+	ErrStreamNameAlreadyInUse JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamNameInUse, Description: "stream name already in use", Code: 400}}
+
+	// ErrConsumerNotFound is an error returned when a consumer with the given name does not exist.
+	ErrConsumerNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeConsumerNotFound, Description: "consumer not found", Code: 404}}
+
+	// ErrMsgNotFound is returned when a message with the provided sequence number does not exist.
+	ErrMsgNotFound JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeMessageNotFound, Description: "message not found", Code: 404}}
+
+	// ErrBadRequest is returned when an invalid request is sent to the JetStream API.
+	ErrBadRequest JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeBadRequest, Description: "bad request", Code: 400}}
+
+	// Client errors
+
+	// ErrConsumerNameAlreadyInUse is an error returned when a consumer with the given name already exists.
+	ErrConsumerNameAlreadyInUse JetStreamError = &jsError{message: "consumer name already in use"}
+
+	// ErrConsumerNotActive is an error returned when the consumer is not active.
+	ErrConsumerNotActive JetStreamError = &jsError{message: "consumer not active"}
+
+	// ErrInvalidJSAck is returned when the JetStream ack from a message publish is invalid.
+	ErrInvalidJSAck JetStreamError = &jsError{message: "invalid jetstream publish response"}
+
+	// ErrStreamConfigRequired is returned when an empty stream configuration is supplied to add/update stream.
+	ErrStreamConfigRequired JetStreamError = &jsError{message: "stream configuration is required"}
+
+	// ErrStreamNameRequired is returned when the provided stream name is empty.
+	ErrStreamNameRequired JetStreamError = &jsError{message: "stream name is required"}
+
+	// ErrConsumerNameRequired is returned when the provided consumer durable name is empty.
+	ErrConsumerNameRequired JetStreamError = &jsError{message: "consumer name is required"}
+
+	// ErrConsumerConfigRequired is returned when an empty consumer configuration is supplied to add/update consumer.
+	ErrConsumerConfigRequired JetStreamError = &jsError{message: "consumer configuration is required"}
+
+	// ErrPullSubscribeToPushConsumer is returned when attempting to use PullSubscribe on a push consumer.
+	ErrPullSubscribeToPushConsumer JetStreamError = &jsError{message: "cannot pull subscribe to push based consumer"}
+
+	// ErrPullSubscribeRequired is returned when attempting to use subscribe methods not suitable for pull consumers.
+	ErrPullSubscribeRequired JetStreamError = &jsError{message: "must use pull subscribe to bind to pull based consumer"}
+
+	// ErrMsgAlreadyAckd is returned when attempting to acknowledge a message more than once.
+	ErrMsgAlreadyAckd JetStreamError = &jsError{message: "message was already acknowledged"}
+
+	// ErrNoStreamResponse is returned when there is no response from the stream (e.g. no responders error).
+	ErrNoStreamResponse JetStreamError = &jsError{message: "no response from stream"}
+
+	// ErrNotJSMessage is returned when attempting to get metadata from a non-JetStream message.
+	ErrNotJSMessage JetStreamError = &jsError{message: "not a jetstream message"}
+
+	// ErrInvalidStreamName is returned when the provided stream name is invalid (contains '.' or ' ').
+	ErrInvalidStreamName JetStreamError = &jsError{message: "invalid stream name"}
+
+	// ErrInvalidConsumerName is returned when the provided consumer name is invalid (contains '.' or ' ').
+	ErrInvalidConsumerName JetStreamError = &jsError{message: "invalid consumer name"}
+
+	// ErrNoMatchingStream is returned when stream lookup by subject is unsuccessful.
+	ErrNoMatchingStream JetStreamError = &jsError{message: "no stream matches subject"}
+
+	// ErrSubjectMismatch is returned when the provided subject does not match the consumer's filter subject.
+	ErrSubjectMismatch JetStreamError = &jsError{message: "subject does not match consumer"}
+
+	// ErrContextAndTimeout is returned when attempting to use both context and timeout.
+	ErrContextAndTimeout JetStreamError = &jsError{message: "context and timeout can not both be set"}
+
+	// ErrCantAckIfConsumerAckNone is returned when attempting to ack a message for a consumer with the AckNone policy set.
+	ErrCantAckIfConsumerAckNone JetStreamError = &jsError{message: "cannot acknowledge a message for a consumer with AckNone policy"}
+
+	// ErrConsumerDeleted is returned when attempting to send a pull request to a consumer which does not exist.
+	ErrConsumerDeleted JetStreamError = &jsError{message: "consumer deleted"}
+
+	// ErrConsumerLeadershipChanged is returned when pending requests are no longer valid after leadership has changed.
+	ErrConsumerLeadershipChanged JetStreamError = &jsError{message: "Leadership Changed"}
+
+	// DEPRECATED: ErrInvalidDurableName is no longer returned and will be removed in future releases.
+	// Use ErrInvalidConsumerName instead.
+	ErrInvalidDurableName = errors.New("nats: invalid durable name")
+)
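
// An illustrative sketch, not part of the vendored file: the sentinel values
// above can be matched with the standard errors package even when the server
// returns a fresh *APIError, because APIError implements Is (defined below).
// The helper name and the calling context are assumptions for the example.

package example

import (
	"errors"
	"fmt"

	"github.com/nats-io/nats.go"
)

// classifyJSError reports whether a JetStream error came from the server API
// or from client-side validation.
func classifyJSError(err error) {
	// Sentinel match: works for both client-side and API-backed errors.
	if errors.Is(err, nats.ErrStreamNotFound) {
		fmt.Println("stream does not exist")
		return
	}
	var jsErr nats.JetStreamError
	if errors.As(err, &jsErr) {
		if apiErr := jsErr.APIError(); apiErr != nil {
			// Server-side error: code and description come from the API response.
			fmt.Printf("JetStream API error %d: %s\n", apiErr.ErrorCode, apiErr.Description)
			return
		}
		// Client-side error: APIError() is nil by convention.
		fmt.Printf("client-side JetStream error: %v\n", jsErr)
	}
}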
+// ErrorCode represents error codes returned by the JetStream API.
+type ErrorCode uint16
+
+const (
+	JSErrCodeJetStreamNotEnabledForAccount ErrorCode = 10039
+	JSErrCodeJetStreamNotEnabled           ErrorCode = 10076
+	JSErrCodeInsufficientResourcesErr      ErrorCode = 10023
+
+	JSErrCodeStreamNotFound  ErrorCode = 10059
+	JSErrCodeStreamNameInUse ErrorCode = 10058
+
+	JSErrCodeConsumerNotFound      ErrorCode = 10014
+	JSErrCodeConsumerNameExists    ErrorCode = 10013
+	JSErrCodeConsumerAlreadyExists ErrorCode = 10105
+
+	JSErrCodeMessageNotFound ErrorCode = 10037
+
+	JSErrCodeBadRequest ErrorCode = 10003
+
+	JSErrCodeStreamWrongLastSequence ErrorCode = 10071
+)
+
+// APIError is included in all API responses if there was an error.
+type APIError struct {
+	Code        int       `json:"code"`
+	ErrorCode   ErrorCode `json:"err_code"`
+	Description string    `json:"description,omitempty"`
+}
+
+// Error prints the JetStream API error code and description.
+func (e *APIError) Error() string {
+	return fmt.Sprintf("nats: %s", e.Description)
+}
+
+// APIError implements the JetStreamError interface.
+func (e *APIError) APIError() *APIError {
+	return e
+}
+
+// Is matches against an APIError.
+func (e *APIError) Is(err error) bool {
+	if e == nil {
+		return false
+	}
+	// Extract internal APIError to match against.
+	var aerr *APIError
+	ok := errors.As(err, &aerr)
+	if !ok {
+		return ok
+	}
+	return e.ErrorCode == aerr.ErrorCode
+}
+
+// JetStreamError is an error result that happens when using JetStream.
+// In case of a client-side error, APIError() returns nil.
+type JetStreamError interface {
+	APIError() *APIError
+	error
+}
+
+type jsError struct {
+	apiErr  *APIError
+	message string
+}
+
+func (err *jsError) APIError() *APIError {
+	return err.apiErr
+}
+
+func (err *jsError) Error() string {
+	if err.apiErr != nil && err.apiErr.Description != "" {
+		return err.apiErr.Error()
+	}
+	return fmt.Sprintf("nats: %s", err.message)
+}
+
+func (err *jsError) Unwrap() error {
+	// Allow matching to embedded APIError in case there is one.
+	if err.apiErr == nil {
+		return nil
+	}
+	return err.apiErr
+}
diff --git a/vendor/github.com/nats-io/nats.go/jsm.go b/vendor/github.com/nats-io/nats.go/jsm.go
new file mode 100644
index 00000000..fbf85f5f
--- /dev/null
+++ b/vendor/github.com/nats-io/nats.go/jsm.go
@@ -0,0 +1,1590 @@
+// Copyright 2021-2022 The NATS Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nats
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// JetStreamManager manages JetStream Streams and Consumers.
+type JetStreamManager interface {
+	// AddStream creates a stream.
+	AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error)
+
+	// UpdateStream updates a stream.
+	UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error)
+
+	// DeleteStream deletes a stream.
+	DeleteStream(name string, opts ...JSOpt) error
+
+	// StreamInfo retrieves information from a stream.
+	StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error)
+
+	// PurgeStream purges messages on a stream.
+	PurgeStream(name string, opts ...JSOpt) error
+
+	// StreamsInfo can be used to retrieve a list of StreamInfo objects.
+	// DEPRECATED: Use Streams() instead.
+	StreamsInfo(opts ...JSOpt) <-chan *StreamInfo
+
+	// Streams can be used to retrieve a list of StreamInfo objects.
+	Streams(opts ...JSOpt) <-chan *StreamInfo
+
+	// StreamNames is used to retrieve a list of Stream names.
+	StreamNames(opts ...JSOpt) <-chan string
+
+	// GetMsg retrieves a raw stream message stored in JetStream by sequence number.
+	// Use options nats.DirectGet() or nats.DirectGetNext() to trigger retrieval
+	// directly from a distributed group of servers (leader and replicas).
+	// The stream must have been created/updated with the AllowDirect boolean.
+	GetMsg(name string, seq uint64, opts ...JSOpt) (*RawStreamMsg, error)
+
+	// GetLastMsg retrieves the last raw stream message stored in JetStream by subject.
+	// Use option nats.DirectGet() to trigger retrieval
+	// directly from a distributed group of servers (leader and replicas).
+	// The stream must have been created/updated with the AllowDirect boolean.
+	GetLastMsg(name, subject string, opts ...JSOpt) (*RawStreamMsg, error)
+
+	// DeleteMsg deletes a message from a stream. The message is marked as erased, but its value is not overwritten.
+	DeleteMsg(name string, seq uint64, opts ...JSOpt) error
+
+	// SecureDeleteMsg deletes a message from a stream. The deleted message is overwritten with random data.
+	// As a result, this operation is slower than DeleteMsg().
+	SecureDeleteMsg(name string, seq uint64, opts ...JSOpt) error
+
+	// AddConsumer adds a consumer to a stream.
+	AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error)
+
+	// UpdateConsumer updates an existing consumer.
+	UpdateConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error)
+
+	// DeleteConsumer deletes a consumer.
+	DeleteConsumer(stream, consumer string, opts ...JSOpt) error
+
+	// ConsumerInfo retrieves information of a consumer from a stream.
+	ConsumerInfo(stream, name string, opts ...JSOpt) (*ConsumerInfo, error)
+
+	// ConsumersInfo is used to retrieve a list of ConsumerInfo objects.
+	// DEPRECATED: Use Consumers() instead.
+	ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo
+
+	// Consumers is used to retrieve a list of ConsumerInfo objects.
+	Consumers(stream string, opts ...JSOpt) <-chan *ConsumerInfo
+
+	// ConsumerNames is used to retrieve a list of Consumer names.
+	ConsumerNames(stream string, opts ...JSOpt) <-chan string
+
+	// AccountInfo retrieves info about the JetStream usage from an account.
+	AccountInfo(opts ...JSOpt) (*AccountInfo, error)
+
+	// StreamNameBySubject returns a stream matching the given subject.
+	StreamNameBySubject(string, ...JSOpt) (string, error)
+}
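
// An illustrative sketch, not part of the vendored file, of typical
// JetStreamManager usage: create a stream, attach a durable consumer, and
// inspect the stream state. Assumes a JetStream-enabled server at
// nats.DefaultURL; the stream and consumer names are made up.

package example

import (
	"fmt"
	"time"

	"github.com/nats-io/nats.go"
)

func manageOrders() error {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		return err
	}
	defer nc.Close()

	js, err := nc.JetStream()
	if err != nil {
		return err
	}

	// Create the stream; ErrStreamNameAlreadyInUse is returned if a stream
	// with this name exists with a different configuration.
	if _, err := js.AddStream(&nats.StreamConfig{
		Name:     "ORDERS",
		Subjects: []string{"ORDERS.*"},
		Storage:  nats.FileStorage,
		MaxAge:   24 * time.Hour,
	}); err != nil {
		return err
	}

	if _, err := js.AddConsumer("ORDERS", &nats.ConsumerConfig{
		Durable:   "worker",
		AckPolicy: nats.AckExplicitPolicy,
	}); err != nil {
		return err
	}

	info, err := js.StreamInfo("ORDERS")
	if err != nil {
		return err
	}
	fmt.Printf("msgs=%d first_seq=%d last_seq=%d\n",
		info.State.Msgs, info.State.FirstSeq, info.State.LastSeq)
	return nil
}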
+// StreamConfig will determine the properties for a stream.
+// There are sensible defaults for most. If no subjects are
+// given the name will be used as the only subject.
+type StreamConfig struct {
+	Name                 string          `json:"name"`
+	Description          string          `json:"description,omitempty"`
+	Subjects             []string        `json:"subjects,omitempty"`
+	Retention            RetentionPolicy `json:"retention"`
+	MaxConsumers         int             `json:"max_consumers"`
+	MaxMsgs              int64           `json:"max_msgs"`
+	MaxBytes             int64           `json:"max_bytes"`
+	Discard              DiscardPolicy   `json:"discard"`
+	DiscardNewPerSubject bool            `json:"discard_new_per_subject,omitempty"`
+	MaxAge               time.Duration   `json:"max_age"`
+	MaxMsgsPerSubject    int64           `json:"max_msgs_per_subject"`
+	MaxMsgSize           int32           `json:"max_msg_size,omitempty"`
+	Storage              StorageType     `json:"storage"`
+	Replicas             int             `json:"num_replicas"`
+	NoAck                bool            `json:"no_ack,omitempty"`
+	Template             string          `json:"template_owner,omitempty"`
+	Duplicates           time.Duration   `json:"duplicate_window,omitempty"`
+	Placement            *Placement      `json:"placement,omitempty"`
+	Mirror               *StreamSource   `json:"mirror,omitempty"`
+	Sources              []*StreamSource `json:"sources,omitempty"`
+	Sealed               bool            `json:"sealed,omitempty"`
+	DenyDelete           bool            `json:"deny_delete,omitempty"`
+	DenyPurge            bool            `json:"deny_purge,omitempty"`
+	AllowRollup          bool            `json:"allow_rollup_hdrs,omitempty"`
+
+	// Allow republish of the message after being sequenced and stored.
+	RePublish *RePublish `json:"republish,omitempty"`
+
+	// Allow higher performance, direct access to get individual messages. E.g. KeyValue
+	AllowDirect bool `json:"allow_direct"`
+	// Allow higher performance and unified direct access for mirrors as well.
+	MirrorDirect bool `json:"mirror_direct"`
+}
+
+// RePublish is for republishing messages once committed to a stream. The original
+// subject is remapped from the subject pattern to the destination pattern.
+type RePublish struct {
+	Source      string `json:"src,omitempty"`
+	Destination string `json:"dest"`
+	HeadersOnly bool   `json:"headers_only,omitempty"`
+}
+
+// Placement is used to guide placement of streams in clustered JetStream.
+type Placement struct {
+	Cluster string   `json:"cluster"`
+	Tags    []string `json:"tags,omitempty"`
+}
+
+// StreamSource dictates how streams can source from other streams.
+type StreamSource struct {
+	Name          string          `json:"name"`
+	OptStartSeq   uint64          `json:"opt_start_seq,omitempty"`
+	OptStartTime  *time.Time      `json:"opt_start_time,omitempty"`
+	FilterSubject string          `json:"filter_subject,omitempty"`
+	External      *ExternalStream `json:"external,omitempty"`
+	Domain        string          `json:"-"`
+}
+
+// ExternalStream allows you to qualify access to a stream source in another
+// account.
+type ExternalStream struct {
+	APIPrefix     string `json:"api"`
+	DeliverPrefix string `json:"deliver,omitempty"`
+}
+
+// Helper for copying when we do not want to change user's version.
+func (ss *StreamSource) copy() *StreamSource {
+	nss := *ss
+	// Check pointers
+	if ss.OptStartTime != nil {
+		t := *ss.OptStartTime
+		nss.OptStartTime = &t
+	}
+	if ss.External != nil {
+		ext := *ss.External
+		nss.External = &ext
+	}
+	return &nss
+}
+
+// If we have a Domain, convert to the appropriate ext.APIPrefix.
+// This will change the stream source, so should be a copy passed in.
+func (ss *StreamSource) convertDomain() error {
+	if ss.Domain == _EMPTY_ {
+		return nil
+	}
+	if ss.External != nil {
+		// These should be mutually exclusive.
+		// TODO(dlc) - Make generic?
+		return errors.New("nats: domain and external are both set")
+	}
+	ss.External = &ExternalStream{APIPrefix: fmt.Sprintf(jsExtDomainT, ss.Domain)}
+	return nil
+}
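
// A minimal sketch, not part of the vendored file, showing the Domain field in
// action: when a StreamSource sets Domain, AddStream (later in this file)
// copies the source and convertDomain rewrites it into an ExternalStream API
// prefix derived from the domain. The names "ORDERS-MIRROR", "ORDERS" and
// "hub" are made up for the example.

package example

import "github.com/nats-io/nats.go"

func mirrorFromHub(js nats.JetStreamManager) error {
	_, err := js.AddStream(&nats.StreamConfig{
		Name: "ORDERS-MIRROR",
		Mirror: &nats.StreamSource{
			Name:   "ORDERS",
			Domain: "hub", // rewritten to an External API prefix for the "hub" domain
		},
	})
	return err
}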
+// apiResponse is a standard response from the JetStream JSON API.
+type apiResponse struct {
+	Type  string    `json:"type"`
+	Error *APIError `json:"error,omitempty"`
+}
+
+// apiPaged includes variables used to create paged responses from the JSON API.
+type apiPaged struct {
+	Total  int `json:"total"`
+	Offset int `json:"offset"`
+	Limit  int `json:"limit"`
+}
+
+// apiPagedRequest includes parameters allowing specific pages to be requested
+// from APIs responding with apiPaged.
+type apiPagedRequest struct {
+	Offset int `json:"offset,omitempty"`
+}
+
+// AccountInfo contains info about the JetStream usage from the current account.
+type AccountInfo struct {
+	Tier
+	Domain string          `json:"domain"`
+	API    APIStats        `json:"api"`
+	Tiers  map[string]Tier `json:"tiers"`
+}
+
+type Tier struct {
+	Memory    uint64        `json:"memory"`
+	Store     uint64        `json:"storage"`
+	Streams   int           `json:"streams"`
+	Consumers int           `json:"consumers"`
+	Limits    AccountLimits `json:"limits"`
+}
+
+// APIStats reports on API calls to JetStream for this account.
+type APIStats struct {
+	Total  uint64 `json:"total"`
+	Errors uint64 `json:"errors"`
+}
+
+// AccountLimits includes the JetStream limits of the current account.
+type AccountLimits struct {
+	MaxMemory            int64 `json:"max_memory"`
+	MaxStore             int64 `json:"max_storage"`
+	MaxStreams           int   `json:"max_streams"`
+	MaxConsumers         int   `json:"max_consumers"`
+	MaxAckPending        int   `json:"max_ack_pending"`
+	MemoryMaxStreamBytes int64 `json:"memory_max_stream_bytes"`
+	StoreMaxStreamBytes  int64 `json:"storage_max_stream_bytes"`
+	MaxBytesRequired     bool  `json:"max_bytes_required"`
+}
+
+type accountInfoResponse struct {
+	apiResponse
+	AccountInfo
+}
+
+// AccountInfo retrieves info about the JetStream usage from the current account.
+// If JetStream is not enabled, this will return ErrJetStreamNotEnabled.
+// Other errors can happen but are generally considered retryable.
+func (js *js) AccountInfo(opts ...JSOpt) (*AccountInfo, error) {
+	o, cancel, err := getJSContextOpts(js.opts, opts...)
+	if err != nil {
+		return nil, err
+	}
+	if cancel != nil {
+		defer cancel()
+	}
+
+	resp, err := js.apiRequestWithContext(o.ctx, js.apiSubj(apiAccountInfo), nil)
+	if err != nil {
+		// TODO: maybe nats server should never have no responder on this subject and always respond if they know there is no js to be had
+		if err == ErrNoResponders {
+			err = ErrJetStreamNotEnabled
+		}
+		return nil, err
+	}
+	var info accountInfoResponse
+	if err := json.Unmarshal(resp.Data, &info); err != nil {
+		return nil, err
+	}
+	if info.Error != nil {
+		// Internally checks based on error code instead of description match.
+		if errors.Is(info.Error, ErrJetStreamNotEnabledForAccount) {
+			return nil, ErrJetStreamNotEnabledForAccount
+		}
+		return nil, info.Error
+	}
+
+	return &info.AccountInfo, nil
+}
+
+type createConsumerRequest struct {
+	Stream string          `json:"stream_name"`
+	Config *ConsumerConfig `json:"config"`
+}
+
+type consumerResponse struct {
+	apiResponse
+	*ConsumerInfo
+}
+
+// AddConsumer will add a JetStream consumer.
+func (js *js) AddConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) {
+	if cfg == nil {
+		cfg = &ConsumerConfig{}
+	}
+	consumerName := cfg.Name
+	if consumerName == _EMPTY_ {
+		consumerName = cfg.Durable
+	}
+	if consumerName != _EMPTY_ {
+		consInfo, err := js.ConsumerInfo(stream, consumerName, opts...)
+		if err != nil && !errors.Is(err, ErrConsumerNotFound) && !errors.Is(err, ErrStreamNotFound) {
+			return nil, err
+		}
+
+		if consInfo != nil {
+			sameConfig := checkConfig(&consInfo.Config, cfg)
+			if sameConfig != nil {
+				return nil, fmt.Errorf("%w: creating consumer %q on stream %q", ErrConsumerNameAlreadyInUse, consumerName, stream)
+			} else {
+				return consInfo, nil
+			}
+		}
+	}
+
+	return js.upsertConsumer(stream, consumerName, cfg, opts...)
+}
+
+func (js *js) UpdateConsumer(stream string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) {
+	if cfg == nil {
+		return nil, ErrConsumerConfigRequired
+	}
+	consumerName := cfg.Name
+	if consumerName == _EMPTY_ {
+		consumerName = cfg.Durable
+	}
+	if consumerName == _EMPTY_ {
+		return nil, ErrConsumerNameRequired
+	}
+	return js.upsertConsumer(stream, consumerName, cfg, opts...)
+}
+
+func (js *js) upsertConsumer(stream, consumerName string, cfg *ConsumerConfig, opts ...JSOpt) (*ConsumerInfo, error) {
+	if err := checkStreamName(stream); err != nil {
+		return nil, err
+	}
+	o, cancel, err := getJSContextOpts(js.opts, opts...)
+	if err != nil {
+		return nil, err
+	}
+	if cancel != nil {
+		defer cancel()
+	}
+
+	req, err := json.Marshal(&createConsumerRequest{Stream: stream, Config: cfg})
+	if err != nil {
+		return nil, err
+	}
+
+	var ccSubj string
+	if consumerName == _EMPTY_ {
+		// if consumer name is empty, use the legacy ephemeral endpoint
+		ccSubj = fmt.Sprintf(apiLegacyConsumerCreateT, stream)
+	} else if err := checkConsumerName(consumerName); err != nil {
+		return nil, err
+	} else if !js.nc.serverMinVersion(2, 9, 0) || (cfg.Durable != "" && js.opts.featureFlags.useDurableConsumerCreate) {
+		// if server version is lower than 2.9.0 or user set the useDurableConsumerCreate flag, use the legacy DURABLE.CREATE endpoint
+		ccSubj = fmt.Sprintf(apiDurableCreateT, stream, consumerName)
+	} else {
+		// if above server version 2.9.0, use the endpoints with consumer name
+		if cfg.FilterSubject == _EMPTY_ || cfg.FilterSubject == ">" {
+			ccSubj = fmt.Sprintf(apiConsumerCreateT, stream, consumerName)
+		} else {
+			ccSubj = fmt.Sprintf(apiConsumerCreateWithFilterSubjectT, stream, consumerName, cfg.FilterSubject)
+		}
+	}
+
+	resp, err := js.apiRequestWithContext(o.ctx, js.apiSubj(ccSubj), req)
+	if err != nil {
+		if err == ErrNoResponders {
+			err = ErrJetStreamNotEnabled
+		}
+		return nil, err
+	}
+	var info consumerResponse
+	err = json.Unmarshal(resp.Data, &info)
+	if err != nil {
+		return nil, err
+	}
+	if info.Error != nil {
+		if errors.Is(info.Error, ErrStreamNotFound) {
+			return nil, ErrStreamNotFound
+		}
+		if errors.Is(info.Error, ErrConsumerNotFound) {
+			return nil, ErrConsumerNotFound
+		}
+		return nil, info.Error
+	}
+	return info.ConsumerInfo, nil
+}
+
+// consumerDeleteResponse is the response for a Consumer delete request.
+type consumerDeleteResponse struct {
+	apiResponse
+	Success bool `json:"success,omitempty"`
+}
+
+func checkStreamName(stream string) error {
+	if stream == _EMPTY_ {
+		return ErrStreamNameRequired
+	}
+	if strings.ContainsAny(stream, ". ") {
+		return ErrInvalidStreamName
+	}
+	return nil
+}
+
+// Check that the consumer name is not empty and is valid (does not contain "." and " ").
+// Additional consumer name validation is done in nats-server.
+// Returns ErrConsumerNameRequired if consumer name is empty, ErrInvalidConsumerName if invalid, otherwise nil.
+func checkConsumerName(consumer string) error {
+	if consumer == _EMPTY_ {
+		return ErrConsumerNameRequired
+	}
+	if strings.ContainsAny(consumer, ". ") {
+		return ErrInvalidConsumerName
+	}
+	return nil
+}
+
+// DeleteConsumer deletes a Consumer.
+func (js *js) DeleteConsumer(stream, consumer string, opts ...JSOpt) error {
+	if err := checkStreamName(stream); err != nil {
+		return err
+	}
+	if err := checkConsumerName(consumer); err != nil {
+		return err
+	}
+	o, cancel, err := getJSContextOpts(js.opts, opts...)
+	if err != nil {
+		return err
+	}
+	if cancel != nil {
+		defer cancel()
+	}
+
+	dcSubj := js.apiSubj(fmt.Sprintf(apiConsumerDeleteT, stream, consumer))
+	r, err := js.apiRequestWithContext(o.ctx, dcSubj, nil)
+	if err != nil {
+		return err
+	}
+	var resp consumerDeleteResponse
+	if err := json.Unmarshal(r.Data, &resp); err != nil {
+		return err
+	}
+
+	if resp.Error != nil {
+		if errors.Is(resp.Error, ErrConsumerNotFound) {
+			return ErrConsumerNotFound
+		}
+		return resp.Error
+	}
+	return nil
+}
+
+// ConsumerInfo returns information about a Consumer.
+func (js *js) ConsumerInfo(stream, consumer string, opts ...JSOpt) (*ConsumerInfo, error) {
+	if err := checkStreamName(stream); err != nil {
+		return nil, err
+	}
+	if err := checkConsumerName(consumer); err != nil {
+		return nil, err
+	}
+	o, cancel, err := getJSContextOpts(js.opts, opts...)
+	if err != nil {
+		return nil, err
+	}
+	if cancel != nil {
+		defer cancel()
+	}
+	return js.getConsumerInfoContext(o.ctx, stream, consumer)
+}
+
+// consumerLister fetches pages of ConsumerInfo objects. This object is not
+// safe to use for multiple threads.
+type consumerLister struct {
+	stream string
+	js     *js
+
+	err      error
+	offset   int
+	page     []*ConsumerInfo
+	pageInfo *apiPaged
+}
+
+// consumersRequest is the type used for Consumers requests.
+type consumersRequest struct {
+	apiPagedRequest
+}
+
+// consumerListResponse is the response for a Consumers List request.
+type consumerListResponse struct {
+	apiResponse
+	apiPaged
+	Consumers []*ConsumerInfo `json:"consumers"`
+}
+
+// Next fetches the next ConsumerInfo page.
+func (c *consumerLister) Next() bool {
+	if c.err != nil {
+		return false
+	}
+	if err := checkStreamName(c.stream); err != nil {
+		c.err = err
+		return false
+	}
+	if c.pageInfo != nil && c.offset >= c.pageInfo.Total {
+		return false
+	}
+
+	req, err := json.Marshal(consumersRequest{
+		apiPagedRequest: apiPagedRequest{Offset: c.offset},
+	})
+	if err != nil {
+		c.err = err
+		return false
+	}
+
+	var cancel context.CancelFunc
+	ctx := c.js.opts.ctx
+	if ctx == nil {
+		ctx, cancel = context.WithTimeout(context.Background(), c.js.opts.wait)
+		defer cancel()
+	}
+
+	clSubj := c.js.apiSubj(fmt.Sprintf(apiConsumerListT, c.stream))
+	r, err := c.js.apiRequestWithContext(ctx, clSubj, req)
+	if err != nil {
+		c.err = err
+		return false
+	}
+	var resp consumerListResponse
+	if err := json.Unmarshal(r.Data, &resp); err != nil {
+		c.err = err
+		return false
+	}
+	if resp.Error != nil {
+		c.err = resp.Error
+		return false
+	}
+
+	c.pageInfo = &resp.apiPaged
+	c.page = resp.Consumers
+	c.offset += len(c.page)
+	return true
+}
+
+// Page returns the current ConsumerInfo page.
+func (c *consumerLister) Page() []*ConsumerInfo {
+	return c.page
+}
+
+// Err returns any errors found while fetching pages.
+func (c *consumerLister) Err() error {
+	return c.err
+}
+
+// Consumers is used to retrieve a list of ConsumerInfo objects.
+func (jsc *js) Consumers(stream string, opts ...JSOpt) <-chan *ConsumerInfo {
+	o, cancel, err := getJSContextOpts(jsc.opts, opts...)
+ if err != nil { + return nil + } + + ch := make(chan *ConsumerInfo) + l := &consumerLister{js: &js{nc: jsc.nc, opts: o}, stream: stream} + go func() { + if cancel != nil { + defer cancel() + } + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + select { + case ch <- info: + case <-o.ctx.Done(): + return + } + } + } + }() + + return ch +} + +// ConsumersInfo is used to retrieve a list of ConsumerInfo objects. +// DEPRECATED: Use Consumers() instead. +func (jsc *js) ConsumersInfo(stream string, opts ...JSOpt) <-chan *ConsumerInfo { + return jsc.Consumers(stream, opts...) +} + +type consumerNamesLister struct { + stream string + js *js + + err error + offset int + page []string + pageInfo *apiPaged +} + +// consumerNamesListResponse is the response for a Consumers Names List request. +type consumerNamesListResponse struct { + apiResponse + apiPaged + Consumers []string `json:"consumers"` +} + +// Next fetches the next consumer names page. +func (c *consumerNamesLister) Next() bool { + if c.err != nil { + return false + } + if err := checkStreamName(c.stream); err != nil { + c.err = err + return false + } + if c.pageInfo != nil && c.offset >= c.pageInfo.Total { + return false + } + + var cancel context.CancelFunc + ctx := c.js.opts.ctx + if ctx == nil { + ctx, cancel = context.WithTimeout(context.Background(), c.js.opts.wait) + defer cancel() + } + + req, err := json.Marshal(consumersRequest{ + apiPagedRequest: apiPagedRequest{Offset: c.offset}, + }) + if err != nil { + c.err = err + return false + } + clSubj := c.js.apiSubj(fmt.Sprintf(apiConsumerNamesT, c.stream)) + r, err := c.js.apiRequestWithContext(ctx, clSubj, req) + if err != nil { + c.err = err + return false + } + var resp consumerNamesListResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + c.err = err + return false + } + if resp.Error != nil { + c.err = resp.Error + return false + } + + c.pageInfo = &resp.apiPaged + c.page = resp.Consumers + c.offset += len(c.page) + return true +} + +// Page returns the current ConsumerInfo page. +func (c *consumerNamesLister) Page() []string { + return c.page +} + +// Err returns any errors found while fetching pages. +func (c *consumerNamesLister) Err() error { + return c.err +} + +// ConsumerNames is used to retrieve a list of Consumer names. +func (jsc *js) ConsumerNames(stream string, opts ...JSOpt) <-chan string { + o, cancel, err := getJSContextOpts(jsc.opts, opts...) + if err != nil { + return nil + } + + ch := make(chan string) + l := &consumerNamesLister{stream: stream, js: &js{nc: jsc.nc, opts: o}} + go func() { + if cancel != nil { + defer cancel() + } + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + select { + case ch <- info: + case <-o.ctx.Done(): + return + } + } + } + }() + + return ch +} + +// streamCreateResponse stream creation. +type streamCreateResponse struct { + apiResponse + *StreamInfo +} + +func (js *js) AddStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { + if cfg == nil { + return nil, ErrStreamConfigRequired + } + if err := checkStreamName(cfg.Name); err != nil { + return nil, err + } + o, cancel, err := getJSContextOpts(js.opts, opts...) + if err != nil { + return nil, err + } + if cancel != nil { + defer cancel() + } + + // In case we need to change anything, copy so we do not change the caller's version. + ncfg := *cfg + + // If we have a mirror and an external domain, convert to ext.APIPrefix. 
+ if cfg.Mirror != nil && cfg.Mirror.Domain != _EMPTY_ { + // Copy so we do not change the caller's version. + ncfg.Mirror = ncfg.Mirror.copy() + if err := ncfg.Mirror.convertDomain(); err != nil { + return nil, err + } + } + // Check sources for the same. + if len(ncfg.Sources) > 0 { + ncfg.Sources = append([]*StreamSource(nil), ncfg.Sources...) + for i, ss := range ncfg.Sources { + if ss.Domain != _EMPTY_ { + ncfg.Sources[i] = ss.copy() + if err := ncfg.Sources[i].convertDomain(); err != nil { + return nil, err + } + } + } + } + + req, err := json.Marshal(&ncfg) + if err != nil { + return nil, err + } + + csSubj := js.apiSubj(fmt.Sprintf(apiStreamCreateT, cfg.Name)) + r, err := js.apiRequestWithContext(o.ctx, csSubj, req) + if err != nil { + return nil, err + } + var resp streamCreateResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + return nil, err + } + if resp.Error != nil { + if errors.Is(resp.Error, ErrStreamNameAlreadyInUse) { + return nil, ErrStreamNameAlreadyInUse + } + return nil, resp.Error + } + + return resp.StreamInfo, nil +} + +type ( + // StreamInfoRequest contains additional option to return + StreamInfoRequest struct { + apiPagedRequest + // DeletedDetails when true includes information about deleted messages + DeletedDetails bool `json:"deleted_details,omitempty"` + // SubjectsFilter when set, returns information on the matched subjects + SubjectsFilter string `json:"subjects_filter,omitempty"` + } + streamInfoResponse = struct { + apiResponse + apiPaged + *StreamInfo + } +) + +func (js *js) StreamInfo(stream string, opts ...JSOpt) (*StreamInfo, error) { + if err := checkStreamName(stream); err != nil { + return nil, err + } + o, cancel, err := getJSContextOpts(js.opts, opts...) + if err != nil { + return nil, err + } + if cancel != nil { + defer cancel() + } + + var i int + var subjectMessagesMap map[string]uint64 + var req []byte + var requestPayload bool + + var siOpts StreamInfoRequest + if o.streamInfoOpts != nil { + requestPayload = true + siOpts = *o.streamInfoOpts + } + + for { + if requestPayload { + siOpts.Offset = i + if req, err = json.Marshal(&siOpts); err != nil { + return nil, err + } + } + + siSubj := js.apiSubj(fmt.Sprintf(apiStreamInfoT, stream)) + + r, err := js.apiRequestWithContext(o.ctx, siSubj, req) + if err != nil { + return nil, err + } + + var resp streamInfoResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + return nil, err + } + + if resp.Error != nil { + if errors.Is(resp.Error, ErrStreamNotFound) { + return nil, ErrStreamNotFound + } + return nil, resp.Error + } + + var total int + // for backwards compatibility + if resp.Total != 0 { + total = resp.Total + } else { + total = len(resp.State.Subjects) + } + + if requestPayload && len(resp.StreamInfo.State.Subjects) > 0 { + if subjectMessagesMap == nil { + subjectMessagesMap = make(map[string]uint64, total) + } + + for k, j := range resp.State.Subjects { + subjectMessagesMap[k] = j + i++ + } + } + + if i >= total { + if requestPayload { + resp.StreamInfo.State.Subjects = subjectMessagesMap + } + return resp.StreamInfo, nil + } + } +} + +// StreamInfo shows config and current state for this stream. 
+type StreamInfo struct { + Config StreamConfig `json:"config"` + Created time.Time `json:"created"` + State StreamState `json:"state"` + Cluster *ClusterInfo `json:"cluster,omitempty"` + Mirror *StreamSourceInfo `json:"mirror,omitempty"` + Sources []*StreamSourceInfo `json:"sources,omitempty"` + Alternates []*StreamAlternate `json:"alternates,omitempty"` +} + +// StreamAlternate is an alternate stream represented by a mirror. +type StreamAlternate struct { + Name string `json:"name"` + Domain string `json:"domain,omitempty"` + Cluster string `json:"cluster"` +} + +// StreamSourceInfo shows information about an upstream stream source. +type StreamSourceInfo struct { + Name string `json:"name"` + Lag uint64 `json:"lag"` + Active time.Duration `json:"active"` + External *ExternalStream `json:"external"` + Error *APIError `json:"error"` +} + +// StreamState is information about the given stream. +type StreamState struct { + Msgs uint64 `json:"messages"` + Bytes uint64 `json:"bytes"` + FirstSeq uint64 `json:"first_seq"` + FirstTime time.Time `json:"first_ts"` + LastSeq uint64 `json:"last_seq"` + LastTime time.Time `json:"last_ts"` + Consumers int `json:"consumer_count"` + Deleted []uint64 `json:"deleted"` + NumDeleted int `json:"num_deleted"` + NumSubjects uint64 `json:"num_subjects"` + Subjects map[string]uint64 `json:"subjects"` +} + +// ClusterInfo shows information about the underlying set of servers +// that make up the stream or consumer. +type ClusterInfo struct { + Name string `json:"name,omitempty"` + Leader string `json:"leader,omitempty"` + Replicas []*PeerInfo `json:"replicas,omitempty"` +} + +// PeerInfo shows information about all the peers in the cluster that +// are supporting the stream or consumer. +type PeerInfo struct { + Name string `json:"name"` + Current bool `json:"current"` + Offline bool `json:"offline,omitempty"` + Active time.Duration `json:"active"` + Lag uint64 `json:"lag,omitempty"` +} + +// UpdateStream updates a Stream. +func (js *js) UpdateStream(cfg *StreamConfig, opts ...JSOpt) (*StreamInfo, error) { + if cfg == nil { + return nil, ErrStreamConfigRequired + } + if err := checkStreamName(cfg.Name); err != nil { + return nil, err + } + o, cancel, err := getJSContextOpts(js.opts, opts...) + if err != nil { + return nil, err + } + if cancel != nil { + defer cancel() + } + + req, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + usSubj := js.apiSubj(fmt.Sprintf(apiStreamUpdateT, cfg.Name)) + r, err := js.apiRequestWithContext(o.ctx, usSubj, req) + if err != nil { + return nil, err + } + var resp streamInfoResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + return nil, err + } + if resp.Error != nil { + if errors.Is(resp.Error, ErrStreamNotFound) { + return nil, ErrStreamNotFound + } + return nil, resp.Error + } + return resp.StreamInfo, nil +} + +// streamDeleteResponse is the response for a Stream delete request. +type streamDeleteResponse struct { + apiResponse + Success bool `json:"success,omitempty"` +} + +// DeleteStream deletes a Stream. +func (js *js) DeleteStream(name string, opts ...JSOpt) error { + if err := checkStreamName(name); err != nil { + return err + } + o, cancel, err := getJSContextOpts(js.opts, opts...) 
+ if err != nil { + return err + } + if cancel != nil { + defer cancel() + } + + dsSubj := js.apiSubj(fmt.Sprintf(apiStreamDeleteT, name)) + r, err := js.apiRequestWithContext(o.ctx, dsSubj, nil) + if err != nil { + return err + } + var resp streamDeleteResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + return err + } + + if resp.Error != nil { + if errors.Is(resp.Error, ErrStreamNotFound) { + return ErrStreamNotFound + } + return resp.Error + } + return nil +} + +type apiMsgGetRequest struct { + Seq uint64 `json:"seq,omitempty"` + LastFor string `json:"last_by_subj,omitempty"` + NextFor string `json:"next_by_subj,omitempty"` +} + +// RawStreamMsg is a raw message stored in JetStream. +type RawStreamMsg struct { + Subject string + Sequence uint64 + Header Header + Data []byte + Time time.Time +} + +// storedMsg is a raw message stored in JetStream. +type storedMsg struct { + Subject string `json:"subject"` + Sequence uint64 `json:"seq"` + Header []byte `json:"hdrs,omitempty"` + Data []byte `json:"data,omitempty"` + Time time.Time `json:"time"` +} + +// apiMsgGetResponse is the response for a Stream get request. +type apiMsgGetResponse struct { + apiResponse + Message *storedMsg `json:"message,omitempty"` +} + +// GetLastMsg retrieves the last raw stream message stored in JetStream by subject. +func (js *js) GetLastMsg(name, subject string, opts ...JSOpt) (*RawStreamMsg, error) { + return js.getMsg(name, &apiMsgGetRequest{LastFor: subject}, opts...) +} + +// GetMsg retrieves a raw stream message stored in JetStream by sequence number. +func (js *js) GetMsg(name string, seq uint64, opts ...JSOpt) (*RawStreamMsg, error) { + return js.getMsg(name, &apiMsgGetRequest{Seq: seq}, opts...) +} + +// Low level getMsg +func (js *js) getMsg(name string, mreq *apiMsgGetRequest, opts ...JSOpt) (*RawStreamMsg, error) { + o, cancel, err := getJSContextOpts(js.opts, opts...) + if err != nil { + return nil, err + } + if cancel != nil { + defer cancel() + } + + if err := checkStreamName(name); err != nil { + return nil, err + } + + var apiSubj string + if o.directGet && mreq.LastFor != _EMPTY_ { + apiSubj = apiDirectMsgGetLastBySubjectT + dsSubj := js.apiSubj(fmt.Sprintf(apiSubj, name, mreq.LastFor)) + r, err := js.apiRequestWithContext(o.ctx, dsSubj, nil) + if err != nil { + return nil, err + } + return convertDirectGetMsgResponseToMsg(name, r) + } + + if o.directGet { + apiSubj = apiDirectMsgGetT + mreq.NextFor = o.directNextFor + } else { + apiSubj = apiMsgGetT + } + + req, err := json.Marshal(mreq) + if err != nil { + return nil, err + } + + dsSubj := js.apiSubj(fmt.Sprintf(apiSubj, name)) + r, err := js.apiRequestWithContext(o.ctx, dsSubj, req) + if err != nil { + return nil, err + } + + if o.directGet { + return convertDirectGetMsgResponseToMsg(name, r) + } + + var resp apiMsgGetResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + return nil, err + } + if resp.Error != nil { + if errors.Is(resp.Error, ErrMsgNotFound) { + return nil, ErrMsgNotFound + } + if errors.Is(resp.Error, ErrStreamNotFound) { + return nil, ErrStreamNotFound + } + return nil, resp.Error + } + + msg := resp.Message + + var hdr Header + if len(msg.Header) > 0 { + hdr, err = decodeHeadersMsg(msg.Header) + if err != nil { + return nil, err + } + } + + return &RawStreamMsg{ + Subject: msg.Subject, + Sequence: msg.Sequence, + Header: hdr, + Data: msg.Data, + Time: msg.Time, + }, nil +} + +func convertDirectGetMsgResponseToMsg(name string, r *Msg) (*RawStreamMsg, error) { + // Check for 404/408. 
We would get a no-payload message and a "Status" header + if len(r.Data) == 0 { + val := r.Header.Get(statusHdr) + if val != _EMPTY_ { + switch val { + case noMessagesSts: + return nil, ErrMsgNotFound + default: + desc := r.Header.Get(descrHdr) + if desc == _EMPTY_ { + desc = "unable to get message" + } + return nil, fmt.Errorf("nats: %s", desc) + } + } + } + // Check for headers that give us the required information to + // reconstruct the message. + if len(r.Header) == 0 { + return nil, fmt.Errorf("nats: response should have headers") + } + stream := r.Header.Get(JSStream) + if stream == _EMPTY_ { + return nil, fmt.Errorf("nats: missing stream header") + } + + // Mirrors can now answer direct gets, so removing check for name equality. + // TODO(dlc) - We could have server also have a header with origin and check that? + + seqStr := r.Header.Get(JSSequence) + if seqStr == _EMPTY_ { + return nil, fmt.Errorf("nats: missing sequence header") + } + seq, err := strconv.ParseUint(seqStr, 10, 64) + if err != nil { + return nil, fmt.Errorf("nats: invalid sequence header '%s': %v", seqStr, err) + } + timeStr := r.Header.Get(JSTimeStamp) + if timeStr == _EMPTY_ { + return nil, fmt.Errorf("nats: missing timestamp header") + } + // Temporary code: the server in main branch is sending with format + // "2006-01-02 15:04:05.999999999 +0000 UTC", but will be changed + // to use format RFC3339Nano. Because of server test deps/cycle, + // support both until the server PR lands. + tm, err := time.Parse(time.RFC3339Nano, timeStr) + if err != nil { + tm, err = time.Parse("2006-01-02 15:04:05.999999999 +0000 UTC", timeStr) + if err != nil { + return nil, fmt.Errorf("nats: invalid timestamp header '%s': %v", timeStr, err) + } + } + subj := r.Header.Get(JSSubject) + if subj == _EMPTY_ { + return nil, fmt.Errorf("nats: missing subject header") + } + return &RawStreamMsg{ + Subject: subj, + Sequence: seq, + Header: r.Header, + Data: r.Data, + Time: tm, + }, nil +} + +type msgDeleteRequest struct { + Seq uint64 `json:"seq"` + NoErase bool `json:"no_erase,omitempty"` +} + +// msgDeleteResponse is the response for a Stream delete request. +type msgDeleteResponse struct { + apiResponse + Success bool `json:"success,omitempty"` +} + +// DeleteMsg deletes a message from a stream. +// The message is marked as erased, but not overwritten +func (js *js) DeleteMsg(name string, seq uint64, opts ...JSOpt) error { + o, cancel, err := getJSContextOpts(js.opts, opts...) + if err != nil { + return err + } + if cancel != nil { + defer cancel() + } + + return js.deleteMsg(o.ctx, name, &msgDeleteRequest{Seq: seq, NoErase: true}) +} + +// SecureDeleteMsg deletes a message from a stream. The deleted message is overwritten with random data +// As a result, this operation is slower than DeleteMsg() +func (js *js) SecureDeleteMsg(name string, seq uint64, opts ...JSOpt) error { + o, cancel, err := getJSContextOpts(js.opts, opts...) 
+ if err != nil { + return err + } + if cancel != nil { + defer cancel() + } + + return js.deleteMsg(o.ctx, name, &msgDeleteRequest{Seq: seq}) +} + +func (js *js) deleteMsg(ctx context.Context, stream string, req *msgDeleteRequest) error { + if err := checkStreamName(stream); err != nil { + return err + } + reqJSON, err := json.Marshal(req) + if err != nil { + return err + } + + dsSubj := js.apiSubj(fmt.Sprintf(apiMsgDeleteT, stream)) + r, err := js.apiRequestWithContext(ctx, dsSubj, reqJSON) + if err != nil { + return err + } + var resp msgDeleteResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + return err + } + if resp.Error != nil { + return resp.Error + } + return nil +} + +// StreamPurgeRequest is optional request information to the purge API. +type StreamPurgeRequest struct { + // Purge up to but not including sequence. + Sequence uint64 `json:"seq,omitempty"` + // Subject to match against messages for the purge command. + Subject string `json:"filter,omitempty"` + // Number of messages to keep. + Keep uint64 `json:"keep,omitempty"` +} + +type streamPurgeResponse struct { + apiResponse + Success bool `json:"success,omitempty"` + Purged uint64 `json:"purged"` +} + +// PurgeStream purges messages on a Stream. +func (js *js) PurgeStream(stream string, opts ...JSOpt) error { + if err := checkStreamName(stream); err != nil { + return err + } + var req *StreamPurgeRequest + var ok bool + for _, opt := range opts { + // For PurgeStream, only request body opt is relevant + if req, ok = opt.(*StreamPurgeRequest); ok { + break + } + } + return js.purgeStream(stream, req) +} + +func (js *js) purgeStream(stream string, req *StreamPurgeRequest, opts ...JSOpt) error { + o, cancel, err := getJSContextOpts(js.opts, opts...) + if err != nil { + return err + } + if cancel != nil { + defer cancel() + } + + var b []byte + if req != nil { + if b, err = json.Marshal(req); err != nil { + return err + } + } + + psSubj := js.apiSubj(fmt.Sprintf(apiStreamPurgeT, stream)) + r, err := js.apiRequestWithContext(o.ctx, psSubj, b) + if err != nil { + return err + } + var resp streamPurgeResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + return err + } + if resp.Error != nil { + if errors.Is(resp.Error, ErrBadRequest) { + return fmt.Errorf("%w: %s", ErrBadRequest, "invalid purge request body") + } + return resp.Error + } + return nil +} + +// streamLister fetches pages of StreamInfo objects. This object is not safe +// to use for multiple threads. +type streamLister struct { + js *js + page []*StreamInfo + err error + + offset int + pageInfo *apiPaged +} + +// streamListResponse list of detailed stream information. +// A nil request is valid and means all streams. +type streamListResponse struct { + apiResponse + apiPaged + Streams []*StreamInfo `json:"streams"` +} + +// streamNamesRequest is used for Stream Name requests. +type streamNamesRequest struct { + apiPagedRequest + // These are filters that can be applied to the list. + Subject string `json:"subject,omitempty"` +} + +// Next fetches the next StreamInfo page. 
+func (s *streamLister) Next() bool { + if s.err != nil { + return false + } + if s.pageInfo != nil && s.offset >= s.pageInfo.Total { + return false + } + + req, err := json.Marshal(streamNamesRequest{ + apiPagedRequest: apiPagedRequest{Offset: s.offset}, + Subject: s.js.opts.streamListSubject, + }) + if err != nil { + s.err = err + return false + } + + var cancel context.CancelFunc + ctx := s.js.opts.ctx + if ctx == nil { + ctx, cancel = context.WithTimeout(context.Background(), s.js.opts.wait) + defer cancel() + } + + slSubj := s.js.apiSubj(apiStreamListT) + r, err := s.js.apiRequestWithContext(ctx, slSubj, req) + if err != nil { + s.err = err + return false + } + var resp streamListResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + s.err = err + return false + } + if resp.Error != nil { + s.err = resp.Error + return false + } + + s.pageInfo = &resp.apiPaged + s.page = resp.Streams + s.offset += len(s.page) + return true +} + +// Page returns the current StreamInfo page. +func (s *streamLister) Page() []*StreamInfo { + return s.page +} + +// Err returns any errors found while fetching pages. +func (s *streamLister) Err() error { + return s.err +} + +// Streams can be used to retrieve a list of StreamInfo objects. +func (jsc *js) Streams(opts ...JSOpt) <-chan *StreamInfo { + o, cancel, err := getJSContextOpts(jsc.opts, opts...) + if err != nil { + return nil + } + + ch := make(chan *StreamInfo) + l := &streamLister{js: &js{nc: jsc.nc, opts: o}} + go func() { + if cancel != nil { + defer cancel() + } + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + select { + case ch <- info: + case <-o.ctx.Done(): + return + } + } + } + }() + + return ch +} + +// StreamsInfo can be used to retrieve a list of StreamInfo objects. +// DEPRECATED: Use Streams() instead. +func (jsc *js) StreamsInfo(opts ...JSOpt) <-chan *StreamInfo { + return jsc.Streams(opts...) +} + +type streamNamesLister struct { + js *js + + err error + offset int + page []string + pageInfo *apiPaged +} + +// Next fetches the next stream names page. +func (l *streamNamesLister) Next() bool { + if l.err != nil { + return false + } + if l.pageInfo != nil && l.offset >= l.pageInfo.Total { + return false + } + + var cancel context.CancelFunc + ctx := l.js.opts.ctx + if ctx == nil { + ctx, cancel = context.WithTimeout(context.Background(), l.js.opts.wait) + defer cancel() + } + + req, err := json.Marshal(streamNamesRequest{ + apiPagedRequest: apiPagedRequest{Offset: l.offset}, + Subject: l.js.opts.streamListSubject, + }) + if err != nil { + l.err = err + return false + } + r, err := l.js.apiRequestWithContext(ctx, l.js.apiSubj(apiStreams), req) + if err != nil { + l.err = err + return false + } + var resp streamNamesResponse + if err := json.Unmarshal(r.Data, &resp); err != nil { + l.err = err + return false + } + if resp.Error != nil { + l.err = resp.Error + return false + } + + l.pageInfo = &resp.apiPaged + l.page = resp.Streams + l.offset += len(l.page) + return true +} + +// Page returns the current ConsumerInfo page. +func (l *streamNamesLister) Page() []string { + return l.page +} + +// Err returns any errors found while fetching pages. +func (l *streamNamesLister) Err() error { + return l.err +} + +// StreamNames is used to retrieve a list of Stream names. +func (jsc *js) StreamNames(opts ...JSOpt) <-chan string { + o, cancel, err := getJSContextOpts(jsc.opts, opts...) 
+ if err != nil { + return nil + } + + ch := make(chan string) + l := &streamNamesLister{js: &js{nc: jsc.nc, opts: o}} + go func() { + if cancel != nil { + defer cancel() + } + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + select { + case ch <- info: + case <-o.ctx.Done(): + return + } + } + } + }() + + return ch +} + +// StreamNameBySubject returns a stream name that matches the subject. +func (jsc *js) StreamNameBySubject(subj string, opts ...JSOpt) (string, error) { + o, cancel, err := getJSContextOpts(jsc.opts, opts...) + if err != nil { + return "", err + } + if cancel != nil { + defer cancel() + } + + var slr streamNamesResponse + req := &streamRequest{subj} + j, err := json.Marshal(req) + if err != nil { + return _EMPTY_, err + } + + resp, err := jsc.apiRequestWithContext(o.ctx, jsc.apiSubj(apiStreams), j) + if err != nil { + if err == ErrNoResponders { + err = ErrJetStreamNotEnabled + } + return _EMPTY_, err + } + if err := json.Unmarshal(resp.Data, &slr); err != nil { + return _EMPTY_, err + } + + if slr.Error != nil || len(slr.Streams) != 1 { + return _EMPTY_, ErrNoMatchingStream + } + return slr.Streams[0], nil +} + +func getJSContextOpts(defs *jsOpts, opts ...JSOpt) (*jsOpts, context.CancelFunc, error) { + var o jsOpts + for _, opt := range opts { + if err := opt.configureJSContext(&o); err != nil { + return nil, nil, err + } + } + + // Check for option collisions. Right now just timeout and context. + if o.ctx != nil && o.wait != 0 { + return nil, nil, ErrContextAndTimeout + } + if o.wait == 0 && o.ctx == nil { + o.wait = defs.wait + } + var cancel context.CancelFunc + if o.ctx == nil && o.wait > 0 { + o.ctx, cancel = context.WithTimeout(context.Background(), o.wait) + } + if o.pre == _EMPTY_ { + o.pre = defs.pre + } + + return &o, cancel, nil +} diff --git a/vendor/github.com/nats-io/nats.go/kv.go b/vendor/github.com/nats-io/nats.go/kv.go new file mode 100644 index 00000000..5f416bc5 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/kv.go @@ -0,0 +1,1084 @@ +// Copyright 2021-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "context" + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" + "sync" + "time" +) + +// KeyValueManager is used to manage KeyValue stores. +type KeyValueManager interface { + // KeyValue will lookup and bind to an existing KeyValue store. + KeyValue(bucket string) (KeyValue, error) + // CreateKeyValue will create a KeyValue store with the following configuration. + CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error) + // DeleteKeyValue will delete this KeyValue store (JetStream stream). + DeleteKeyValue(bucket string) error + // KeyValueStoreNames is used to retrieve a list of key value store names + KeyValueStoreNames() <-chan string + // KeyValueStores is used to retrieve a list of key value store statuses + KeyValueStores() <-chan KeyValueStatus +} + +// KeyValue contains methods to operate on a KeyValue store. 
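+//
+// A minimal sketch of typical usage, assuming a JetStreamContext `js`
+// obtained via nc.JetStream(); the bucket name is illustrative and error
+// handling is elided:
+//
+//	kv, _ := js.CreateKeyValue(&nats.KeyValueConfig{Bucket: "profiles"})
+//	_, _ = kv.Put("alice", []byte("admin"))
+//	e, _ := kv.Get("alice")
+//	fmt.Printf("%s rev %d: %s\n", e.Key(), e.Revision(), e.Value())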
+type KeyValue interface { + // Get returns the latest value for the key. + Get(key string) (entry KeyValueEntry, err error) + // GetRevision returns a specific revision value for the key. + GetRevision(key string, revision uint64) (entry KeyValueEntry, err error) + // Put will place the new value for the key into the store. + Put(key string, value []byte) (revision uint64, err error) + // PutString will place the string for the key into the store. + PutString(key string, value string) (revision uint64, err error) + // Create will add the key/value pair iff it does not exist. + Create(key string, value []byte) (revision uint64, err error) + // Update will update the value iff the latest revision matches. + Update(key string, value []byte, last uint64) (revision uint64, err error) + // Delete will place a delete marker and leave all revisions. + Delete(key string, opts ...DeleteOpt) error + // Purge will place a delete marker and remove all previous revisions. + Purge(key string, opts ...DeleteOpt) error + // Watch for any updates to keys that match the keys argument which could include wildcards. + // Watch will send a nil entry when it has received all initial values. + Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) + // WatchAll will invoke the callback for all updates. + WatchAll(opts ...WatchOpt) (KeyWatcher, error) + // Keys will return all keys. + Keys(opts ...WatchOpt) ([]string, error) + // History will return all historical values for the key. + History(key string, opts ...WatchOpt) ([]KeyValueEntry, error) + // Bucket returns the current bucket name. + Bucket() string + // PurgeDeletes will remove all current delete markers. + PurgeDeletes(opts ...PurgeOpt) error + // Status retrieves the status and configuration of a bucket + Status() (KeyValueStatus, error) +} + +// KeyValueStatus is run-time status about a Key-Value bucket +type KeyValueStatus interface { + // Bucket the name of the bucket + Bucket() string + + // Values is how many messages are in the bucket, including historical values + Values() uint64 + + // History returns the configured history kept per key + History() int64 + + // TTL is how long the bucket keeps values for + TTL() time.Duration + + // BackingStore indicates what technology is used for storage of the bucket + BackingStore() string + + // Bytes returns the size in bytes of the bucket + Bytes() uint64 +} + +// KeyWatcher is what is returned when doing a watch. +type KeyWatcher interface { + // Context returns watcher context optionally provided by nats.Context option. + Context() context.Context + // Updates returns a channel to read any updates to entries. + Updates() <-chan KeyValueEntry + // Stop will stop this watcher. + Stop() error +} + +type WatchOpt interface { + configureWatcher(opts *watchOpts) error +} + +// For nats.Context() support. +func (ctx ContextOpt) configureWatcher(opts *watchOpts) error { + opts.ctx = ctx + return nil +} + +type watchOpts struct { + ctx context.Context + // Do not send delete markers to the update channel. + ignoreDeletes bool + // Include all history per subject, not just last one. + includeHistory bool + // retrieve only the meta data of the entry + metaOnly bool +} + +type watchOptFn func(opts *watchOpts) error + +func (opt watchOptFn) configureWatcher(opts *watchOpts) error { + return opt(opts) +} + +// IncludeHistory instructs the key watcher to include historical values as well. 
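+//
+// A sketch of a watcher using this option, assuming `kv` is a bound
+// KeyValue store (error handling elided); a nil entry marks the end of
+// the initial values:
+//
+//	w, _ := kv.Watch("alice", nats.IncludeHistory())
+//	defer w.Stop()
+//	for e := range w.Updates() {
+//		if e == nil {
+//			break // all initial values received
+//		}
+//		fmt.Println(e.Revision(), e.Operation(), string(e.Value()))
+//	}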
+func IncludeHistory() WatchOpt { + return watchOptFn(func(opts *watchOpts) error { + opts.includeHistory = true + return nil + }) +} + +// IgnoreDeletes will have the key watcher not pass any deleted keys. +func IgnoreDeletes() WatchOpt { + return watchOptFn(func(opts *watchOpts) error { + opts.ignoreDeletes = true + return nil + }) +} + +// MetaOnly instructs the key watcher to retrieve only the entry meta data, not the entry value +func MetaOnly() WatchOpt { + return watchOptFn(func(opts *watchOpts) error { + opts.metaOnly = true + return nil + }) +} + +type PurgeOpt interface { + configurePurge(opts *purgeOpts) error +} + +type purgeOpts struct { + dmthr time.Duration // Delete markers threshold + ctx context.Context +} + +// DeleteMarkersOlderThan indicates that delete or purge markers older than that +// will be deleted as part of PurgeDeletes() operation, otherwise, only the data +// will be removed but markers that are recent will be kept. +// Note that if no option is specified, the default is 30 minutes. You can set +// this option to a negative value to instruct to always remove the markers, +// regardless of their age. +type DeleteMarkersOlderThan time.Duration + +func (ttl DeleteMarkersOlderThan) configurePurge(opts *purgeOpts) error { + opts.dmthr = time.Duration(ttl) + return nil +} + +// For nats.Context() support. +func (ctx ContextOpt) configurePurge(opts *purgeOpts) error { + opts.ctx = ctx + return nil +} + +type DeleteOpt interface { + configureDelete(opts *deleteOpts) error +} + +type deleteOpts struct { + // Remove all previous revisions. + purge bool + + // Delete only if the latest revision matches. + revision uint64 +} + +type deleteOptFn func(opts *deleteOpts) error + +func (opt deleteOptFn) configureDelete(opts *deleteOpts) error { + return opt(opts) +} + +// LastRevision deletes if the latest revision matches. +func LastRevision(revision uint64) DeleteOpt { + return deleteOptFn(func(opts *deleteOpts) error { + opts.revision = revision + return nil + }) +} + +// purge removes all previous revisions. +func purge() DeleteOpt { + return deleteOptFn(func(opts *deleteOpts) error { + opts.purge = true + return nil + }) +} + +// KeyValueConfig is for configuring a KeyValue store. +type KeyValueConfig struct { + Bucket string + Description string + MaxValueSize int32 + History uint8 + TTL time.Duration + MaxBytes int64 + Storage StorageType + Replicas int + Placement *Placement + RePublish *RePublish + Mirror *StreamSource + Sources []*StreamSource +} + +// Used to watch all keys. +const ( + KeyValueMaxHistory = 64 + AllKeys = ">" + kvLatestRevision = 0 + kvop = "KV-Operation" + kvdel = "DEL" + kvpurge = "PURGE" +) + +type KeyValueOp uint8 + +const ( + KeyValuePut KeyValueOp = iota + KeyValueDelete + KeyValuePurge +) + +func (op KeyValueOp) String() string { + switch op { + case KeyValuePut: + return "KeyValuePutOp" + case KeyValueDelete: + return "KeyValueDeleteOp" + case KeyValuePurge: + return "KeyValuePurgeOp" + default: + return "Unknown Operation" + } +} + +// KeyValueEntry is a retrieved entry for Get or List or Watch. +type KeyValueEntry interface { + // Bucket is the bucket the data was loaded from. + Bucket() string + // Key is the key that was retrieved. + Key() string + // Value is the retrieved value. + Value() []byte + // Revision is a unique sequence for this value. + Revision() uint64 + // Created is the time the data was put in the bucket. + Created() time.Time + // Delta is distance from the latest value. 
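+	// For watcher-delivered entries, a Delta of 0 means nothing newer
+	// was pending when the entry was delivered.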
+ Delta() uint64 + // Operation returns Put or Delete or Purge. + Operation() KeyValueOp +} + +// Errors +var ( + ErrKeyValueConfigRequired = errors.New("nats: config required") + ErrInvalidBucketName = errors.New("nats: invalid bucket name") + ErrInvalidKey = errors.New("nats: invalid key") + ErrBucketNotFound = errors.New("nats: bucket not found") + ErrBadBucket = errors.New("nats: bucket not valid key-value store") + ErrKeyNotFound = errors.New("nats: key not found") + ErrKeyDeleted = errors.New("nats: key was deleted") + ErrHistoryToLarge = errors.New("nats: history limited to a max of 64") + ErrNoKeysFound = errors.New("nats: no keys found") +) + +var ( + ErrKeyExists JetStreamError = &jsError{apiErr: &APIError{ErrorCode: JSErrCodeStreamWrongLastSequence, Code: 400}, message: "key exists"} +) + +const ( + kvBucketNamePre = "KV_" + kvBucketNameTmpl = "KV_%s" + kvSubjectsTmpl = "$KV.%s.>" + kvSubjectsPreTmpl = "$KV.%s." + kvSubjectsPreDomainTmpl = "%s.$KV.%s." + kvNoPending = "0" +) + +// Regex for valid keys and buckets. +var ( + validBucketRe = regexp.MustCompile(`\A[a-zA-Z0-9_-]+\z`) + validKeyRe = regexp.MustCompile(`\A[-/_=\.a-zA-Z0-9]+\z`) +) + +// KeyValue will lookup and bind to an existing KeyValue store. +func (js *js) KeyValue(bucket string) (KeyValue, error) { + if !js.nc.serverMinVersion(2, 6, 2) { + return nil, errors.New("nats: key-value requires at least server version 2.6.2") + } + if !validBucketRe.MatchString(bucket) { + return nil, ErrInvalidBucketName + } + stream := fmt.Sprintf(kvBucketNameTmpl, bucket) + si, err := js.StreamInfo(stream) + if err != nil { + if err == ErrStreamNotFound { + err = ErrBucketNotFound + } + return nil, err + } + // Do some quick sanity checks that this is a correctly formed stream for KV. + // Max msgs per subject should be > 0. + if si.Config.MaxMsgsPerSubject < 1 { + return nil, ErrBadBucket + } + + return mapStreamToKVS(js, si), nil +} + +// CreateKeyValue will create a KeyValue store with the following configuration. +func (js *js) CreateKeyValue(cfg *KeyValueConfig) (KeyValue, error) { + if !js.nc.serverMinVersion(2, 6, 2) { + return nil, errors.New("nats: key-value requires at least server version 2.6.2") + } + if cfg == nil { + return nil, ErrKeyValueConfigRequired + } + if !validBucketRe.MatchString(cfg.Bucket) { + return nil, ErrInvalidBucketName + } + if _, err := js.AccountInfo(); err != nil { + return nil, err + } + + // Default to 1 for history. Max is 64 for now. + history := int64(1) + if cfg.History > 0 { + if cfg.History > KeyValueMaxHistory { + return nil, ErrHistoryToLarge + } + history = int64(cfg.History) + } + + replicas := cfg.Replicas + if replicas == 0 { + replicas = 1 + } + + // We will set explicitly some values so that we can do comparison + // if we get an "already in use" error and need to check if it is same. + maxBytes := cfg.MaxBytes + if maxBytes == 0 { + maxBytes = -1 + } + maxMsgSize := cfg.MaxValueSize + if maxMsgSize == 0 { + maxMsgSize = -1 + } + // When stream's MaxAge is not set, server uses 2 minutes as the default + // for the duplicate window. If MaxAge is set, and lower than 2 minutes, + // then the duplicate window will be set to that. If MaxAge is greater, + // we will cap the duplicate window to 2 minutes (to be consistent with + // previous behavior). 
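+	// For example, a TTL of 30 seconds yields a 30 second duplicate
+	// window, while a TTL of 10 minutes is capped to the 2 minute default.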
+ duplicateWindow := 2 * time.Minute
+ if cfg.TTL > 0 && cfg.TTL < duplicateWindow {
+ duplicateWindow = cfg.TTL
+ }
+ scfg := &StreamConfig{
+ Name: fmt.Sprintf(kvBucketNameTmpl, cfg.Bucket),
+ Description: cfg.Description,
+ MaxMsgsPerSubject: history,
+ MaxBytes: maxBytes,
+ MaxAge: cfg.TTL,
+ MaxMsgSize: maxMsgSize,
+ Storage: cfg.Storage,
+ Replicas: replicas,
+ Placement: cfg.Placement,
+ AllowRollup: true,
+ DenyDelete: true,
+ Duplicates: duplicateWindow,
+ MaxMsgs: -1,
+ MaxConsumers: -1,
+ AllowDirect: true,
+ RePublish: cfg.RePublish,
+ }
+ if cfg.Mirror != nil {
+ // Copy in case we need to make changes so we do not change caller's version.
+ m := cfg.Mirror.copy()
+ if !strings.HasPrefix(m.Name, kvBucketNamePre) {
+ m.Name = fmt.Sprintf(kvBucketNameTmpl, m.Name)
+ }
+ scfg.Mirror = m
+ scfg.MirrorDirect = true
+ } else if len(cfg.Sources) > 0 {
+ // For now we do not allow direct subjects for sources. If that is desired, a user could use the stream API directly.
+ for _, ss := range cfg.Sources {
+ if !strings.HasPrefix(ss.Name, kvBucketNamePre) {
+ ss = ss.copy()
+ ss.Name = fmt.Sprintf(kvBucketNameTmpl, ss.Name)
+ }
+ scfg.Sources = append(scfg.Sources, ss)
+ }
+ } else {
+ scfg.Subjects = []string{fmt.Sprintf(kvSubjectsTmpl, cfg.Bucket)}
+ }
+
+ // If we are at server version 2.7.2 or above, use DiscardNew. We cannot use DiscardNew for 2.7.1 or below.
+ if js.nc.serverMinVersion(2, 7, 2) {
+ scfg.Discard = DiscardNew
+ }
+
+ si, err := js.AddStream(scfg)
+ if err != nil {
+ // If we have a failure to add, it could be because we have
+ // a config change if the KV was created against a pre-2.7.2
+ // server and we are now moving to v2.7.2+. If that is the case
+ // and the only difference is the discard policy, then update
+ // the stream.
+ // The same logic applies for KVs created pre 2.9.x and
+ // the AllowDirect setting.
+ if err == ErrStreamNameAlreadyInUse {
+ if si, _ = js.StreamInfo(scfg.Name); si != nil {
+ // To compare, make the server's stream info discard
+ // policy the same as ours.
+ si.Config.Discard = scfg.Discard
+ // Also need to set allow direct for v2.9.x+
+ si.Config.AllowDirect = scfg.AllowDirect
+ if reflect.DeepEqual(&si.Config, scfg) {
+ si, err = js.UpdateStream(scfg)
+ }
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ return mapStreamToKVS(js, si), nil
+}
+
+// DeleteKeyValue will delete this KeyValue store (JetStream stream).
+func (js *js) DeleteKeyValue(bucket string) error {
+ if !validBucketRe.MatchString(bucket) {
+ return ErrInvalidBucketName
+ }
+ stream := fmt.Sprintf(kvBucketNameTmpl, bucket)
+ return js.DeleteStream(stream)
+}
+
+type kvs struct {
+ name string
+ stream string
+ pre string
+ putPre string
+ js *js
+ // If true, it means that APIPrefix/Domain was set in the context
+ // and we need to add something to some of our high level protocols
+ // (such as Put, etc.)
+ useJSPfx bool
+ // To know if we can use the stream direct get API
+ useDirect bool
+}
+
+// Underlying entry. 
+type kve struct { + bucket string + key string + value []byte + revision uint64 + delta uint64 + created time.Time + op KeyValueOp +} + +func (e *kve) Bucket() string { return e.bucket } +func (e *kve) Key() string { return e.key } +func (e *kve) Value() []byte { return e.value } +func (e *kve) Revision() uint64 { return e.revision } +func (e *kve) Created() time.Time { return e.created } +func (e *kve) Delta() uint64 { return e.delta } +func (e *kve) Operation() KeyValueOp { return e.op } + +func keyValid(key string) bool { + if len(key) == 0 || key[0] == '.' || key[len(key)-1] == '.' { + return false + } + return validKeyRe.MatchString(key) +} + +// Get returns the latest value for the key. +func (kv *kvs) Get(key string) (KeyValueEntry, error) { + e, err := kv.get(key, kvLatestRevision) + if err != nil { + if err == ErrKeyDeleted { + return nil, ErrKeyNotFound + } + return nil, err + } + + return e, nil +} + +// GetRevision returns a specific revision value for the key. +func (kv *kvs) GetRevision(key string, revision uint64) (KeyValueEntry, error) { + e, err := kv.get(key, revision) + if err != nil { + if err == ErrKeyDeleted { + return nil, ErrKeyNotFound + } + return nil, err + } + + return e, nil +} + +func (kv *kvs) get(key string, revision uint64) (KeyValueEntry, error) { + if !keyValid(key) { + return nil, ErrInvalidKey + } + + var b strings.Builder + b.WriteString(kv.pre) + b.WriteString(key) + + var m *RawStreamMsg + var err error + var _opts [1]JSOpt + opts := _opts[:0] + if kv.useDirect { + opts = append(opts, DirectGet()) + } + + if revision == kvLatestRevision { + m, err = kv.js.GetLastMsg(kv.stream, b.String(), opts...) + } else { + m, err = kv.js.GetMsg(kv.stream, revision, opts...) + // If a sequence was provided, just make sure that the retrieved + // message subject matches the request. + if err == nil && m.Subject != b.String() { + return nil, ErrKeyNotFound + } + } + if err != nil { + if err == ErrMsgNotFound { + err = ErrKeyNotFound + } + return nil, err + } + + entry := &kve{ + bucket: kv.name, + key: key, + value: m.Data, + revision: m.Sequence, + created: m.Time, + } + + // Double check here that this is not a DEL Operation marker. + if len(m.Header) > 0 { + switch m.Header.Get(kvop) { + case kvdel: + entry.op = KeyValueDelete + return entry, ErrKeyDeleted + case kvpurge: + entry.op = KeyValuePurge + return entry, ErrKeyDeleted + } + } + + return entry, nil +} + +// Put will place the new value for the key into the store. +func (kv *kvs) Put(key string, value []byte) (revision uint64, err error) { + if !keyValid(key) { + return 0, ErrInvalidKey + } + + var b strings.Builder + if kv.useJSPfx { + b.WriteString(kv.js.opts.pre) + } + if kv.putPre != _EMPTY_ { + b.WriteString(kv.putPre) + } else { + b.WriteString(kv.pre) + } + b.WriteString(key) + + pa, err := kv.js.Publish(b.String(), value) + if err != nil { + return 0, err + } + return pa.Sequence, err +} + +// PutString will place the string for the key into the store. +func (kv *kvs) PutString(key string, value string) (revision uint64, err error) { + return kv.Put(key, []byte(value)) +} + +// Create will add the key/value pair iff it does not exist. +func (kv *kvs) Create(key string, value []byte) (revision uint64, err error) { + v, err := kv.Update(key, value, 0) + if err == nil { + return v, nil + } + + // TODO(dlc) - Since we have tombstones for DEL ops for watchers, this could be from that + // so we need to double check. 
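+	// A key that was deleted still has a latest revision (its delete
+	// marker); in that case the key counts as absent, so retry the
+	// update against the marker's revision.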
+ if e, err := kv.get(key, kvLatestRevision); err == ErrKeyDeleted { + return kv.Update(key, value, e.Revision()) + } + + // Check if the expected last subject sequence is not zero which implies + // the key already exists. + if errors.Is(err, ErrKeyExists) { + jserr := ErrKeyExists.(*jsError) + return 0, fmt.Errorf("%w: %s", err, jserr.message) + } + + return 0, err +} + +// Update will update the value iff the latest revision matches. +func (kv *kvs) Update(key string, value []byte, revision uint64) (uint64, error) { + if !keyValid(key) { + return 0, ErrInvalidKey + } + + var b strings.Builder + if kv.useJSPfx { + b.WriteString(kv.js.opts.pre) + } + b.WriteString(kv.pre) + b.WriteString(key) + + m := Msg{Subject: b.String(), Header: Header{}, Data: value} + m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(revision, 10)) + + pa, err := kv.js.PublishMsg(&m) + if err != nil { + return 0, err + } + return pa.Sequence, err +} + +// Delete will place a delete marker and leave all revisions. +func (kv *kvs) Delete(key string, opts ...DeleteOpt) error { + if !keyValid(key) { + return ErrInvalidKey + } + + var b strings.Builder + if kv.useJSPfx { + b.WriteString(kv.js.opts.pre) + } + if kv.putPre != _EMPTY_ { + b.WriteString(kv.putPre) + } else { + b.WriteString(kv.pre) + } + b.WriteString(key) + + // DEL op marker. For watch functionality. + m := NewMsg(b.String()) + + var o deleteOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureDelete(&o); err != nil { + return err + } + } + } + + if o.purge { + m.Header.Set(kvop, kvpurge) + m.Header.Set(MsgRollup, MsgRollupSubject) + } else { + m.Header.Set(kvop, kvdel) + } + + if o.revision != 0 { + m.Header.Set(ExpectedLastSubjSeqHdr, strconv.FormatUint(o.revision, 10)) + } + + _, err := kv.js.PublishMsg(m) + return err +} + +// Purge will remove the key and all revisions. +func (kv *kvs) Purge(key string, opts ...DeleteOpt) error { + return kv.Delete(key, append(opts, purge())...) +} + +const kvDefaultPurgeDeletesMarkerThreshold = 30 * time.Minute + +// PurgeDeletes will remove all current delete markers. +// This is a maintenance option if there is a larger buildup of delete markers. +// See DeleteMarkersOlderThan() option for more information. +func (kv *kvs) PurgeDeletes(opts ...PurgeOpt) error { + var o purgeOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configurePurge(&o); err != nil { + return err + } + } + } + // Transfer possible context purge option to the watcher. This is the + // only option that matters for the PurgeDeletes() feature. + var wopts []WatchOpt + if o.ctx != nil { + wopts = append(wopts, Context(o.ctx)) + } + watcher, err := kv.WatchAll(wopts...) + if err != nil { + return err + } + defer watcher.Stop() + + var limit time.Time + olderThan := o.dmthr + // Negative value is used to instruct to always remove markers, regardless + // of age. If set to 0 (or not set), use our default value. + if olderThan == 0 { + olderThan = kvDefaultPurgeDeletesMarkerThreshold + } + if olderThan > 0 { + limit = time.Now().Add(-olderThan) + } + + var deleteMarkers []KeyValueEntry + for entry := range watcher.Updates() { + if entry == nil { + break + } + if op := entry.Operation(); op == KeyValueDelete || op == KeyValuePurge { + deleteMarkers = append(deleteMarkers, entry) + } + } + + var ( + pr StreamPurgeRequest + b strings.Builder + ) + // Do actual purges here. 
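+	// Keep=1 retains the delete marker itself when it is newer than the
+	// age limit; Keep=0 purges the subject, marker included.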
+ for _, entry := range deleteMarkers { + b.WriteString(kv.pre) + b.WriteString(entry.Key()) + pr.Subject = b.String() + pr.Keep = 0 + if olderThan > 0 && entry.Created().After(limit) { + pr.Keep = 1 + } + if err := kv.js.purgeStream(kv.stream, &pr); err != nil { + return err + } + b.Reset() + } + return nil +} + +// Keys() will return all keys. +func (kv *kvs) Keys(opts ...WatchOpt) ([]string, error) { + opts = append(opts, IgnoreDeletes(), MetaOnly()) + watcher, err := kv.WatchAll(opts...) + if err != nil { + return nil, err + } + defer watcher.Stop() + + var keys []string + for entry := range watcher.Updates() { + if entry == nil { + break + } + keys = append(keys, entry.Key()) + } + if len(keys) == 0 { + return nil, ErrNoKeysFound + } + return keys, nil +} + +// History will return all values for the key. +func (kv *kvs) History(key string, opts ...WatchOpt) ([]KeyValueEntry, error) { + opts = append(opts, IncludeHistory()) + watcher, err := kv.Watch(key, opts...) + if err != nil { + return nil, err + } + defer watcher.Stop() + + var entries []KeyValueEntry + for entry := range watcher.Updates() { + if entry == nil { + break + } + entries = append(entries, entry) + } + if len(entries) == 0 { + return nil, ErrKeyNotFound + } + return entries, nil +} + +// Implementation for Watch +type watcher struct { + mu sync.Mutex + updates chan KeyValueEntry + sub *Subscription + initDone bool + initPending uint64 + received uint64 + ctx context.Context +} + +// Context returns the context for the watcher if set. +func (w *watcher) Context() context.Context { + if w == nil { + return nil + } + return w.ctx +} + +// Updates returns the interior channel. +func (w *watcher) Updates() <-chan KeyValueEntry { + if w == nil { + return nil + } + return w.updates +} + +// Stop will unsubscribe from the watcher. +func (w *watcher) Stop() error { + if w == nil { + return nil + } + return w.sub.Unsubscribe() +} + +// WatchAll watches all keys. +func (kv *kvs) WatchAll(opts ...WatchOpt) (KeyWatcher, error) { + return kv.Watch(AllKeys, opts...) +} + +// Watch will fire the callback when a key that matches the keys pattern is updated. +// keys needs to be a valid NATS subject. +func (kv *kvs) Watch(keys string, opts ...WatchOpt) (KeyWatcher, error) { + var o watchOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureWatcher(&o); err != nil { + return nil, err + } + } + } + + // Could be a pattern so don't check for validity as we normally do. + var b strings.Builder + b.WriteString(kv.pre) + b.WriteString(keys) + keys = b.String() + + // We will block below on placing items on the chan. That is by design. + w := &watcher{updates: make(chan KeyValueEntry, 256), ctx: o.ctx} + + update := func(m *Msg) { + tokens, err := getMetadataFields(m.Reply) + if err != nil { + return + } + if len(m.Subject) <= len(kv.pre) { + return + } + subj := m.Subject[len(kv.pre):] + + var op KeyValueOp + if len(m.Header) > 0 { + switch m.Header.Get(kvop) { + case kvdel: + op = KeyValueDelete + case kvpurge: + op = KeyValuePurge + } + } + delta := uint64(parseNum(tokens[ackNumPendingTokenPos])) + w.mu.Lock() + defer w.mu.Unlock() + if !o.ignoreDeletes || (op != KeyValueDelete && op != KeyValuePurge) { + entry := &kve{ + bucket: kv.name, + key: subj, + value: m.Data, + revision: uint64(parseNum(tokens[ackStreamSeqTokenPos])), + created: time.Unix(0, parseNum(tokens[ackTimestampSeqTokenPos])), + delta: delta, + op: op, + } + w.updates <- entry + } + // Check if done and initial values. 
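+		// initPending is latched from the first message's delta (how many
+		// messages were still pending); once more than that many have been
+		// received, or delta drops to 0, the nil marker is sent.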
+ if !w.initDone { + w.received++ + // We set this on the first trip through.. + if w.initPending == 0 { + w.initPending = delta + } + if w.received > w.initPending || delta == 0 { + w.initDone = true + w.updates <- nil + } + } + } + + // Used ordered consumer to deliver results. + subOpts := []SubOpt{BindStream(kv.stream), OrderedConsumer()} + if !o.includeHistory { + subOpts = append(subOpts, DeliverLastPerSubject()) + } + if o.metaOnly { + subOpts = append(subOpts, HeadersOnly()) + } + if o.ctx != nil { + subOpts = append(subOpts, Context(o.ctx)) + } + // Create the sub and rest of initialization under the lock. + // We want to prevent the race between this code and the + // update() callback. + w.mu.Lock() + defer w.mu.Unlock() + sub, err := kv.js.Subscribe(keys, update, subOpts...) + if err != nil { + return nil, err + } + sub.mu.Lock() + // If there were no pending messages at the time of the creation + // of the consumer, send the marker. + if sub.jsi != nil && sub.jsi.pending == 0 { + w.initDone = true + w.updates <- nil + } + // Set us up to close when the waitForMessages func returns. + sub.pDone = func() { + close(w.updates) + } + sub.mu.Unlock() + + w.sub = sub + return w, nil +} + +// Bucket returns the current bucket name (JetStream stream). +func (kv *kvs) Bucket() string { + return kv.name +} + +// KeyValueBucketStatus represents status of a Bucket, implements KeyValueStatus +type KeyValueBucketStatus struct { + nfo *StreamInfo + bucket string +} + +// Bucket the name of the bucket +func (s *KeyValueBucketStatus) Bucket() string { return s.bucket } + +// Values is how many messages are in the bucket, including historical values +func (s *KeyValueBucketStatus) Values() uint64 { return s.nfo.State.Msgs } + +// History returns the configured history kept per key +func (s *KeyValueBucketStatus) History() int64 { return s.nfo.Config.MaxMsgsPerSubject } + +// TTL is how long the bucket keeps values for +func (s *KeyValueBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge } + +// BackingStore indicates what technology is used for storage of the bucket +func (s *KeyValueBucketStatus) BackingStore() string { return "JetStream" } + +// StreamInfo is the stream info retrieved to create the status +func (s *KeyValueBucketStatus) StreamInfo() *StreamInfo { return s.nfo } + +// Bytes is the size of the stream +func (s *KeyValueBucketStatus) Bytes() uint64 { return s.nfo.State.Bytes } + +// Status retrieves the status and configuration of a bucket +func (kv *kvs) Status() (KeyValueStatus, error) { + nfo, err := kv.js.StreamInfo(kv.stream) + if err != nil { + return nil, err + } + + return &KeyValueBucketStatus{nfo: nfo, bucket: kv.name}, nil +} + +// KeyValueStoreNames is used to retrieve a list of key value store names +func (js *js) KeyValueStoreNames() <-chan string { + ch := make(chan string) + l := &streamLister{js: js} + l.js.opts.streamListSubject = fmt.Sprintf(kvSubjectsTmpl, "*") + go func() { + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + if !strings.HasPrefix(info.Config.Name, kvBucketNamePre) { + continue + } + ch <- info.Config.Name + } + } + }() + + return ch +} + +// KeyValueStores is used to retrieve a list of key value store statuses +func (js *js) KeyValueStores() <-chan KeyValueStatus { + ch := make(chan KeyValueStatus) + l := &streamLister{js: js} + l.js.opts.streamListSubject = fmt.Sprintf(kvSubjectsTmpl, "*") + go func() { + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + if !strings.HasPrefix(info.Config.Name, 
kvBucketNamePre) { + continue + } + ch <- &KeyValueBucketStatus{nfo: info, bucket: strings.TrimPrefix(info.Config.Name, kvBucketNamePre)} + } + } + }() + return ch +} + +func mapStreamToKVS(js *js, info *StreamInfo) *kvs { + bucket := strings.TrimPrefix(info.Config.Name, kvBucketNamePre) + + kv := &kvs{ + name: bucket, + stream: info.Config.Name, + pre: fmt.Sprintf(kvSubjectsPreTmpl, bucket), + js: js, + // Determine if we need to use the JS prefix in front of Put and Delete operations + useJSPfx: js.opts.pre != defaultAPIPrefix, + useDirect: info.Config.AllowDirect, + } + + // If we are mirroring, we will have mirror direct on, so just use the mirror name + // and override use + if m := info.Config.Mirror; m != nil { + bucket := strings.TrimPrefix(m.Name, kvBucketNamePre) + if m.External != nil && m.External.APIPrefix != _EMPTY_ { + kv.useJSPfx = false + kv.pre = fmt.Sprintf(kvSubjectsPreTmpl, bucket) + kv.putPre = fmt.Sprintf(kvSubjectsPreDomainTmpl, m.External.APIPrefix, bucket) + } else { + kv.putPre = fmt.Sprintf(kvSubjectsPreTmpl, bucket) + } + } + + return kv +} diff --git a/vendor/github.com/nats-io/nats.go/nats.go b/vendor/github.com/nats-io/nats.go/nats.go new file mode 100644 index 00000000..ee53ea91 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/nats.go @@ -0,0 +1,5540 @@ +// Copyright 2012-2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A Go client for the NATS messaging system (https://nats.io). +package nats + +import ( + "bufio" + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "math/rand" + "net" + "net/http" + "net/textproto" + "net/url" + "os" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/nats-io/nkeys" + "github.com/nats-io/nuid" + + "github.com/nats-io/nats.go/util" +) + +// Default Constants +const ( + Version = "1.24.0" + DefaultURL = "nats://127.0.0.1:4222" + DefaultPort = 4222 + DefaultMaxReconnect = 60 + DefaultReconnectWait = 2 * time.Second + DefaultReconnectJitter = 100 * time.Millisecond + DefaultReconnectJitterTLS = time.Second + DefaultTimeout = 2 * time.Second + DefaultPingInterval = 2 * time.Minute + DefaultMaxPingOut = 2 + DefaultMaxChanLen = 64 * 1024 // 64k + DefaultReconnectBufSize = 8 * 1024 * 1024 // 8MB + RequestChanLen = 8 + DefaultDrainTimeout = 30 * time.Second + LangString = "go" +) + +const ( + // STALE_CONNECTION is for detection and proper handling of stale connections. + STALE_CONNECTION = "stale connection" + + // PERMISSIONS_ERR is for when nats server subject authorization has failed. + PERMISSIONS_ERR = "permissions violation" + + // AUTHORIZATION_ERR is for when nats server user authorization has failed. + AUTHORIZATION_ERR = "authorization violation" + + // AUTHENTICATION_EXPIRED_ERR is for when nats server user authorization has expired. 
+ AUTHENTICATION_EXPIRED_ERR = "user authentication expired" + + // AUTHENTICATION_REVOKED_ERR is for when user authorization has been revoked. + AUTHENTICATION_REVOKED_ERR = "user authentication revoked" + + // ACCOUNT_AUTHENTICATION_EXPIRED_ERR is for when nats server account authorization has expired. + ACCOUNT_AUTHENTICATION_EXPIRED_ERR = "account authentication expired" + + // MAX_CONNECTIONS_ERR is for when nats server denies the connection due to server max_connections limit + MAX_CONNECTIONS_ERR = "maximum connections exceeded" +) + +// Errors +var ( + ErrConnectionClosed = errors.New("nats: connection closed") + ErrConnectionDraining = errors.New("nats: connection draining") + ErrDrainTimeout = errors.New("nats: draining connection timed out") + ErrConnectionReconnecting = errors.New("nats: connection reconnecting") + ErrSecureConnRequired = errors.New("nats: secure connection required") + ErrSecureConnWanted = errors.New("nats: secure connection not available") + ErrBadSubscription = errors.New("nats: invalid subscription") + ErrTypeSubscription = errors.New("nats: invalid subscription type") + ErrBadSubject = errors.New("nats: invalid subject") + ErrBadQueueName = errors.New("nats: invalid queue name") + ErrSlowConsumer = errors.New("nats: slow consumer, messages dropped") + ErrTimeout = errors.New("nats: timeout") + ErrBadTimeout = errors.New("nats: timeout invalid") + ErrAuthorization = errors.New("nats: authorization violation") + ErrAuthExpired = errors.New("nats: authentication expired") + ErrAuthRevoked = errors.New("nats: authentication revoked") + ErrAccountAuthExpired = errors.New("nats: account authentication expired") + ErrNoServers = errors.New("nats: no servers available for connection") + ErrJsonParse = errors.New("nats: connect message, json parse error") + ErrChanArg = errors.New("nats: argument needs to be a channel type") + ErrMaxPayload = errors.New("nats: maximum payload exceeded") + ErrMaxMessages = errors.New("nats: maximum messages delivered") + ErrSyncSubRequired = errors.New("nats: illegal call on an async subscription") + ErrMultipleTLSConfigs = errors.New("nats: multiple tls.Configs not allowed") + ErrNoInfoReceived = errors.New("nats: protocol exception, INFO not received") + ErrReconnectBufExceeded = errors.New("nats: outbound buffer limit exceeded") + ErrInvalidConnection = errors.New("nats: invalid connection") + ErrInvalidMsg = errors.New("nats: invalid message or message nil") + ErrInvalidArg = errors.New("nats: invalid argument") + ErrInvalidContext = errors.New("nats: invalid context") + ErrNoDeadlineContext = errors.New("nats: context requires a deadline") + ErrNoEchoNotSupported = errors.New("nats: no echo option not supported by this server") + ErrClientIDNotSupported = errors.New("nats: client ID not supported by this server") + ErrUserButNoSigCB = errors.New("nats: user callback defined without a signature handler") + ErrNkeyButNoSigCB = errors.New("nats: nkey defined without a signature handler") + ErrNoUserCB = errors.New("nats: user callback not defined") + ErrNkeyAndUser = errors.New("nats: user callback and nkey defined") + ErrNkeysNotSupported = errors.New("nats: nkeys not supported by the server") + ErrStaleConnection = errors.New("nats: " + STALE_CONNECTION) + ErrTokenAlreadySet = errors.New("nats: token and token handler both set") + ErrMsgNotBound = errors.New("nats: message is not bound to subscription/connection") + ErrMsgNoReply = errors.New("nats: message does not have a reply") + ErrClientIPNotSupported = errors.New("nats: 
client IP not supported by this server") + ErrDisconnected = errors.New("nats: server is disconnected") + ErrHeadersNotSupported = errors.New("nats: headers not supported by this server") + ErrBadHeaderMsg = errors.New("nats: message could not decode headers") + ErrNoResponders = errors.New("nats: no responders available for request") + ErrMaxConnectionsExceeded = errors.New("nats: server maximum connections exceeded") + ErrConnectionNotTLS = errors.New("nats: connection is not tls") +) + +// GetDefaultOptions returns default configuration options for the client. +func GetDefaultOptions() Options { + return Options{ + AllowReconnect: true, + MaxReconnect: DefaultMaxReconnect, + ReconnectWait: DefaultReconnectWait, + ReconnectJitter: DefaultReconnectJitter, + ReconnectJitterTLS: DefaultReconnectJitterTLS, + Timeout: DefaultTimeout, + PingInterval: DefaultPingInterval, + MaxPingsOut: DefaultMaxPingOut, + SubChanLen: DefaultMaxChanLen, + ReconnectBufSize: DefaultReconnectBufSize, + DrainTimeout: DefaultDrainTimeout, + } +} + +// DEPRECATED: Use GetDefaultOptions() instead. +// DefaultOptions is not safe for use by multiple clients. +// For details see #308. +var DefaultOptions = GetDefaultOptions() + +// Status represents the state of the connection. +type Status int + +const ( + DISCONNECTED = Status(iota) + CONNECTED + CLOSED + RECONNECTING + CONNECTING + DRAINING_SUBS + DRAINING_PUBS +) + +func (s Status) String() string { + switch s { + case DISCONNECTED: + return "DISCONNECTED" + case CONNECTED: + return "CONNECTED" + case CLOSED: + return "CLOSED" + case RECONNECTING: + return "RECONNECTING" + case CONNECTING: + return "CONNECTING" + case DRAINING_SUBS: + return "DRAINING_SUBS" + case DRAINING_PUBS: + return "DRAINING_PUBS" + } + return "unknown status" +} + +// ConnHandler is used for asynchronous events such as +// disconnected and closed connections. +type ConnHandler func(*Conn) + +// ConnErrHandler is used to process asynchronous events like +// disconnected connection with the error (if any). +type ConnErrHandler func(*Conn, error) + +// ErrHandler is used to process asynchronous errors encountered +// while processing inbound messages. +type ErrHandler func(*Conn, *Subscription, error) + +// UserJWTHandler is used to fetch and return the account signed +// JWT for this user. +type UserJWTHandler func() (string, error) + +// SignatureHandler is used to sign a nonce from the server while +// authenticating with nkeys. The user should sign the nonce and +// return the raw signature. The client will base64 encode this to +// send to the server. +type SignatureHandler func([]byte) ([]byte, error) + +// AuthTokenHandler is used to generate a new token. +type AuthTokenHandler func() string + +// ReconnectDelayHandler is used to get from the user the desired +// delay the library should pause before attempting to reconnect +// again. Note that this is invoked after the library tried the +// whole list of URLs and failed to reconnect. +type ReconnectDelayHandler func(attempts int) time.Duration + +// asyncCB is used to preserve order for async callbacks. +type asyncCB struct { + f func() + next *asyncCB +} + +type asyncCallbacksHandler struct { + mu sync.Mutex + cond *sync.Cond + head *asyncCB + tail *asyncCB +} + +// Option is a function on the options for a connection. +type Option func(*Options) error + +// CustomDialer can be used to specify any dialer, not necessarily a +// *net.Dialer. 
A CustomDialer may also implement `SkipTLSHandshake() bool` +// in order to skip the TLS handshake in case not required. +type CustomDialer interface { + Dial(network, address string) (net.Conn, error) +} + +type InProcessConnProvider interface { + InProcessConn() (net.Conn, error) +} + +// Options can be used to create a customized connection. +type Options struct { + + // Url represents a single NATS server url to which the client + // will be connecting. If the Servers option is also set, it + // then becomes the first server in the Servers array. + Url string + + // InProcessServer represents a NATS server running within the + // same process. If this is set then we will attempt to connect + // to the server directly rather than using external TCP conns. + InProcessServer InProcessConnProvider + + // Servers is a configured set of servers which this client + // will use when attempting to connect. + Servers []string + + // NoRandomize configures whether we will randomize the + // server pool. + NoRandomize bool + + // NoEcho configures whether the server will echo back messages + // that are sent on this connection if we also have matching subscriptions. + // Note this is supported on servers >= version 1.2. Proto 1 or greater. + NoEcho bool + + // Name is an optional name label which will be sent to the server + // on CONNECT to identify the client. + Name string + + // Verbose signals the server to send an OK ack for commands + // successfully processed by the server. + Verbose bool + + // Pedantic signals the server whether it should be doing further + // validation of subjects. + Pedantic bool + + // Secure enables TLS secure connections that skip server + // verification by default. NOT RECOMMENDED. + Secure bool + + // TLSConfig is a custom TLS configuration to use for secure + // transports. + TLSConfig *tls.Config + + // AllowReconnect enables reconnection logic to be used when we + // encounter a disconnect from the current server. + AllowReconnect bool + + // MaxReconnect sets the number of reconnect attempts that will be + // tried before giving up. If negative, then it will never give up + // trying to reconnect. + // Defaults to 60. + MaxReconnect int + + // ReconnectWait sets the time to backoff after attempting a reconnect + // to a server that we were already connected to previously. + // Defaults to 2s. + ReconnectWait time.Duration + + // CustomReconnectDelayCB is invoked after the library tried every + // URL in the server list and failed to reconnect. It passes to the + // user the current number of attempts. This function returns the + // amount of time the library will sleep before attempting to reconnect + // again. It is strongly recommended that this value contains some + // jitter to prevent all connections to attempt reconnecting at the same time. + CustomReconnectDelayCB ReconnectDelayHandler + + // ReconnectJitter sets the upper bound for a random delay added to + // ReconnectWait during a reconnect when no TLS is used. + // Defaults to 100ms. + ReconnectJitter time.Duration + + // ReconnectJitterTLS sets the upper bound for a random delay added to + // ReconnectWait during a reconnect when TLS is used. + // Defaults to 1s. + ReconnectJitterTLS time.Duration + + // Timeout sets the timeout for a Dial operation on a connection. + // Defaults to 2s. + Timeout time.Duration + + // DrainTimeout sets the timeout for a Drain Operation to complete. + // Defaults to 30s. 
+ DrainTimeout time.Duration + + // FlusherTimeout is the maximum time to wait for write operations + // to the underlying connection to complete (including the flusher loop). + FlusherTimeout time.Duration + + // PingInterval is the period at which the client will be sending ping + // commands to the server, disabled if 0 or negative. + // Defaults to 2m. + PingInterval time.Duration + + // MaxPingsOut is the maximum number of pending ping commands that can + // be awaiting a response before raising an ErrStaleConnection error. + // Defaults to 2. + MaxPingsOut int + + // ClosedCB sets the closed handler that is called when a client will + // no longer be connected. + ClosedCB ConnHandler + + // DisconnectedCB sets the disconnected handler that is called + // whenever the connection is disconnected. + // Will not be called if DisconnectedErrCB is set + // DEPRECATED. Use DisconnectedErrCB which passes error that caused + // the disconnect event. + DisconnectedCB ConnHandler + + // DisconnectedErrCB sets the disconnected error handler that is called + // whenever the connection is disconnected. + // Disconnected error could be nil, for instance when user explicitly closes the connection. + // DisconnectedCB will not be called if DisconnectedErrCB is set + DisconnectedErrCB ConnErrHandler + + // ConnectedCB sets the connected handler called when the initial connection + // is established. It is not invoked on successful reconnects - for reconnections, + // use ReconnectedCB. ConnectedCB can be used in conjunction with RetryOnFailedConnect + // to detect whether the initial connect was successful. + ConnectedCB ConnHandler + + // ReconnectedCB sets the reconnected handler called whenever + // the connection is successfully reconnected. + ReconnectedCB ConnHandler + + // DiscoveredServersCB sets the callback that is invoked whenever a new + // server has joined the cluster. + DiscoveredServersCB ConnHandler + + // AsyncErrorCB sets the async error handler (e.g. slow consumer errors) + AsyncErrorCB ErrHandler + + // ReconnectBufSize is the size of the backing bufio during reconnect. + // Once this has been exhausted publish operations will return an error. + // Defaults to 8388608 bytes (8MB). + ReconnectBufSize int + + // SubChanLen is the size of the buffered channel used between the socket + // Go routine and the message delivery for SyncSubscriptions. + // NOTE: This does not affect AsyncSubscriptions which are + // dictated by PendingLimits() + // Defaults to 65536. + SubChanLen int + + // UserJWT sets the callback handler that will fetch a user's JWT. + UserJWT UserJWTHandler + + // Nkey sets the public nkey that will be used to authenticate + // when connecting to the server. UserJWT and Nkey are mutually exclusive + // and if defined, UserJWT will take precedence. + Nkey string + + // SignatureCB designates the function used to sign the nonce + // presented from the server. + SignatureCB SignatureHandler + + // User sets the username to be used when connecting to the server. + User string + + // Password sets the password to be used when connecting to a server. + Password string + + // Token sets the token to be used when connecting to a server. + Token string + + // TokenHandler designates the function used to generate the token to be used when connecting to a server. + TokenHandler AuthTokenHandler + + // Dialer allows a custom net.Dialer when forming connections. + // DEPRECATED: should use CustomDialer instead. 
+ Dialer *net.Dialer + + // CustomDialer allows to specify a custom dialer (not necessarily + // a *net.Dialer). + CustomDialer CustomDialer + + // UseOldRequestStyle forces the old method of Requests that utilize + // a new Inbox and a new Subscription for each request. + UseOldRequestStyle bool + + // NoCallbacksAfterClientClose allows preventing the invocation of + // callbacks after Close() is called. Client won't receive notifications + // when Close is invoked by user code. Default is to invoke the callbacks. + NoCallbacksAfterClientClose bool + + // LameDuckModeHandler sets the callback to invoke when the server notifies + // the connection that it entered lame duck mode, that is, going to + // gradually disconnect all its connections before shutting down. This is + // often used in deployments when upgrading NATS Servers. + LameDuckModeHandler ConnHandler + + // RetryOnFailedConnect sets the connection in reconnecting state right + // away if it can't connect to a server in the initial set. The + // MaxReconnect and ReconnectWait options are used for this process, + // similarly to when an established connection is disconnected. + // If a ReconnectHandler is set, it will be invoked on the first + // successful reconnect attempt (if the initial connect fails), + // and if a ClosedHandler is set, it will be invoked if + // it fails to connect (after exhausting the MaxReconnect attempts). + RetryOnFailedConnect bool + + // For websocket connections, indicates to the server that the connection + // supports compression. If the server does too, then data will be compressed. + Compression bool + + // For websocket connections, adds a path to connections url. + // This is useful when connecting to NATS behind a proxy. + ProxyPath string + + // InboxPrefix allows the default _INBOX prefix to be customized + InboxPrefix string + + // IgnoreAuthErrorAbort - if set to true, client opts out of the default connect behavior of aborting + // subsequent reconnect attempts if server returns the same auth error twice (regardless of reconnect policy). + IgnoreAuthErrorAbort bool + + // SkipHostLookup skips the DNS lookup for the server hostname. + SkipHostLookup bool +} + +const ( + // Scratch storage for assembling protocol headers + scratchSize = 512 + + // The size of the bufio reader/writer on top of the socket. + defaultBufSize = 32768 + + // The buffered size of the flush "kick" channel + flushChanSize = 1 + + // Default server pool size + srvPoolSize = 4 + + // NUID size + nuidSize = 22 + + // Default ports used if none is specified in given URL(s) + defaultWSPortString = "80" + defaultWSSPortString = "443" + defaultPortString = "4222" +) + +// A Conn represents a bare connection to a nats-server. +// It can send and receive []byte payloads. +// The connection is safe to use in multiple Go routines concurrently. +type Conn struct { + // Keep all members for which we use atomic at the beginning of the + // struct and make sure they are all 64bits (or use padding if necessary). + // atomic.* functions crash on 32bit machines if operand is not aligned + // at 64bit. See https://github.com/golang/go/issues/599 + Statistics + mu sync.RWMutex + // Opts holds the configuration of the Conn. + // Modifying the configuration of a running Conn is a race. 
+ Opts Options + wg sync.WaitGroup + srvPool []*srv + current *srv + urls map[string]struct{} // Keep track of all known URLs (used by processInfo) + conn net.Conn + bw *natsWriter + br *natsReader + fch chan struct{} + info serverInfo + ssid int64 + subsMu sync.RWMutex + subs map[int64]*Subscription + ach *asyncCallbacksHandler + pongs []chan struct{} + scratch [scratchSize]byte + status Status + initc bool // true if the connection is performing the initial connect + err error + ps *parseState + ptmr *time.Timer + pout int + ar bool // abort reconnect + rqch chan struct{} + ws bool // true if a websocket connection + + // New style response handler + respSub string // The wildcard subject + respSubPrefix string // the wildcard prefix including trailing . + respSubLen int // the length of the wildcard prefix excluding trailing . + respScanf string // The scanf template to extract mux token + respMux *Subscription // A single response subscription + respMap map[string]chan *Msg // Request map for the response msg channels + respRand *rand.Rand // Used for generating suffix + + // Msg filters for testing. + // Protected by subsMu + filters map[string]msgFilter +} + +type natsReader struct { + r io.Reader + buf []byte + off int + n int +} + +type natsWriter struct { + w io.Writer + bufs []byte + limit int + pending *bytes.Buffer + plimit int +} + +// Subscription represents interest in a given subject. +type Subscription struct { + mu sync.Mutex + sid int64 + + // Subject that represents this subscription. This can be different + // than the received subject inside a Msg if this is a wildcard. + Subject string + + // Optional queue group name. If present, all subscriptions with the + // same name will form a distributed queue, and each message will + // only be processed by one member of the group. + Queue string + + // For holding information about a JetStream consumer. + jsi *jsSub + + delivered uint64 + max uint64 + conn *Conn + mcb MsgHandler + mch chan *Msg + closed bool + sc bool + connClosed bool + + // Type of Subscription + typ SubscriptionType + + // Async linked list + pHead *Msg + pTail *Msg + pCond *sync.Cond + pDone func() + + // Pending stats, async subscriptions, high-speed etc. + pMsgs int + pBytes int + pMsgsMax int + pBytesMax int + pMsgsLimit int + pBytesLimit int + dropped int +} + +// Msg represents a message delivered by NATS. This structure is used +// by Subscribers and PublishMsg(). +// +// # Types of Acknowledgements +// +// In case using JetStream, there are multiple ways to ack a Msg: +// +// // Acknowledgement that a message has been processed. +// msg.Ack() +// +// // Negatively acknowledges a message. +// msg.Nak() +// +// // Terminate a message so that it is not redelivered further. +// msg.Term() +// +// // Signal the server that the message is being worked on and reset redelivery timer. +// msg.InProgress() +type Msg struct { + Subject string + Reply string + Header Header + Data []byte + Sub *Subscription + // Internal + next *Msg + wsz int + barrier *barrierInfo + ackd uint32 +} + +// Compares two msgs, ignores sub but checks all other public fields. 
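+// A sketch (both messages are illustrative):
+//
+//	a := &nats.Msg{Subject: "greet", Data: []byte("hi")}
+//	b := &nats.Msg{Subject: "greet", Data: []byte("hi")}
+//	equal := a.Equal(b) // true: subject, reply, data and headers all match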
+func (m *Msg) Equal(msg *Msg) bool { + if m == msg { + return true + } + if m == nil || msg == nil { + return false + } + if m.Subject != msg.Subject || m.Reply != msg.Reply { + return false + } + if !bytes.Equal(m.Data, msg.Data) { + return false + } + if len(m.Header) != len(msg.Header) { + return false + } + for k, v := range m.Header { + val, ok := msg.Header[k] + if !ok || len(v) != len(val) { + return false + } + for i, hdr := range v { + if hdr != val[i] { + return false + } + } + } + return true +} + +func (m *Msg) headerBytes() ([]byte, error) { + var hdr []byte + if len(m.Header) == 0 { + return hdr, nil + } + + var b bytes.Buffer + _, err := b.WriteString(hdrLine) + if err != nil { + return nil, ErrBadHeaderMsg + } + + err = http.Header(m.Header).Write(&b) + if err != nil { + return nil, ErrBadHeaderMsg + } + + _, err = b.WriteString(crlf) + if err != nil { + return nil, ErrBadHeaderMsg + } + + return b.Bytes(), nil +} + +type barrierInfo struct { + refs int64 + f func() +} + +// Tracks various stats received and sent on this connection, +// including counts for messages and bytes. +type Statistics struct { + InMsgs uint64 + OutMsgs uint64 + InBytes uint64 + OutBytes uint64 + Reconnects uint64 +} + +// Tracks individual backend servers. +type srv struct { + url *url.URL + didConnect bool + reconnects int + lastErr error + isImplicit bool + tlsName string +} + +// The INFO block received from the server. +type serverInfo struct { + ID string `json:"server_id"` + Name string `json:"server_name"` + Proto int `json:"proto"` + Version string `json:"version"` + Host string `json:"host"` + Port int `json:"port"` + Headers bool `json:"headers"` + AuthRequired bool `json:"auth_required,omitempty"` + TLSRequired bool `json:"tls_required,omitempty"` + TLSAvailable bool `json:"tls_available,omitempty"` + MaxPayload int64 `json:"max_payload"` + CID uint64 `json:"client_id,omitempty"` + ClientIP string `json:"client_ip,omitempty"` + Nonce string `json:"nonce,omitempty"` + Cluster string `json:"cluster,omitempty"` + ConnectURLs []string `json:"connect_urls,omitempty"` + LameDuckMode bool `json:"ldm,omitempty"` +} + +const ( + // clientProtoZero is the original client protocol from 2009. + // http://nats.io/documentation/internals/nats-protocol/ + /* clientProtoZero */ _ = iota + // clientProtoInfo signals a client can receive more then the original INFO block. + // This can be used to update clients on other cluster members, etc. + clientProtoInfo +) + +type connectInfo struct { + Verbose bool `json:"verbose"` + Pedantic bool `json:"pedantic"` + UserJWT string `json:"jwt,omitempty"` + Nkey string `json:"nkey,omitempty"` + Signature string `json:"sig,omitempty"` + User string `json:"user,omitempty"` + Pass string `json:"pass,omitempty"` + Token string `json:"auth_token,omitempty"` + TLS bool `json:"tls_required"` + Name string `json:"name"` + Lang string `json:"lang"` + Version string `json:"version"` + Protocol int `json:"protocol"` + Echo bool `json:"echo"` + Headers bool `json:"headers"` + NoResponders bool `json:"no_responders"` +} + +// MsgHandler is a callback function that processes messages delivered to +// asynchronous subscribers. +type MsgHandler func(msg *Msg) + +// Connect will attempt to connect to the NATS system. +// The url can contain username/password semantics. e.g. nats://derek:pass@localhost:4222 +// Comma separated arrays are also supported, e.g. urlA, urlB. +// Options start with the defaults but can be overridden. 
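+// A minimal sketch (error handling elided):
+//
+//	nc, _ := nats.Connect(nats.DefaultURL, nats.Name("api"))
+//	defer nc.Close()
+//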
+// To connect to a NATS Server's websocket port, use the `ws` or `wss` scheme, such as
+// `ws://localhost:8080`. Note that websocket schemes cannot be mixed with others (nats/tls).
+func Connect(url string, options ...Option) (*Conn, error) {
+	opts := GetDefaultOptions()
+	opts.Servers = processUrlString(url)
+	for _, opt := range options {
+		if opt != nil {
+			if err := opt(&opts); err != nil {
+				return nil, err
+			}
+		}
+	}
+	return opts.Connect()
+}
+
+// Options that can be passed to Connect.
+
+// Name is an Option to set the client name.
+func Name(name string) Option {
+	return func(o *Options) error {
+		o.Name = name
+		return nil
+	}
+}
+
+// InProcessServer is an Option that will try to establish a direct connection to a NATS server
+// running within the process instead of dialing via TCP.
+func InProcessServer(server InProcessConnProvider) Option {
+	return func(o *Options) error {
+		o.InProcessServer = server
+		return nil
+	}
+}
+
+// Secure is an Option to enable TLS secure connections that skip server verification by default.
+// Pass a TLS Configuration for proper TLS.
+// NOTE: This should NOT be used in a production setting.
+func Secure(tls ...*tls.Config) Option {
+	return func(o *Options) error {
+		o.Secure = true
+		// Use of variadic just simplifies testing scenarios. We only take the first one.
+		if len(tls) > 1 {
+			return ErrMultipleTLSConfigs
+		}
+		if len(tls) == 1 {
+			o.TLSConfig = tls[0]
+		}
+		return nil
+	}
+}
+
+// RootCAs is a helper option to provide the RootCAs pool from a list of filenames.
+// If Secure is not already set this will set it as well.
+func RootCAs(file ...string) Option {
+	return func(o *Options) error {
+		pool := x509.NewCertPool()
+		for _, f := range file {
+			rootPEM, err := os.ReadFile(f)
+			if err != nil || rootPEM == nil {
+				return fmt.Errorf("nats: error loading or parsing rootCA file: %w", err)
+			}
+			ok := pool.AppendCertsFromPEM(rootPEM)
+			if !ok {
+				return fmt.Errorf("nats: failed to parse root certificate from %q", f)
+			}
+		}
+		if o.TLSConfig == nil {
+			o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
+		}
+		o.TLSConfig.RootCAs = pool
+		o.Secure = true
+		return nil
+	}
+}
+
+// ClientCert is a helper option to provide the client certificate from a file.
+// If Secure is not already set this will set it as well.
+func ClientCert(certFile, keyFile string) Option {
+	return func(o *Options) error {
+		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+		if err != nil {
+			return fmt.Errorf("nats: error loading client certificate: %w", err)
+		}
+		cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
+		if err != nil {
+			return fmt.Errorf("nats: error parsing client certificate: %w", err)
+		}
+		if o.TLSConfig == nil {
+			o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12}
+		}
+		o.TLSConfig.Certificates = []tls.Certificate{cert}
+		o.Secure = true
+		return nil
+	}
+}
+
+// NoReconnect is an Option to turn off reconnect behavior.
+func NoReconnect() Option {
+	return func(o *Options) error {
+		o.AllowReconnect = false
+		return nil
+	}
+}
+
+// DontRandomize is an Option to turn off randomizing the server pool.
+func DontRandomize() Option {
+	return func(o *Options) error {
+		o.NoRandomize = true
+		return nil
+	}
+}
+
+// NoEcho is an Option to turn off messages echoing back from a server.
+// Note this is supported on servers >= version 1.2. Proto 1 or greater.
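+//
+// A usage sketch:
+//
+//	// Messages published on this connection are not echoed back to this
+//	// connection's own subscriptions.
+//	nc, err := nats.Connect(nats.DefaultURL, nats.NoEcho())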
+func NoEcho() Option {
+	return func(o *Options) error {
+		o.NoEcho = true
+		return nil
+	}
+}
+
+// ReconnectWait is an Option to set the wait time between reconnect attempts.
+// Defaults to 2s.
+func ReconnectWait(t time.Duration) Option {
+	return func(o *Options) error {
+		o.ReconnectWait = t
+		return nil
+	}
+}
+
+// MaxReconnects is an Option to set the maximum number of reconnect attempts.
+// Defaults to 60.
+func MaxReconnects(max int) Option {
+	return func(o *Options) error {
+		o.MaxReconnect = max
+		return nil
+	}
+}
+
+// ReconnectJitter is an Option to set the upper bound of a random delay added to ReconnectWait.
+// Defaults to 100ms and 1s, respectively.
+func ReconnectJitter(jitter, jitterForTLS time.Duration) Option {
+	return func(o *Options) error {
+		o.ReconnectJitter = jitter
+		o.ReconnectJitterTLS = jitterForTLS
+		return nil
+	}
+}
+
+// CustomReconnectDelay is an Option to set the CustomReconnectDelayCB option.
+// See CustomReconnectDelayCB Option for more details.
+func CustomReconnectDelay(cb ReconnectDelayHandler) Option {
+	return func(o *Options) error {
+		o.CustomReconnectDelayCB = cb
+		return nil
+	}
+}
+
+// PingInterval is an Option to set the period for client ping commands.
+// Defaults to 2m.
+func PingInterval(t time.Duration) Option {
+	return func(o *Options) error {
+		o.PingInterval = t
+		return nil
+	}
+}
+
+// MaxPingsOutstanding is an Option to set the maximum number of ping requests
+// that can go unanswered by the server before closing the connection.
+// Defaults to 2.
+func MaxPingsOutstanding(max int) Option {
+	return func(o *Options) error {
+		o.MaxPingsOut = max
+		return nil
+	}
+}
+
+// ReconnectBufSize sets the buffer size of messages kept while busy reconnecting.
+// Defaults to 8388608 bytes (8MB). It can be disabled by setting it to -1.
+func ReconnectBufSize(size int) Option {
+	return func(o *Options) error {
+		o.ReconnectBufSize = size
+		return nil
+	}
+}
+
+// Timeout is an Option to set the timeout for Dial on a connection.
+// Defaults to 2s.
+func Timeout(t time.Duration) Option {
+	return func(o *Options) error {
+		o.Timeout = t
+		return nil
+	}
+}
+
+// FlusherTimeout is an Option to set the write (and flush) timeout on a connection.
+func FlusherTimeout(t time.Duration) Option {
+	return func(o *Options) error {
+		o.FlusherTimeout = t
+		return nil
+	}
+}
+
+// DrainTimeout is an Option to set the timeout for draining a connection.
+// Defaults to 30s.
+func DrainTimeout(t time.Duration) Option {
+	return func(o *Options) error {
+		o.DrainTimeout = t
+		return nil
+	}
+}
+
+// DisconnectErrHandler is an Option to set the disconnected error handler.
+func DisconnectErrHandler(cb ConnErrHandler) Option {
+	return func(o *Options) error {
+		o.DisconnectedErrCB = cb
+		return nil
+	}
+}
+
+// DisconnectHandler is an Option to set the disconnected handler.
+// DEPRECATED: Use DisconnectErrHandler.
+func DisconnectHandler(cb ConnHandler) Option {
+	return func(o *Options) error {
+		o.DisconnectedCB = cb
+		return nil
+	}
+}
+
+// ConnectHandler is an Option to set the connected handler.
+func ConnectHandler(cb ConnHandler) Option {
+	return func(o *Options) error {
+		o.ConnectedCB = cb
+		return nil
+	}
+}
+
+// ReconnectHandler is an Option to set the reconnected handler.
+func ReconnectHandler(cb ConnHandler) Option {
+	return func(o *Options) error {
+		o.ReconnectedCB = cb
+		return nil
+	}
+}
+
+// ClosedHandler is an Option to set the closed handler.
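+//
+// For example, to be notified once the connection is permanently closed
+// (LastError is the connection's last recorded error):
+//
+//	nats.ClosedHandler(func(nc *nats.Conn) {
+//		log.Println("connection closed, last error:", nc.LastError())
+//	})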
+func ClosedHandler(cb ConnHandler) Option {
+	return func(o *Options) error {
+		o.ClosedCB = cb
+		return nil
+	}
+}
+
+// DiscoveredServersHandler is an Option to set the new servers handler.
+func DiscoveredServersHandler(cb ConnHandler) Option {
+	return func(o *Options) error {
+		o.DiscoveredServersCB = cb
+		return nil
+	}
+}
+
+// ErrorHandler is an Option to set the async error handler.
+func ErrorHandler(cb ErrHandler) Option {
+	return func(o *Options) error {
+		o.AsyncErrorCB = cb
+		return nil
+	}
+}
+
+// UserInfo is an Option to set the username and password to
+// use when not included directly in the URLs.
+func UserInfo(user, password string) Option {
+	return func(o *Options) error {
+		o.User = user
+		o.Password = password
+		return nil
+	}
+}
+
+// Token is an Option to set the token to use
+// when a token is not included directly in the URLs
+// and when a token handler is not provided.
+func Token(token string) Option {
+	return func(o *Options) error {
+		if o.TokenHandler != nil {
+			return ErrTokenAlreadySet
+		}
+		o.Token = token
+		return nil
+	}
+}
+
+// TokenHandler is an Option to set the token handler to use
+// when a token is not included directly in the URLs
+// and when a token is not set.
+func TokenHandler(cb AuthTokenHandler) Option {
+	return func(o *Options) error {
+		if o.Token != "" {
+			return ErrTokenAlreadySet
+		}
+		o.TokenHandler = cb
+		return nil
+	}
+}
+
+// UserCredentials is a convenience function that takes a filename
+// for a user's JWT and a filename for the user's private Nkey seed.
+func UserCredentials(userOrChainedFile string, seedFiles ...string) Option {
+	userCB := func() (string, error) {
+		return userFromFile(userOrChainedFile)
+	}
+	var keyFile string
+	if len(seedFiles) > 0 {
+		keyFile = seedFiles[0]
+	} else {
+		keyFile = userOrChainedFile
+	}
+	sigCB := func(nonce []byte) ([]byte, error) {
+		return sigHandler(nonce, keyFile)
+	}
+	return UserJWT(userCB, sigCB)
+}
+
+// UserJWTAndSeed is a convenience function that takes the JWT and seed
+// values as strings.
+func UserJWTAndSeed(jwt string, seed string) Option {
+	userCB := func() (string, error) {
+		return jwt, nil
+	}
+
+	sigCB := func(nonce []byte) ([]byte, error) {
+		kp, err := nkeys.FromSeed([]byte(seed))
+		if err != nil {
+			return nil, fmt.Errorf("unable to extract key pair from seed: %w", err)
+		}
+		// Wipe our key on exit.
+		defer kp.Wipe()
+
+		sig, _ := kp.Sign(nonce)
+		return sig, nil
+	}
+
+	return UserJWT(userCB, sigCB)
+}
+
+// UserJWT will set the callbacks to retrieve the user's JWT and
+// the signature callback to sign the server nonce. This and the Nkey
+// option are mutually exclusive.
+func UserJWT(userCB UserJWTHandler, sigCB SignatureHandler) Option {
+	return func(o *Options) error {
+		if userCB == nil {
+			return ErrNoUserCB
+		}
+		if sigCB == nil {
+			return ErrUserButNoSigCB
+		}
+		// Smoke test the user callback to ensure it is set up properly
+		// when processing options.
+		if _, err := userCB(); err != nil {
+			return err
+		}
+
+		o.UserJWT = userCB
+		o.SignatureCB = sigCB
+		return nil
+	}
+}
+
+// Nkey will set the public Nkey and the signature callback to
+// sign the server nonce.
+func Nkey(pubKey string, sigCB SignatureHandler) Option {
+	return func(o *Options) error {
+		o.Nkey = pubKey
+		o.SignatureCB = sigCB
+		if pubKey != "" && sigCB == nil {
+			return ErrNkeyButNoSigCB
+		}
+		return nil
+	}
+}
+
+// SyncQueueLen will set the maximum queue len for the internal
+// channel used for SubscribeSync().
+// Defaults to 65536.
+func SyncQueueLen(max int) Option { + return func(o *Options) error { + o.SubChanLen = max + return nil + } +} + +// Dialer is an Option to set the dialer which will be used when +// attempting to establish a connection. +// DEPRECATED: Should use CustomDialer instead. +func Dialer(dialer *net.Dialer) Option { + return func(o *Options) error { + o.Dialer = dialer + return nil + } +} + +// SetCustomDialer is an Option to set a custom dialer which will be +// used when attempting to establish a connection. If both Dialer +// and CustomDialer are specified, CustomDialer takes precedence. +func SetCustomDialer(dialer CustomDialer) Option { + return func(o *Options) error { + o.CustomDialer = dialer + return nil + } +} + +// UseOldRequestStyle is an Option to force usage of the old Request style. +func UseOldRequestStyle() Option { + return func(o *Options) error { + o.UseOldRequestStyle = true + return nil + } +} + +// NoCallbacksAfterClientClose is an Option to disable callbacks when user code +// calls Close(). If close is initiated by any other condition, callbacks +// if any will be invoked. +func NoCallbacksAfterClientClose() Option { + return func(o *Options) error { + o.NoCallbacksAfterClientClose = true + return nil + } +} + +// LameDuckModeHandler sets the callback to invoke when the server notifies +// the connection that it entered lame duck mode, that is, going to +// gradually disconnect all its connections before shutting down. This is +// often used in deployments when upgrading NATS Servers. +func LameDuckModeHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.LameDuckModeHandler = cb + return nil + } +} + +// RetryOnFailedConnect sets the connection in reconnecting state right away +// if it can't connect to a server in the initial set. +// See RetryOnFailedConnect option for more details. +func RetryOnFailedConnect(retry bool) Option { + return func(o *Options) error { + o.RetryOnFailedConnect = retry + return nil + } +} + +// Compression is an Option to indicate if this connection supports +// compression. Currently only supported for Websocket connections. +func Compression(enabled bool) Option { + return func(o *Options) error { + o.Compression = enabled + return nil + } +} + +// ProxyPath is an option for websocket connections that adds a path to connections url. +// This is useful when connecting to NATS behind a proxy. +func ProxyPath(path string) Option { + return func(o *Options) error { + o.ProxyPath = path + return nil + } +} + +// CustomInboxPrefix configures the request + reply inbox prefix +func CustomInboxPrefix(p string) Option { + return func(o *Options) error { + if p == "" || strings.Contains(p, ">") || strings.Contains(p, "*") || strings.HasSuffix(p, ".") { + return fmt.Errorf("nats: invalid custom prefix") + } + o.InboxPrefix = p + return nil + } +} + +// IgnoreAuthErrorAbort opts out of the default connect behavior of aborting +// subsequent reconnect attempts if server returns the same auth error twice. +func IgnoreAuthErrorAbort() Option { + return func(o *Options) error { + o.IgnoreAuthErrorAbort = true + return nil + } +} + +// SkipHostLookup is an Option to skip the host lookup when connecting to a server. +func SkipHostLookup() Option { + return func(o *Options) error { + o.SkipHostLookup = true + return nil + } +} + +// Handler processing + +// SetDisconnectHandler will set the disconnect event handler. 
+// DEPRECATED: Use SetDisconnectErrHandler +func (nc *Conn) SetDisconnectHandler(dcb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.DisconnectedCB = dcb +} + +// SetDisconnectErrHandler will set the disconnect event handler. +func (nc *Conn) SetDisconnectErrHandler(dcb ConnErrHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.DisconnectedErrCB = dcb +} + +// DisconnectErrHandler will return the disconnect event handler. +func (nc *Conn) DisconnectErrHandler() ConnErrHandler { + if nc == nil { + return nil + } + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.Opts.DisconnectedErrCB +} + +// SetReconnectHandler will set the reconnect event handler. +func (nc *Conn) SetReconnectHandler(rcb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.ReconnectedCB = rcb +} + +// ReconnectHandler will return the reconnect event handler. +func (nc *Conn) ReconnectHandler() ConnHandler { + if nc == nil { + return nil + } + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.Opts.ReconnectedCB +} + +// SetDiscoveredServersHandler will set the discovered servers handler. +func (nc *Conn) SetDiscoveredServersHandler(dscb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.DiscoveredServersCB = dscb +} + +// DiscoveredServersHandler will return the discovered servers handler. +func (nc *Conn) DiscoveredServersHandler() ConnHandler { + if nc == nil { + return nil + } + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.Opts.DiscoveredServersCB +} + +// SetClosedHandler will set the closed event handler. +func (nc *Conn) SetClosedHandler(cb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.ClosedCB = cb +} + +// ClosedHandler will return the closed event handler. +func (nc *Conn) ClosedHandler() ConnHandler { + if nc == nil { + return nil + } + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.Opts.ClosedCB +} + +// SetErrorHandler will set the async error handler. +func (nc *Conn) SetErrorHandler(cb ErrHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.AsyncErrorCB = cb +} + +// ErrorHandler will return the async error handler. +func (nc *Conn) ErrorHandler() ErrHandler { + if nc == nil { + return nil + } + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.Opts.AsyncErrorCB +} + +// Process the url string argument to Connect. +// Return an array of urls, even if only one. +func processUrlString(url string) []string { + urls := strings.Split(url, ",") + var j int + for _, s := range urls { + u := strings.TrimSpace(s) + if len(u) > 0 { + urls[j] = u + j++ + } + } + return urls[:j] +} + +// Connect will attempt to connect to a NATS server with multiple options. +func (o Options) Connect() (*Conn, error) { + nc := &Conn{Opts: o} + + // Some default options processing. + if nc.Opts.MaxPingsOut == 0 { + nc.Opts.MaxPingsOut = DefaultMaxPingOut + } + // Allow old default for channel length to work correctly. + if nc.Opts.SubChanLen == 0 { + nc.Opts.SubChanLen = DefaultMaxChanLen + } + // Default ReconnectBufSize + if nc.Opts.ReconnectBufSize == 0 { + nc.Opts.ReconnectBufSize = DefaultReconnectBufSize + } + // Ensure that Timeout is not 0 + if nc.Opts.Timeout == 0 { + nc.Opts.Timeout = DefaultTimeout + } + + // Check first for user jwt callback being defined and nkey. 
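+	// (Note: the UserJWT and Nkey options are mutually exclusive; setting
+	// both is rejected below with ErrNkeyAndUser.)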
+ if nc.Opts.UserJWT != nil && nc.Opts.Nkey != "" { + return nil, ErrNkeyAndUser + } + + // Check if we have an nkey but no signature callback defined. + if nc.Opts.Nkey != "" && nc.Opts.SignatureCB == nil { + return nil, ErrNkeyButNoSigCB + } + + // Allow custom Dialer for connecting using a timeout by default + if nc.Opts.Dialer == nil { + nc.Opts.Dialer = &net.Dialer{ + Timeout: nc.Opts.Timeout, + } + } + + if err := nc.setupServerPool(); err != nil { + return nil, err + } + + // Create the async callback handler. + nc.ach = &asyncCallbacksHandler{} + nc.ach.cond = sync.NewCond(&nc.ach.mu) + + // Set a default error handler that will print to stderr. + if nc.Opts.AsyncErrorCB == nil { + nc.Opts.AsyncErrorCB = defaultErrHandler + } + + // Create reader/writer + nc.newReaderWriter() + + connectionEstablished, err := nc.connect() + if err != nil { + return nil, err + } + + // Spin up the async cb dispatcher on success + go nc.ach.asyncCBDispatcher() + + if connectionEstablished && nc.Opts.ConnectedCB != nil { + nc.ach.push(func() { nc.Opts.ConnectedCB(nc) }) + } + + return nc, nil +} + +func defaultErrHandler(nc *Conn, sub *Subscription, err error) { + var cid uint64 + if nc != nil { + nc.mu.RLock() + cid = nc.info.CID + nc.mu.RUnlock() + } + var errStr string + if sub != nil { + var subject string + sub.mu.Lock() + if sub.jsi != nil { + subject = sub.jsi.psubj + } else { + subject = sub.Subject + } + sub.mu.Unlock() + errStr = fmt.Sprintf("%s on connection [%d] for subscription on %q\n", err.Error(), cid, subject) + } else { + errStr = fmt.Sprintf("%s on connection [%d]\n", err.Error(), cid) + } + os.Stderr.WriteString(errStr) +} + +const ( + _CRLF_ = "\r\n" + _EMPTY_ = "" + _SPC_ = " " + _PUB_P_ = "PUB " + _HPUB_P_ = "HPUB " +) + +var _CRLF_BYTES_ = []byte(_CRLF_) + +const ( + _OK_OP_ = "+OK" + _ERR_OP_ = "-ERR" + _PONG_OP_ = "PONG" + _INFO_OP_ = "INFO" +) + +const ( + connectProto = "CONNECT %s" + _CRLF_ + pingProto = "PING" + _CRLF_ + pongProto = "PONG" + _CRLF_ + subProto = "SUB %s %s %d" + _CRLF_ + unsubProto = "UNSUB %d %s" + _CRLF_ + okProto = _OK_OP_ + _CRLF_ +) + +// Return the currently selected server +func (nc *Conn) currentServer() (int, *srv) { + for i, s := range nc.srvPool { + if s == nil { + continue + } + if s == nc.current { + return i, s + } + } + return -1, nil +} + +// Pop the current server and put onto the end of the list. Select head of list as long +// as number of reconnect attempts under MaxReconnect. +func (nc *Conn) selectNextServer() (*srv, error) { + i, s := nc.currentServer() + if i < 0 { + return nil, ErrNoServers + } + sp := nc.srvPool + num := len(sp) + copy(sp[i:num-1], sp[i+1:num]) + maxReconnect := nc.Opts.MaxReconnect + if maxReconnect < 0 || s.reconnects < maxReconnect { + nc.srvPool[num-1] = s + } else { + nc.srvPool = sp[0 : num-1] + } + if len(nc.srvPool) <= 0 { + nc.current = nil + return nil, ErrNoServers + } + nc.current = nc.srvPool[0] + return nc.srvPool[0], nil +} + +// Will assign the correct server to nc.current +func (nc *Conn) pickServer() error { + nc.current = nil + if len(nc.srvPool) <= 0 { + return ErrNoServers + } + + for _, s := range nc.srvPool { + if s != nil { + nc.current = s + return nil + } + } + return ErrNoServers +} + +const tlsScheme = "tls" + +// Create the server pool using the options given. +// We will place a Url option first, followed by any +// Server Options. We will randomize the server pool unless +// the NoRandomize flag is set. 
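+// For example, with Options{Url: "nats://a:4222", Servers: []string{"nats://b:4222"}}
+// (illustrative values), the pool ends up holding both URLs, with the explicit
+// Url swapped into the first slot so that it is always tried first.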
+func (nc *Conn) setupServerPool() error { + nc.srvPool = make([]*srv, 0, srvPoolSize) + nc.urls = make(map[string]struct{}, srvPoolSize) + + // Create srv objects from each url string in nc.Opts.Servers + // and add them to the pool. + for _, urlString := range nc.Opts.Servers { + if err := nc.addURLToPool(urlString, false, false); err != nil { + return err + } + } + + // Randomize if allowed to + if !nc.Opts.NoRandomize { + nc.shufflePool(0) + } + + // Normally, if this one is set, Options.Servers should not be, + // but we always allowed that, so continue to do so. + if nc.Opts.Url != _EMPTY_ { + // Add to the end of the array + if err := nc.addURLToPool(nc.Opts.Url, false, false); err != nil { + return err + } + // Then swap it with first to guarantee that Options.Url is tried first. + last := len(nc.srvPool) - 1 + if last > 0 { + nc.srvPool[0], nc.srvPool[last] = nc.srvPool[last], nc.srvPool[0] + } + } else if len(nc.srvPool) <= 0 { + // Place default URL if pool is empty. + if err := nc.addURLToPool(DefaultURL, false, false); err != nil { + return err + } + } + + // Check for Scheme hint to move to TLS mode. + for _, srv := range nc.srvPool { + if srv.url.Scheme == tlsScheme || srv.url.Scheme == wsSchemeTLS { + // FIXME(dlc), this is for all in the pool, should be case by case. + nc.Opts.Secure = true + if nc.Opts.TLSConfig == nil { + nc.Opts.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + } + } + } + + return nc.pickServer() +} + +// Helper function to return scheme +func (nc *Conn) connScheme() string { + if nc.ws { + if nc.Opts.Secure { + return wsSchemeTLS + } + return wsScheme + } + if nc.Opts.Secure { + return tlsScheme + } + return "nats" +} + +// Return true iff u.Hostname() is an IP address. +func hostIsIP(u *url.URL) bool { + return net.ParseIP(u.Hostname()) != nil +} + +// addURLToPool adds an entry to the server pool +func (nc *Conn) addURLToPool(sURL string, implicit, saveTLSName bool) error { + if !strings.Contains(sURL, "://") { + sURL = fmt.Sprintf("%s://%s", nc.connScheme(), sURL) + } + var ( + u *url.URL + err error + ) + for i := 0; i < 2; i++ { + u, err = url.Parse(sURL) + if err != nil { + return err + } + if u.Port() != "" { + break + } + // In case given URL is of the form "localhost:", just add + // the port number at the end, otherwise, add ":4222". + if sURL[len(sURL)-1] != ':' { + sURL += ":" + } + switch u.Scheme { + case wsScheme: + sURL += defaultWSPortString + case wsSchemeTLS: + sURL += defaultWSSPortString + default: + sURL += defaultPortString + } + } + + isWS := isWebsocketScheme(u) + // We don't support mix and match of websocket and non websocket URLs. + // If this is the first URL, then we accept and switch the global state + // to websocket. After that, we will know how to reject mixed URLs. + if len(nc.srvPool) == 0 { + nc.ws = isWS + } else if isWS && !nc.ws || !isWS && nc.ws { + return fmt.Errorf("mixing of websocket and non websocket URLs is not allowed") + } + + var tlsName string + if implicit { + curl := nc.current.url + // Check to see if we do not have a url.User but current connected + // url does. If so copy over. + if u.User == nil && curl.User != nil { + u.User = curl.User + } + // We are checking to see if we have a secure connection and are + // adding an implicit server that just has an IP. If so we will remember + // the current hostname we are connected to. 
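+		// (The saved hostname is later used as tls.Config.ServerName in
+		// makeTLSConn(), so certificate verification keeps working even
+		// though the implicit URL itself only carries an IP address.)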
+ if saveTLSName && hostIsIP(u) { + tlsName = curl.Hostname() + } + } + + s := &srv{url: u, isImplicit: implicit, tlsName: tlsName} + nc.srvPool = append(nc.srvPool, s) + nc.urls[u.Host] = struct{}{} + return nil +} + +// shufflePool swaps randomly elements in the server pool +// The `offset` value indicates that the shuffling should start at +// this offset and leave the elements from [0..offset) intact. +func (nc *Conn) shufflePool(offset int) { + if len(nc.srvPool) <= offset+1 { + return + } + source := rand.NewSource(time.Now().UnixNano()) + r := rand.New(source) + for i := offset; i < len(nc.srvPool); i++ { + j := offset + r.Intn(i+1-offset) + nc.srvPool[i], nc.srvPool[j] = nc.srvPool[j], nc.srvPool[i] + } +} + +func (nc *Conn) newReaderWriter() { + nc.br = &natsReader{ + buf: make([]byte, defaultBufSize), + off: -1, + } + nc.bw = &natsWriter{ + limit: defaultBufSize, + plimit: nc.Opts.ReconnectBufSize, + } +} + +func (nc *Conn) bindToNewConn() { + bw := nc.bw + bw.w, bw.bufs = nc.newWriter(), nil + br := nc.br + br.r, br.n, br.off = nc.conn, 0, -1 +} + +func (nc *Conn) newWriter() io.Writer { + var w io.Writer = nc.conn + if nc.Opts.FlusherTimeout > 0 { + w = &timeoutWriter{conn: nc.conn, timeout: nc.Opts.FlusherTimeout} + } + return w +} + +func (w *natsWriter) appendString(str string) error { + return w.appendBufs([]byte(str)) +} + +func (w *natsWriter) appendBufs(bufs ...[]byte) error { + for _, buf := range bufs { + if len(buf) == 0 { + continue + } + if w.pending != nil { + w.pending.Write(buf) + } else { + w.bufs = append(w.bufs, buf...) + } + } + if w.pending == nil && len(w.bufs) >= w.limit { + return w.flush() + } + return nil +} + +func (w *natsWriter) writeDirect(strs ...string) error { + for _, str := range strs { + if _, err := w.w.Write([]byte(str)); err != nil { + return err + } + } + return nil +} + +func (w *natsWriter) flush() error { + // If a pending buffer is set, we don't flush. Code that needs to + // write directly to the socket, by-passing buffers during (re)connect, + // will use the writeDirect() API. + if w.pending != nil { + return nil + } + // Do not skip calling w.w.Write() here if len(w.bufs) is 0 because + // the actual writer (if websocket for instance) may have things + // to do such as sending control frames, etc.. + _, err := w.w.Write(w.bufs) + w.bufs = w.bufs[:0] + return err +} + +func (w *natsWriter) buffered() int { + if w.pending != nil { + return w.pending.Len() + } + return len(w.bufs) +} + +func (w *natsWriter) switchToPending() { + w.pending = new(bytes.Buffer) +} + +func (w *natsWriter) flushPendingBuffer() error { + if w.pending == nil || w.pending.Len() == 0 { + return nil + } + _, err := w.w.Write(w.pending.Bytes()) + // Reset the pending buffer at this point because we don't want + // to take the risk of sending duplicates or partials. + w.pending.Reset() + return err +} + +func (w *natsWriter) atLimitIfUsingPending() bool { + if w.pending == nil { + return false + } + return w.pending.Len() >= w.plimit +} + +func (w *natsWriter) doneWithPending() { + w.pending = nil +} + +// Notify the reader that we are done with the connect, where "read" operations +// happen synchronously and under the connection lock. After this point, "read" +// will be happening from the read loop, without the connection lock. +// +// Note: this runs under the connection lock. 
+func (r *natsReader) doneWithConnect() { + if wsr, ok := r.r.(*websocketReader); ok { + wsr.doneWithConnect() + } +} + +func (r *natsReader) Read() ([]byte, error) { + if r.off >= 0 { + off := r.off + r.off = -1 + return r.buf[off:r.n], nil + } + var err error + r.n, err = r.r.Read(r.buf) + return r.buf[:r.n], err +} + +func (r *natsReader) ReadString(delim byte) (string, error) { + var s string +build_string: + // First look if we have something in the buffer + if r.off >= 0 { + i := bytes.IndexByte(r.buf[r.off:r.n], delim) + if i >= 0 { + end := r.off + i + 1 + s += string(r.buf[r.off:end]) + r.off = end + if r.off >= r.n { + r.off = -1 + } + return s, nil + } + // We did not find the delim, so will have to read more. + s += string(r.buf[r.off:r.n]) + r.off = -1 + } + if _, err := r.Read(); err != nil { + return s, err + } + r.off = 0 + goto build_string +} + +// createConn will connect to the server and wrap the appropriate +// bufio structures. It will do the right thing when an existing +// connection is in place. +func (nc *Conn) createConn() (err error) { + if nc.Opts.Timeout < 0 { + return ErrBadTimeout + } + if _, cur := nc.currentServer(); cur == nil { + return ErrNoServers + } + + // If we have a reference to an in-process server then establish a + // connection using that. + if nc.Opts.InProcessServer != nil { + conn, err := nc.Opts.InProcessServer.InProcessConn() + if err != nil { + return fmt.Errorf("failed to get in-process connection: %w", err) + } + nc.conn = conn + nc.bindToNewConn() + return nil + } + + // We will auto-expand host names if they resolve to multiple IPs + hosts := []string{} + u := nc.current.url + + if !nc.Opts.SkipHostLookup && net.ParseIP(u.Hostname()) == nil { + addrs, _ := net.LookupHost(u.Hostname()) + for _, addr := range addrs { + hosts = append(hosts, net.JoinHostPort(addr, u.Port())) + } + } + // Fall back to what we were given. + if len(hosts) == 0 { + hosts = append(hosts, u.Host) + } + + // CustomDialer takes precedence. If not set, use Opts.Dialer which + // is set to a default *net.Dialer (in Connect()) if not explicitly + // set by the user. + dialer := nc.Opts.CustomDialer + if dialer == nil { + // We will copy and shorten the timeout if we have multiple hosts to try. + copyDialer := *nc.Opts.Dialer + copyDialer.Timeout = copyDialer.Timeout / time.Duration(len(hosts)) + dialer = ©Dialer + } + + if len(hosts) > 1 && !nc.Opts.NoRandomize { + rand.Shuffle(len(hosts), func(i, j int) { + hosts[i], hosts[j] = hosts[j], hosts[i] + }) + } + for _, host := range hosts { + nc.conn, err = dialer.Dial("tcp", host) + if err == nil { + break + } + } + if err != nil { + return err + } + + // If scheme starts with "ws" then branch out to websocket code. + if isWebsocketScheme(u) { + return nc.wsInitHandshake(u) + } + + // Reset reader/writer to this new TCP connection + nc.bindToNewConn() + return nil +} + +type skipTLSDialer interface { + SkipTLSHandshake() bool +} + +// makeTLSConn will wrap an existing Conn using TLS +func (nc *Conn) makeTLSConn() error { + if nc.Opts.CustomDialer != nil { + // we do nothing when asked to skip the TLS wrapper + sd, ok := nc.Opts.CustomDialer.(skipTLSDialer) + if ok && sd.SkipTLSHandshake() { + return nil + } + } + // Allow the user to configure their own tls.Config structure. 
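+	// (The config is cloned before use so that the mutations below, such as
+	// filling in ServerName, do not leak back into the caller's Options.TLSConfig.)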
+ var tlsCopy *tls.Config + if nc.Opts.TLSConfig != nil { + tlsCopy = util.CloneTLSConfig(nc.Opts.TLSConfig) + } else { + tlsCopy = &tls.Config{} + } + // If its blank we will override it with the current host + if tlsCopy.ServerName == _EMPTY_ { + if nc.current.tlsName != _EMPTY_ { + tlsCopy.ServerName = nc.current.tlsName + } else { + h, _, _ := net.SplitHostPort(nc.current.url.Host) + tlsCopy.ServerName = h + } + } + nc.conn = tls.Client(nc.conn, tlsCopy) + conn := nc.conn.(*tls.Conn) + if err := conn.Handshake(); err != nil { + return err + } + nc.bindToNewConn() + return nil +} + +// TLSConnectionState retrieves the state of the TLS connection to the server +func (nc *Conn) TLSConnectionState() (tls.ConnectionState, error) { + if !nc.isConnected() { + return tls.ConnectionState{}, ErrDisconnected + } + + nc.mu.RLock() + conn := nc.conn + nc.mu.RUnlock() + + tc, ok := conn.(*tls.Conn) + if !ok { + return tls.ConnectionState{}, ErrConnectionNotTLS + } + + return tc.ConnectionState(), nil +} + +// waitForExits will wait for all socket watcher Go routines to +// be shutdown before proceeding. +func (nc *Conn) waitForExits() { + // Kick old flusher forcefully. + select { + case nc.fch <- struct{}{}: + default: + } + + // Wait for any previous go routines. + nc.wg.Wait() +} + +// ConnectedUrl reports the connected server's URL +func (nc *Conn) ConnectedUrl() string { + if nc == nil { + return _EMPTY_ + } + + nc.mu.RLock() + defer nc.mu.RUnlock() + + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.current.url.String() +} + +// ConnectedUrlRedacted reports the connected server's URL with passwords redacted +func (nc *Conn) ConnectedUrlRedacted() string { + if nc == nil { + return _EMPTY_ + } + + nc.mu.RLock() + defer nc.mu.RUnlock() + + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.current.url.Redacted() +} + +// ConnectedAddr returns the connected server's IP +func (nc *Conn) ConnectedAddr() string { + if nc == nil { + return _EMPTY_ + } + + nc.mu.RLock() + defer nc.mu.RUnlock() + + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.conn.RemoteAddr().String() +} + +// ConnectedServerId reports the connected server's Id +func (nc *Conn) ConnectedServerId() string { + if nc == nil { + return _EMPTY_ + } + + nc.mu.RLock() + defer nc.mu.RUnlock() + + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.info.ID +} + +// ConnectedServerName reports the connected server's name +func (nc *Conn) ConnectedServerName() string { + if nc == nil { + return _EMPTY_ + } + + nc.mu.RLock() + defer nc.mu.RUnlock() + + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.info.Name +} + +var semVerRe = regexp.MustCompile(`\Av?([0-9]+)\.?([0-9]+)?\.?([0-9]+)?`) + +func versionComponents(version string) (major, minor, patch int, err error) { + m := semVerRe.FindStringSubmatch(version) + if m == nil { + return 0, 0, 0, errors.New("invalid semver") + } + major, err = strconv.Atoi(m[1]) + if err != nil { + return -1, -1, -1, err + } + minor, err = strconv.Atoi(m[2]) + if err != nil { + return -1, -1, -1, err + } + patch, err = strconv.Atoi(m[3]) + if err != nil { + return -1, -1, -1, err + } + return major, minor, patch, err +} + +// Check for minimum server requirement. 
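+// For example, serverMinVersion(2, 2, 0) reports whether the connected server
+// is running at least version 2.2.0.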
+func (nc *Conn) serverMinVersion(major, minor, patch int) bool { + smajor, sminor, spatch, _ := versionComponents(nc.ConnectedServerVersion()) + if smajor < major || (smajor == major && sminor < minor) || (smajor == major && sminor == minor && spatch < patch) { + return false + } + return true +} + +// ConnectedServerVersion reports the connected server's version as a string +func (nc *Conn) ConnectedServerVersion() string { + if nc == nil { + return _EMPTY_ + } + + nc.mu.RLock() + defer nc.mu.RUnlock() + + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.info.Version +} + +// ConnectedClusterName reports the connected server's cluster name if any +func (nc *Conn) ConnectedClusterName() string { + if nc == nil { + return _EMPTY_ + } + + nc.mu.RLock() + defer nc.mu.RUnlock() + + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.info.Cluster +} + +// Low level setup for structs, etc +func (nc *Conn) setup() { + nc.subs = make(map[int64]*Subscription) + nc.pongs = make([]chan struct{}, 0, 8) + + nc.fch = make(chan struct{}, flushChanSize) + nc.rqch = make(chan struct{}) + + // Setup scratch outbound buffer for PUB/HPUB + pub := nc.scratch[:len(_HPUB_P_)] + copy(pub, _HPUB_P_) +} + +// Process a connected connection and initialize properly. +func (nc *Conn) processConnectInit() error { + + // Set our deadline for the whole connect process + nc.conn.SetDeadline(time.Now().Add(nc.Opts.Timeout)) + defer nc.conn.SetDeadline(time.Time{}) + + // Set our status to connecting. + nc.status = CONNECTING + + // Process the INFO protocol received from the server + err := nc.processExpectedInfo() + if err != nil { + return err + } + + // Send the CONNECT protocol along with the initial PING protocol. + // Wait for the PONG response (or any error that we get from the server). + err = nc.sendConnect() + if err != nil { + return err + } + + // Reset the number of PING sent out + nc.pout = 0 + + // Start or reset Timer + if nc.Opts.PingInterval > 0 { + if nc.ptmr == nil { + nc.ptmr = time.AfterFunc(nc.Opts.PingInterval, nc.processPingTimer) + } else { + nc.ptmr.Reset(nc.Opts.PingInterval) + } + } + + // Start the readLoop and flusher go routines, we will wait on both on a reconnect event. + nc.wg.Add(2) + go nc.readLoop() + go nc.flusher() + + // Notify the reader that we are done with the connect handshake, where + // reads were done synchronously and under the connection lock. + nc.br.doneWithConnect() + + return nil +} + +// Main connect function. Will connect to the nats-server. +func (nc *Conn) connect() (bool, error) { + var err error + var connectionEstablished bool + + // Create actual socket connection + // For first connect we walk all servers in the pool and try + // to connect immediately. + nc.mu.Lock() + defer nc.mu.Unlock() + nc.initc = true + // The pool may change inside the loop iteration due to INFO protocol. + for i := 0; i < len(nc.srvPool); i++ { + nc.current = nc.srvPool[i] + + if err = nc.createConn(); err == nil { + // This was moved out of processConnectInit() because + // that function is now invoked from doReconnect() too. + nc.setup() + + err = nc.processConnectInit() + + if err == nil { + nc.current.didConnect = true + nc.current.reconnects = 0 + nc.current.lastErr = nil + break + } else { + nc.mu.Unlock() + nc.close(DISCONNECTED, false, err) + nc.mu.Lock() + // Do not reset nc.current here since it would prevent + // RetryOnFailedConnect to work should this be the last server + // to try before starting doReconnect(). 
+ } + } else { + // Cancel out default connection refused, will trigger the + // No servers error conditional + if strings.Contains(err.Error(), "connection refused") { + err = nil + } + } + } + + if err == nil && nc.status != CONNECTED { + err = ErrNoServers + } + + if err == nil { + connectionEstablished = true + nc.initc = false + } else if nc.Opts.RetryOnFailedConnect { + nc.setup() + nc.status = RECONNECTING + nc.bw.switchToPending() + go nc.doReconnect(ErrNoServers) + err = nil + } else { + nc.current = nil + } + + return connectionEstablished, err +} + +// This will check to see if the connection should be +// secure. This can be dictated from either end and should +// only be called after the INIT protocol has been received. +func (nc *Conn) checkForSecure() error { + // Check to see if we need to engage TLS + o := nc.Opts + + // Check for mismatch in setups + if o.Secure && !nc.info.TLSRequired && !nc.info.TLSAvailable { + return ErrSecureConnWanted + } else if nc.info.TLSRequired && !o.Secure { + // Switch to Secure since server needs TLS. + o.Secure = true + } + + // Need to rewrap with bufio + if o.Secure { + if err := nc.makeTLSConn(); err != nil { + return err + } + } + return nil +} + +// processExpectedInfo will look for the expected first INFO message +// sent when a connection is established. The lock should be held entering. +func (nc *Conn) processExpectedInfo() error { + + c := &control{} + + // Read the protocol + err := nc.readOp(c) + if err != nil { + return err + } + + // The nats protocol should send INFO first always. + if c.op != _INFO_OP_ { + return ErrNoInfoReceived + } + + // Parse the protocol + if err := nc.processInfo(c.args); err != nil { + return err + } + + if nc.Opts.Nkey != "" && nc.info.Nonce == "" { + return ErrNkeysNotSupported + } + + // For websocket connections, we already switched to TLS if need be, + // so we are done here. + if nc.ws { + return nil + } + + return nc.checkForSecure() +} + +// Sends a protocol control message by queuing into the bufio writer +// and kicking the flush Go routine. These writes are protected. +func (nc *Conn) sendProto(proto string) { + nc.mu.Lock() + nc.bw.appendString(proto) + nc.kickFlusher() + nc.mu.Unlock() +} + +// Generate a connect protocol message, issuing user/password if +// applicable. The lock is assumed to be held upon entering. +func (nc *Conn) connectProto() (string, error) { + o := nc.Opts + var nkey, sig, user, pass, token, ujwt string + u := nc.current.url.User + if u != nil { + // if no password, assume username is authToken + if _, ok := u.Password(); !ok { + token = u.Username() + } else { + user = u.Username() + pass, _ = u.Password() + } + } else { + // Take from options (possibly all empty strings) + user = o.User + pass = o.Password + token = o.Token + nkey = o.Nkey + } + + // Look for user jwt. 
+ if o.UserJWT != nil { + if jwt, err := o.UserJWT(); err != nil { + return _EMPTY_, err + } else { + ujwt = jwt + } + if nkey != _EMPTY_ { + return _EMPTY_, ErrNkeyAndUser + } + } + + if ujwt != _EMPTY_ || nkey != _EMPTY_ { + if o.SignatureCB == nil { + if ujwt == _EMPTY_ { + return _EMPTY_, ErrNkeyButNoSigCB + } + return _EMPTY_, ErrUserButNoSigCB + } + sigraw, err := o.SignatureCB([]byte(nc.info.Nonce)) + if err != nil { + return _EMPTY_, fmt.Errorf("error signing nonce: %w", err) + } + sig = base64.RawURLEncoding.EncodeToString(sigraw) + } + + if nc.Opts.TokenHandler != nil { + if token != _EMPTY_ { + return _EMPTY_, ErrTokenAlreadySet + } + token = nc.Opts.TokenHandler() + } + + // If our server does not support headers then we can't do them or no responders. + hdrs := nc.info.Headers + cinfo := connectInfo{o.Verbose, o.Pedantic, ujwt, nkey, sig, user, pass, token, + o.Secure, o.Name, LangString, Version, clientProtoInfo, !o.NoEcho, hdrs, hdrs} + + b, err := json.Marshal(cinfo) + if err != nil { + return _EMPTY_, ErrJsonParse + } + + // Check if NoEcho is set and we have a server that supports it. + if o.NoEcho && nc.info.Proto < 1 { + return _EMPTY_, ErrNoEchoNotSupported + } + + return fmt.Sprintf(connectProto, b), nil +} + +// normalizeErr removes the prefix -ERR, trim spaces and remove the quotes. +func normalizeErr(line string) string { + s := strings.TrimSpace(strings.TrimPrefix(line, _ERR_OP_)) + s = strings.TrimLeft(strings.TrimRight(s, "'"), "'") + return s +} + +// natsProtoErr represents an -ERR protocol message sent by the server. +type natsProtoErr struct { + description string +} + +func (nerr *natsProtoErr) Error() string { + return fmt.Sprintf("nats: %s", nerr.description) +} + +func (nerr *natsProtoErr) Is(err error) bool { + return strings.ToLower(nerr.Error()) == err.Error() +} + +// Send a connect protocol message to the server, issue user/password if +// applicable. Will wait for a flush to return from the server for error +// processing. +func (nc *Conn) sendConnect() error { + // Construct the CONNECT protocol string + cProto, err := nc.connectProto() + if err != nil { + return err + } + + // Write the protocol and PING directly to the underlying writer. + if err := nc.bw.writeDirect(cProto, pingProto); err != nil { + return err + } + + // We don't want to read more than we need here, otherwise + // we would need to transfer the excess read data to the readLoop. + // Since in normal situations we just are looking for a PONG\r\n, + // reading byte-by-byte here is ok. + proto, err := nc.readProto() + if err != nil { + return err + } + + // If opts.Verbose is set, handle +OK + if nc.Opts.Verbose && proto == okProto { + // Read the rest now... + proto, err = nc.readProto() + if err != nil { + return err + } + } + + // We expect a PONG + if proto != pongProto { + // But it could be something else, like -ERR + + // Since we no longer use ReadLine(), trim the trailing "\r\n" + proto = strings.TrimRight(proto, "\r\n") + + // If it's a server error... + if strings.HasPrefix(proto, _ERR_OP_) { + // Remove -ERR, trim spaces and quotes, and convert to lower case. + proto = normalizeErr(proto) + + // Check if this is an auth error + if authErr := checkAuthError(strings.ToLower(proto)); authErr != nil { + // This will schedule an async error if we are in reconnect, + // and keep track of the auth error for the current server. 
+ // If we have got the same error twice, this sets nc.ar to true to + // indicate that the reconnect should be aborted (will be checked + // in doReconnect()). + nc.processAuthError(authErr) + } + return &natsProtoErr{proto} + } + + // Notify that we got an unexpected protocol. + return fmt.Errorf("nats: expected '%s', got '%s'", _PONG_OP_, proto) + } + + // This is where we are truly connected. + nc.status = CONNECTED + + return nil +} + +// reads a protocol line. +func (nc *Conn) readProto() (string, error) { + return nc.br.ReadString('\n') +} + +// A control protocol line. +type control struct { + op, args string +} + +// Read a control line and process the intended op. +func (nc *Conn) readOp(c *control) error { + line, err := nc.readProto() + if err != nil { + return err + } + parseControl(line, c) + return nil +} + +// Parse a control line from the server. +func parseControl(line string, c *control) { + toks := strings.SplitN(line, _SPC_, 2) + if len(toks) == 1 { + c.op = strings.TrimSpace(toks[0]) + c.args = _EMPTY_ + } else if len(toks) == 2 { + c.op, c.args = strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) + } else { + c.op = _EMPTY_ + } +} + +// flushReconnectPendingItems will push the pending items that were +// gathered while we were in a RECONNECTING state to the socket. +func (nc *Conn) flushReconnectPendingItems() error { + return nc.bw.flushPendingBuffer() +} + +// Stops the ping timer if set. +// Connection lock is held on entry. +func (nc *Conn) stopPingTimer() { + if nc.ptmr != nil { + nc.ptmr.Stop() + } +} + +// Try to reconnect using the option parameters. +// This function assumes we are allowed to reconnect. +func (nc *Conn) doReconnect(err error) { + // We want to make sure we have the other watchers shutdown properly + // here before we proceed past this point. + nc.waitForExits() + + // FIXME(dlc) - We have an issue here if we have + // outstanding flush points (pongs) and they were not + // sent out, but are still in the pipe. + + // Hold the lock manually and release where needed below, + // can't do defer here. + nc.mu.Lock() + + // Clear any errors. + nc.err = nil + // Perform appropriate callback if needed for a disconnect. + // DisconnectedErrCB has priority over deprecated DisconnectedCB + if !nc.initc { + if nc.Opts.DisconnectedErrCB != nil { + nc.ach.push(func() { nc.Opts.DisconnectedErrCB(nc, err) }) + } else if nc.Opts.DisconnectedCB != nil { + nc.ach.push(func() { nc.Opts.DisconnectedCB(nc) }) + } + } + + // This is used to wait on go routines exit if we start them in the loop + // but an error occurs after that. + waitForGoRoutines := false + var rt *time.Timer + // Channel used to kick routine out of sleep when conn is closed. + rqch := nc.rqch + // Counter that is increased when the whole list of servers has been tried. + var wlf int + + var jitter time.Duration + var rw time.Duration + // If a custom reconnect delay handler is set, this takes precedence. + crd := nc.Opts.CustomReconnectDelayCB + if crd == nil { + rw = nc.Opts.ReconnectWait + // TODO: since we sleep only after the whole list has been tried, we can't + // rely on individual *srv to know if it is a TLS or non-TLS url. 
+ // We have to pick which type of jitter to use, for now, we use these hints: + jitter = nc.Opts.ReconnectJitter + if nc.Opts.Secure || nc.Opts.TLSConfig != nil { + jitter = nc.Opts.ReconnectJitterTLS + } + } + + for i := 0; len(nc.srvPool) > 0; { + cur, err := nc.selectNextServer() + if err != nil { + nc.err = err + break + } + + doSleep := i+1 >= len(nc.srvPool) + nc.mu.Unlock() + + if !doSleep { + i++ + // Release the lock to give a chance to a concurrent nc.Close() to break the loop. + runtime.Gosched() + } else { + i = 0 + var st time.Duration + if crd != nil { + wlf++ + st = crd(wlf) + } else { + st = rw + if jitter > 0 { + st += time.Duration(rand.Int63n(int64(jitter))) + } + } + if rt == nil { + rt = time.NewTimer(st) + } else { + rt.Reset(st) + } + select { + case <-rqch: + rt.Stop() + case <-rt.C: + } + } + // If the readLoop, etc.. go routines were started, wait for them to complete. + if waitForGoRoutines { + nc.waitForExits() + waitForGoRoutines = false + } + nc.mu.Lock() + + // Check if we have been closed first. + if nc.isClosed() { + break + } + + // Mark that we tried a reconnect + cur.reconnects++ + + // Try to create a new connection + err = nc.createConn() + + // Not yet connected, retry... + // Continue to hold the lock + if err != nil { + nc.err = nil + continue + } + + // We are reconnected + nc.Reconnects++ + + // Process connect logic + if nc.err = nc.processConnectInit(); nc.err != nil { + // Check if we should abort reconnect. If so, break out + // of the loop and connection will be closed. + if nc.ar { + break + } + nc.status = RECONNECTING + continue + } + + // Clear possible lastErr under the connection lock after + // a successful processConnectInit(). + nc.current.lastErr = nil + + // Clear out server stats for the server we connected to.. + cur.didConnect = true + cur.reconnects = 0 + + // Send existing subscription state + nc.resendSubscriptions() + + // Now send off and clear pending buffer + nc.err = nc.flushReconnectPendingItems() + if nc.err != nil { + nc.status = RECONNECTING + // Stop the ping timer (if set) + nc.stopPingTimer() + // Since processConnectInit() returned without error, the + // go routines were started, so wait for them to return + // on the next iteration (after releasing the lock). + waitForGoRoutines = true + continue + } + + // Done with the pending buffer + nc.bw.doneWithPending() + + // This is where we are truly connected. + nc.status = CONNECTED + + // If we are here with a retry on failed connect, indicate that the + // initial connect is now complete. + nc.initc = false + + // Queue up the reconnect callback. + if nc.Opts.ReconnectedCB != nil { + nc.ach.push(func() { nc.Opts.ReconnectedCB(nc) }) + } + + // Release lock here, we will return below. + nc.mu.Unlock() + + // Make sure to flush everything + nc.Flush() + + return + } + + // Call into close.. We have no servers left.. + if nc.err == nil { + nc.err = ErrNoServers + } + nc.mu.Unlock() + nc.close(CLOSED, true, nil) +} + +// processOpErr handles errors from reading or parsing the protocol. +// The lock should not be held entering this function. +func (nc *Conn) processOpErr(err error) { + nc.mu.Lock() + if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() { + nc.mu.Unlock() + return + } + + if nc.Opts.AllowReconnect && nc.status == CONNECTED { + // Set our new status + nc.status = RECONNECTING + // Stop ping timer if set + nc.stopPingTimer() + if nc.conn != nil { + nc.conn.Close() + nc.conn = nil + } + + // Create pending buffer before reconnecting. 
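+		// (Anything written while RECONNECTING accumulates in this pending
+		// buffer, bounded by Options.ReconnectBufSize, and is flushed once a
+		// server is reached again.)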
+ nc.bw.switchToPending() + + // Clear any queued pongs, e.g. pending flush calls. + nc.clearPendingFlushCalls() + + go nc.doReconnect(err) + nc.mu.Unlock() + return + } + + nc.status = DISCONNECTED + nc.err = err + nc.mu.Unlock() + nc.close(CLOSED, true, nil) +} + +// dispatch is responsible for calling any async callbacks +func (ac *asyncCallbacksHandler) asyncCBDispatcher() { + for { + ac.mu.Lock() + // Protect for spurious wakeups. We should get out of the + // wait only if there is an element to pop from the list. + for ac.head == nil { + ac.cond.Wait() + } + cur := ac.head + ac.head = cur.next + if cur == ac.tail { + ac.tail = nil + } + ac.mu.Unlock() + + // This signals that the dispatcher has been closed and all + // previous callbacks have been dispatched. + if cur.f == nil { + return + } + // Invoke callback outside of handler's lock + cur.f() + } +} + +// Add the given function to the tail of the list and +// signals the dispatcher. +func (ac *asyncCallbacksHandler) push(f func()) { + ac.pushOrClose(f, false) +} + +// Signals that we are closing... +func (ac *asyncCallbacksHandler) close() { + ac.pushOrClose(nil, true) +} + +// Add the given function to the tail of the list and +// signals the dispatcher. +func (ac *asyncCallbacksHandler) pushOrClose(f func(), close bool) { + ac.mu.Lock() + defer ac.mu.Unlock() + // Make sure that library is not calling push with nil function, + // since this is used to notify the dispatcher that it should stop. + if !close && f == nil { + panic("pushing a nil callback") + } + cb := &asyncCB{f: f} + if ac.tail != nil { + ac.tail.next = cb + } else { + ac.head = cb + } + ac.tail = cb + if close { + ac.cond.Broadcast() + } else { + ac.cond.Signal() + } +} + +// readLoop() will sit on the socket reading and processing the +// protocol from the server. It will dispatch appropriately based +// on the op type. +func (nc *Conn) readLoop() { + // Release the wait group on exit + defer nc.wg.Done() + + // Create a parseState if needed. + nc.mu.Lock() + if nc.ps == nil { + nc.ps = &parseState{} + } + conn := nc.conn + br := nc.br + nc.mu.Unlock() + + if conn == nil { + return + } + + for { + buf, err := br.Read() + if err == nil { + // With websocket, it is possible that there is no error but + // also no buffer returned (either WS control message or read of a + // partial compressed message). We could call parse(buf) which + // would ignore an empty buffer, but simply go back to top of the loop. + if len(buf) == 0 { + continue + } + err = nc.parse(buf) + } + if err != nil { + nc.processOpErr(err) + break + } + } + // Clear the parseState here.. + nc.mu.Lock() + nc.ps = nil + nc.mu.Unlock() +} + +// waitForMsgs waits on the conditional shared with readLoop and processMsg. +// It is used to deliver messages to asynchronous subscribers. +func (nc *Conn) waitForMsgs(s *Subscription) { + var closed bool + var delivered, max uint64 + + // Used to account for adjustments to sub.pBytes when we wrap back around. + msgLen := -1 + + for { + s.mu.Lock() + // Do accounting for last msg delivered here so we only lock once + // and drain state trips after callback has returned. 
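+		// (msgLen holds the size of the message delivered on the previous
+		// iteration; -1 means there is nothing to account for yet.)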
+ if msgLen >= 0 { + s.pMsgs-- + s.pBytes -= msgLen + msgLen = -1 + } + + if s.pHead == nil && !s.closed { + s.pCond.Wait() + } + // Pop the msg off the list + m := s.pHead + if m != nil { + s.pHead = m.next + if s.pHead == nil { + s.pTail = nil + } + if m.barrier != nil { + s.mu.Unlock() + if atomic.AddInt64(&m.barrier.refs, -1) == 0 { + m.barrier.f() + } + continue + } + msgLen = len(m.Data) + } + mcb := s.mcb + max = s.max + closed = s.closed + var fcReply string + if !s.closed { + s.delivered++ + delivered = s.delivered + if s.jsi != nil { + fcReply = s.checkForFlowControlResponse() + } + } + s.mu.Unlock() + + // Respond to flow control if applicable + if fcReply != _EMPTY_ { + nc.Publish(fcReply, nil) + } + + if closed { + break + } + + // Deliver the message. + if m != nil && (max == 0 || delivered <= max) { + mcb(m) + } + // If we have hit the max for delivered msgs, remove sub. + if max > 0 && delivered >= max { + nc.mu.Lock() + nc.removeSub(s) + nc.mu.Unlock() + break + } + } + // Check for barrier messages + s.mu.Lock() + for m := s.pHead; m != nil; m = s.pHead { + if m.barrier != nil { + s.mu.Unlock() + if atomic.AddInt64(&m.barrier.refs, -1) == 0 { + m.barrier.f() + } + s.mu.Lock() + } + s.pHead = m.next + } + // Now check for pDone + done := s.pDone + s.mu.Unlock() + + if done != nil { + done() + } +} + +// Used for debugging and simulating loss for certain tests. +// Return what is to be used. If we return nil the message will be dropped. +type msgFilter func(m *Msg) *Msg + +func (nc *Conn) addMsgFilter(subject string, filter msgFilter) { + nc.subsMu.Lock() + defer nc.subsMu.Unlock() + + if nc.filters == nil { + nc.filters = make(map[string]msgFilter) + } + nc.filters[subject] = filter +} + +func (nc *Conn) removeMsgFilter(subject string) { + nc.subsMu.Lock() + defer nc.subsMu.Unlock() + + if nc.filters != nil { + delete(nc.filters, subject) + if len(nc.filters) == 0 { + nc.filters = nil + } + } +} + +// processMsg is called by parse and will place the msg on the +// appropriate channel/pending queue for processing. If the channel is full, +// or the pending queue is over the pending limits, the connection is +// considered a slow consumer. +func (nc *Conn) processMsg(data []byte) { + // Stats + atomic.AddUint64(&nc.InMsgs, 1) + atomic.AddUint64(&nc.InBytes, uint64(len(data))) + + // Don't lock the connection to avoid server cutting us off if the + // flusher is holding the connection lock, trying to send to the server + // that is itself trying to send data to us. + nc.subsMu.RLock() + sub := nc.subs[nc.ps.ma.sid] + var mf msgFilter + if nc.filters != nil { + mf = nc.filters[string(nc.ps.ma.subject)] + } + nc.subsMu.RUnlock() + + if sub == nil { + return + } + + // Copy them into string + subj := string(nc.ps.ma.subject) + reply := string(nc.ps.ma.reply) + + // Doing message create outside of the sub's lock to reduce contention. + // It's possible that we end-up not using the message, but that's ok. + + // FIXME(dlc): Need to copy, should/can do COW? + var msgPayload = data + if !nc.ps.msgCopied { + msgPayload = make([]byte, len(data)) + copy(msgPayload, data) + } + + // Check if we have headers encoded here. + var h Header + var err error + var ctrlMsg bool + var ctrlType int + var fcReply string + + if nc.ps.ma.hdr > 0 { + hbuf := msgPayload[:nc.ps.ma.hdr] + msgPayload = msgPayload[nc.ps.ma.hdr:] + h, err = decodeHeadersMsg(hbuf) + if err != nil { + // We will pass the message through but send async error. 
+ nc.mu.Lock() + nc.err = ErrBadHeaderMsg + if nc.Opts.AsyncErrorCB != nil { + nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrBadHeaderMsg) }) + } + nc.mu.Unlock() + } + } + + // FIXME(dlc): Should we recycle these containers? + m := &Msg{ + Subject: subj, + Reply: reply, + Header: h, + Data: msgPayload, + Sub: sub, + wsz: len(data) + len(subj) + len(reply), + } + + // Check for message filters. + if mf != nil { + if m = mf(m); m == nil { + // Drop message. + return + } + } + + sub.mu.Lock() + + // Check if closed. + if sub.closed { + sub.mu.Unlock() + return + } + + // Skip flow control messages in case of using a JetStream context. + jsi := sub.jsi + if jsi != nil { + // There has to be a header for it to be a control message. + if h != nil { + ctrlMsg, ctrlType = isJSControlMessage(m) + if ctrlMsg && ctrlType == jsCtrlHB { + // Check if the heartbeat has a "Consumer Stalled" header, if + // so, the value is the FC reply to send a nil message to. + // We will send it at the end of this function. + fcReply = m.Header.Get(consumerStalledHdr) + } + } + // Check for ordered consumer here. If checkOrderedMsgs returns true that means it detected a gap. + if !ctrlMsg && jsi.ordered && sub.checkOrderedMsgs(m) { + sub.mu.Unlock() + return + } + } + + // Skip processing if this is a control message. + if !ctrlMsg { + var chanSubCheckFC bool + // Subscription internal stats (applicable only for non ChanSubscription's) + if sub.typ != ChanSubscription { + sub.pMsgs++ + if sub.pMsgs > sub.pMsgsMax { + sub.pMsgsMax = sub.pMsgs + } + sub.pBytes += len(m.Data) + if sub.pBytes > sub.pBytesMax { + sub.pBytesMax = sub.pBytes + } + + // Check for a Slow Consumer + if (sub.pMsgsLimit > 0 && sub.pMsgs > sub.pMsgsLimit) || + (sub.pBytesLimit > 0 && sub.pBytes > sub.pBytesLimit) { + goto slowConsumer + } + } else if jsi != nil { + chanSubCheckFC = true + } + + // We have two modes of delivery. One is the channel, used by channel + // subscribers and syncSubscribers, the other is a linked list for async. + if sub.mch != nil { + select { + case sub.mch <- m: + default: + goto slowConsumer + } + } else { + // Push onto the async pList + if sub.pHead == nil { + sub.pHead = m + sub.pTail = m + if sub.pCond != nil { + sub.pCond.Signal() + } + } else { + sub.pTail.next = m + sub.pTail = m + } + } + if jsi != nil { + // Store the ACK metadata from the message to + // compare later on with the received heartbeat. + sub.trackSequences(m.Reply) + if chanSubCheckFC { + // For ChanSubscription, since we can't call this when a message + // is "delivered" (since user is pull from their own channel), + // we have a go routine that does this check, however, we do it + // also here to make it much more responsive. The go routine is + // really to avoid stalling when there is no new messages coming. + fcReply = sub.checkForFlowControlResponse() + } + } + } else if ctrlType == jsCtrlFC && m.Reply != _EMPTY_ { + // This is a flow control message. + // We will schedule the send of the FC reply once we have delivered the + // DATA message that was received before this flow control message, which + // has sequence `jsi.fciseq`. However, it is possible that this message + // has already been delivered, in that case, we need to send the FC reply now. + if sub.getJSDelivered() >= jsi.fciseq { + fcReply = m.Reply + } else { + // Schedule a reply after the previous message is delivered. + sub.scheduleFlowControlResponse(m.Reply) + } + } + + // Clear any SlowConsumer status. 
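+ // A successful delivery resets the flag, so a later violation will
+ // trigger the async error callback again.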
+ sub.sc = false
+ sub.mu.Unlock()
+
+ if fcReply != _EMPTY_ {
+ nc.Publish(fcReply, nil)
+ }
+
+ // Handle control heartbeat messages.
+ if ctrlMsg && ctrlType == jsCtrlHB && m.Reply == _EMPTY_ {
+ nc.checkForSequenceMismatch(m, sub, jsi)
+ }
+
+ return
+
+slowConsumer:
+ sub.dropped++
+ sc := !sub.sc
+ sub.sc = true
+ // Undo stats from above
+ if sub.typ != ChanSubscription {
+ sub.pMsgs--
+ sub.pBytes -= len(m.Data)
+ }
+ sub.mu.Unlock()
+ if sc {
+ // Now we need the connection's lock and we may end up in the situation
+ // that we were trying to avoid, except that in this case, the client
+ // is already experiencing a client-side slow consumer situation.
+ nc.mu.Lock()
+ nc.err = ErrSlowConsumer
+ if nc.Opts.AsyncErrorCB != nil {
+ nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, sub, ErrSlowConsumer) })
+ }
+ nc.mu.Unlock()
+ }
+}
+
+// processPermissionsViolation is called when the server signals a subject
+// permissions violation on either publish or subscribe.
+func (nc *Conn) processPermissionsViolation(err string) {
+ nc.mu.Lock()
+ // create error here so we can pass it as a closure to the async cb dispatcher.
+ e := errors.New("nats: " + err)
+ nc.err = e
+ if nc.Opts.AsyncErrorCB != nil {
+ nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, e) })
+ }
+ nc.mu.Unlock()
+}
+
+// processAuthError handles generic processing of auth errors. We want to retry
+// unless we get the same error again. This allows us, for instance, to swap credentials
+// and have the app reconnect, but if nothing is changing we should bail.
+// This function will return true if the connection should be closed, false otherwise.
+// Connection lock is held on entry
+func (nc *Conn) processAuthError(err error) bool {
+ nc.err = err
+ if !nc.initc && nc.Opts.AsyncErrorCB != nil {
+ nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
+ }
+ // We should give up if we tried twice on this server and got the
+ // same error. This behavior can be modified using IgnoreAuthErrorAbort.
+ if nc.current.lastErr == err && !nc.Opts.IgnoreAuthErrorAbort {
+ nc.ar = true
+ } else {
+ nc.current.lastErr = err
+ }
+ return nc.ar
+}
+
+// flusher is a separate Go routine that will process flush requests for the write
+// bufio. This allows coalescing of writes to the underlying socket.
+func (nc *Conn) flusher() {
+ // Release the wait group
+ defer nc.wg.Done()
+
+ // Snapshot the bw and conn since they can change from underneath us.
+ nc.mu.Lock()
+ bw := nc.bw
+ conn := nc.conn
+ fch := nc.fch
+ nc.mu.Unlock()
+
+ if conn == nil || bw == nil {
+ return
+ }
+
+ for {
+ if _, ok := <-fch; !ok {
+ return
+ }
+ nc.mu.Lock()
+
+ // Check to see if we should bail out.
+ if !nc.isConnected() || nc.isConnecting() || conn != nc.conn {
+ nc.mu.Unlock()
+ return
+ }
+ if bw.buffered() > 0 {
+ if err := bw.flush(); err != nil {
+ if nc.err == nil {
+ nc.err = err
+ }
+ if nc.Opts.AsyncErrorCB != nil {
+ nc.ach.push(func() { nc.Opts.AsyncErrorCB(nc, nil, err) })
+ }
+ }
+ }
+ nc.mu.Unlock()
+ }
+}
+
+// processPing will send an immediate pong protocol response to the
+// server. The server uses this mechanism to detect dead clients.
+func (nc *Conn) processPing() {
+ nc.sendProto(pongProto)
+}
+
+// processPong is used to process responses to the client's ping
+// messages. We use pings for the flush mechanism as well.
+func (nc *Conn) processPong() {
+ var ch chan struct{}
+
+ nc.mu.Lock()
+ if len(nc.pongs) > 0 {
+ ch = nc.pongs[0]
+ nc.pongs = append(nc.pongs[:0], nc.pongs[1:]...)
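+ // Pongs are matched in FIFO order: the oldest outstanding PING is
+ // the one released by this PONG.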
+ }
+ nc.pout = 0
+ nc.mu.Unlock()
+ if ch != nil {
+ ch <- struct{}{}
+ }
+}
+
+// processOK is a placeholder for processing OK messages.
+func (nc *Conn) processOK() {
+ // do nothing
+}
+
+// processInfo is used to parse the info messages sent
+// from the server.
+// This function may update the server pool.
+func (nc *Conn) processInfo(info string) error {
+ if info == _EMPTY_ {
+ return nil
+ }
+ var ncInfo serverInfo
+ if err := json.Unmarshal([]byte(info), &ncInfo); err != nil {
+ return err
+ }
+
+ // Copy content into connection's info structure.
+ nc.info = ncInfo
+ // The array could be empty/not present on initial connect,
+ // if advertise is disabled on that server, or servers that
+ // did not include themselves in the async INFO protocol.
+ // If empty, do not remove the implicit servers from the pool.
+ if len(nc.info.ConnectURLs) == 0 {
+ if !nc.initc && ncInfo.LameDuckMode && nc.Opts.LameDuckModeHandler != nil {
+ nc.ach.push(func() { nc.Opts.LameDuckModeHandler(nc) })
+ }
+ return nil
+ }
+ // Note about pool randomization: when the pool was first created,
+ // it was randomized (if allowed). We keep the order the same (removing
+ // implicit servers that are no longer sent to us). New URLs are sent
+ // to us in no specific order, so they don't need extra randomization.
+ hasNew := false
+ // This is what we got from the server we are connected to.
+ urls := nc.info.ConnectURLs
+ // Transform that to a map for easy lookups
+ tmp := make(map[string]struct{}, len(urls))
+ for _, curl := range urls {
+ tmp[curl] = struct{}{}
+ }
+ // Walk the pool and remove the implicit servers that are no longer in the
+ // given array/map.
+ sp := nc.srvPool
+ for i := 0; i < len(sp); i++ {
+ srv := sp[i]
+ curl := srv.url.Host
+ // Check if this URL is in the INFO protocol
+ _, inInfo := tmp[curl]
+ // Remove from the temp map so that at the end we are left with only
+ // new (or restarted) servers that need to be added to the pool.
+ delete(tmp, curl)
+ // Keep servers that were set through Options, but also the one that
+ // we are currently connected to (even if it is a discovered server).
+ if !srv.isImplicit || srv.url == nc.current.url {
+ continue
+ }
+ if !inInfo {
+ // Remove from server pool. Keep current order.
+ copy(sp[i:], sp[i+1:])
+ nc.srvPool = sp[:len(sp)-1]
+ sp = nc.srvPool
+ i--
+ }
+ }
+ // Figure out if we should save off the current non-IP hostname if we encounter a bare IP.
+ saveTLS := nc.current != nil && !hostIsIP(nc.current.url)
+
+ // If there are any left in the tmp map, these are new (or restarted) servers
+ // and need to be added to the pool.
+ for curl := range tmp {
+ // Before adding, check if this is a new (as in never seen) URL.
+ // This is used to figure out if we invoke the DiscoveredServersCB
+ if _, present := nc.urls[curl]; !present {
+ hasNew = true
+ }
+ nc.addURLToPool(fmt.Sprintf("%s://%s", nc.connScheme(), curl), true, saveTLS)
+ }
+ if hasNew {
+ // Randomize the pool if allowed but leave the first URL in place.
+ if !nc.Opts.NoRandomize {
+ nc.shufflePool(1)
+ }
+ if !nc.initc && nc.Opts.DiscoveredServersCB != nil {
+ nc.ach.push(func() { nc.Opts.DiscoveredServersCB(nc) })
+ }
+ }
+ if !nc.initc && ncInfo.LameDuckMode && nc.Opts.LameDuckModeHandler != nil {
+ nc.ach.push(func() { nc.Opts.LameDuckModeHandler(nc) })
+ }
+ return nil
+}
+
+// processAsyncInfo does the same as processInfo, but is called
+// from the parser. Calls processInfo under connection's lock
+// protection.
+func (nc *Conn) processAsyncInfo(info []byte) { + nc.mu.Lock() + // Ignore errors, we will simply not update the server pool... + nc.processInfo(string(info)) + nc.mu.Unlock() +} + +// LastError reports the last error encountered via the connection. +// It can be used reliably within ClosedCB in order to find out reason +// why connection was closed for example. +func (nc *Conn) LastError() error { + if nc == nil { + return ErrInvalidConnection + } + nc.mu.RLock() + err := nc.err + nc.mu.RUnlock() + return err +} + +// Check if the given error string is an auth error, and if so returns +// the corresponding ErrXXX error, nil otherwise +func checkAuthError(e string) error { + if strings.HasPrefix(e, AUTHORIZATION_ERR) { + return ErrAuthorization + } + if strings.HasPrefix(e, AUTHENTICATION_EXPIRED_ERR) { + return ErrAuthExpired + } + if strings.HasPrefix(e, AUTHENTICATION_REVOKED_ERR) { + return ErrAuthRevoked + } + if strings.HasPrefix(e, ACCOUNT_AUTHENTICATION_EXPIRED_ERR) { + return ErrAccountAuthExpired + } + return nil +} + +// processErr processes any error messages from the server and +// sets the connection's LastError. +func (nc *Conn) processErr(ie string) { + // Trim, remove quotes + ne := normalizeErr(ie) + // convert to lower case. + e := strings.ToLower(ne) + + close := false + + // FIXME(dlc) - process Slow Consumer signals special. + if e == STALE_CONNECTION { + nc.processOpErr(ErrStaleConnection) + } else if e == MAX_CONNECTIONS_ERR { + nc.processOpErr(ErrMaxConnectionsExceeded) + } else if strings.HasPrefix(e, PERMISSIONS_ERR) { + nc.processPermissionsViolation(ne) + } else if authErr := checkAuthError(e); authErr != nil { + nc.mu.Lock() + close = nc.processAuthError(authErr) + nc.mu.Unlock() + } else { + close = true + nc.mu.Lock() + nc.err = errors.New("nats: " + ne) + nc.mu.Unlock() + } + if close { + nc.close(CLOSED, true, nil) + } +} + +// kickFlusher will send a bool on a channel to kick the +// flush Go routine to flush data to the server. +func (nc *Conn) kickFlusher() { + if nc.bw != nil { + select { + case nc.fch <- struct{}{}: + default: + } + } +} + +// Publish publishes the data argument to the given subject. The data +// argument is left untouched and needs to be correctly interpreted on +// the receiver. +func (nc *Conn) Publish(subj string, data []byte) error { + return nc.publish(subj, _EMPTY_, nil, data) +} + +// Header represents the optional Header for a NATS message, +// based on the implementation of http.Header. +type Header map[string][]string + +// Add adds the key, value pair to the header. It is case-sensitive +// and appends to any existing values associated with key. +func (h Header) Add(key, value string) { + h[key] = append(h[key], value) +} + +// Set sets the header entries associated with key to the single +// element value. It is case-sensitive and replaces any existing +// values associated with key. +func (h Header) Set(key, value string) { + h[key] = []string{value} +} + +// Get gets the first value associated with the given key. +// It is case-sensitive. +func (h Header) Get(key string) string { + if h == nil { + return _EMPTY_ + } + if v := h[key]; v != nil { + return v[0] + } + return _EMPTY_ +} + +// Values returns all values associated with the given key. +// It is case-sensitive. +func (h Header) Values(key string) []string { + return h[key] +} + +// Del deletes the values associated with a key. +// It is case-sensitive. 
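+//
+// As an illustrative sketch of typical Header usage (not part of the
+// original documentation; nc is assumed to be a connected *Conn):
+//
+//	msg := NewMsg("updates.events")
+//	msg.Header.Set("X-Request-Id", "abc123")
+//	msg.Data = []byte("hello")
+//	if err := nc.PublishMsg(msg); err != nil {
+//		// handle publish error
+//	}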
+func (h Header) Del(key string) {
+ delete(h, key)
+}
+
+// NewMsg creates a message for publishing that will use headers.
+func NewMsg(subject string) *Msg {
+ return &Msg{
+ Subject: subject,
+ Header: make(Header),
+ }
+}
+
+const (
+ hdrLine = "NATS/1.0\r\n"
+ crlf = "\r\n"
+ hdrPreEnd = len(hdrLine) - len(crlf)
+ statusHdr = "Status"
+ descrHdr = "Description"
+ lastConsumerSeqHdr = "Nats-Last-Consumer"
+ lastStreamSeqHdr = "Nats-Last-Stream"
+ consumerStalledHdr = "Nats-Consumer-Stalled"
+ noResponders = "503"
+ noMessagesSts = "404"
+ reqTimeoutSts = "408"
+ jetStream409Sts = "409"
+ controlMsg = "100"
+ statusLen = 3 // e.g. 20x, 40x, 50x
+)
+
+// decodeHeadersMsg will decode the headers.
+func decodeHeadersMsg(data []byte) (Header, error) {
+ br := bufio.NewReaderSize(bytes.NewReader(data), 128)
+ tp := textproto.NewReader(br)
+ l, err := tp.ReadLine()
+ if err != nil || len(l) < hdrPreEnd || l[:hdrPreEnd] != hdrLine[:hdrPreEnd] {
+ return nil, ErrBadHeaderMsg
+ }
+
+ mh, err := readMIMEHeader(tp)
+ if err != nil {
+ return nil, err
+ }
+
+ // Check if we have an inlined status.
+ if len(l) > hdrPreEnd {
+ var description string
+ status := strings.TrimSpace(l[hdrPreEnd:])
+ if len(status) != statusLen {
+ description = strings.TrimSpace(status[statusLen:])
+ status = status[:statusLen]
+ }
+ mh.Add(statusHdr, status)
+ if len(description) > 0 {
+ mh.Add(descrHdr, description)
+ }
+ }
+ return Header(mh), nil
+}
+
+// readMIMEHeader returns a MIMEHeader that preserves the
+// original case of the MIME header, based on the implementation
+// of textproto.ReadMIMEHeader.
+//
+// https://golang.org/pkg/net/textproto/#Reader.ReadMIMEHeader
+func readMIMEHeader(tp *textproto.Reader) (textproto.MIMEHeader, error) {
+ m := make(textproto.MIMEHeader)
+ for {
+ kv, err := tp.ReadLine()
+ if len(kv) == 0 {
+ return m, err
+ }
+
+ // Process key fetching original case.
+ i := bytes.IndexByte([]byte(kv), ':')
+ if i < 0 {
+ return nil, ErrBadHeaderMsg
+ }
+ key := kv[:i]
+ if key == "" {
+ // Skip empty keys.
+ continue
+ }
+ i++
+ for i < len(kv) && (kv[i] == ' ' || kv[i] == '\t') {
+ i++
+ }
+ value := string(kv[i:])
+ m[key] = append(m[key], value)
+ if err != nil {
+ return m, err
+ }
+ }
+}
+
+// PublishMsg publishes the Msg structure, which includes the
+// Subject, an optional Reply and an optional Data field.
+func (nc *Conn) PublishMsg(m *Msg) error {
+ if m == nil {
+ return ErrInvalidMsg
+ }
+ hdr, err := m.headerBytes()
+ if err != nil {
+ return err
+ }
+ return nc.publish(m.Subject, m.Reply, hdr, m.Data)
+}
+
+// PublishRequest will perform a Publish() expecting a response on the
+// reply subject. Use Request() for automatically waiting for a response
+// inline.
+func (nc *Conn) PublishRequest(subj, reply string, data []byte) error {
+ return nc.publish(subj, reply, nil, data)
+}
+
+// Used for handrolled Itoa
+const digits = "0123456789"
+
+// publish is the internal function to publish messages to a nats-server.
+// Sends a protocol data message by queuing into the bufio writer
+// and kicking the flush go routine. These writes should be protected.
+func (nc *Conn) publish(subj, reply string, hdr, data []byte) error {
+ if nc == nil {
+ return ErrInvalidConnection
+ }
+ if subj == "" {
+ return ErrBadSubject
+ }
+ nc.mu.Lock()
+
+ // Check if headers were attempted to be sent to a server that does not support them.
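+ // (Header support is advertised by the server in its INFO protocol,
+ // which populated nc.info at connect time.)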
+ if len(hdr) > 0 && !nc.info.Headers { + nc.mu.Unlock() + return ErrHeadersNotSupported + } + + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + + if nc.isDrainingPubs() { + nc.mu.Unlock() + return ErrConnectionDraining + } + + // Proactively reject payloads over the threshold set by server. + msgSize := int64(len(data) + len(hdr)) + // Skip this check if we are not yet connected (RetryOnFailedConnect) + if !nc.initc && msgSize > nc.info.MaxPayload { + nc.mu.Unlock() + return ErrMaxPayload + } + + // Check if we are reconnecting, and if so check if + // we have exceeded our reconnect outbound buffer limits. + if nc.bw.atLimitIfUsingPending() { + nc.mu.Unlock() + return ErrReconnectBufExceeded + } + + var mh []byte + if hdr != nil { + mh = nc.scratch[:len(_HPUB_P_)] + } else { + mh = nc.scratch[1:len(_HPUB_P_)] + } + mh = append(mh, subj...) + mh = append(mh, ' ') + if reply != "" { + mh = append(mh, reply...) + mh = append(mh, ' ') + } + + // We could be smarter here, but simple loop is ok, + // just avoid strconv in fast path. + // FIXME(dlc) - Find a better way here. + // msgh = strconv.AppendInt(msgh, int64(len(data)), 10) + // go 1.14 some values strconv faster, may be able to switch over. + + var b [12]byte + var i = len(b) + + if hdr != nil { + if len(hdr) > 0 { + for l := len(hdr); l > 0; l /= 10 { + i-- + b[i] = digits[l%10] + } + } else { + i-- + b[i] = digits[0] + } + mh = append(mh, b[i:]...) + mh = append(mh, ' ') + // reset for below. + i = len(b) + } + + if msgSize > 0 { + for l := msgSize; l > 0; l /= 10 { + i-- + b[i] = digits[l%10] + } + } else { + i-- + b[i] = digits[0] + } + + mh = append(mh, b[i:]...) + mh = append(mh, _CRLF_...) + + if err := nc.bw.appendBufs(mh, hdr, data, _CRLF_BYTES_); err != nil { + nc.mu.Unlock() + return err + } + + nc.OutMsgs++ + nc.OutBytes += uint64(len(data) + len(hdr)) + + if len(nc.fch) == 0 { + nc.kickFlusher() + } + nc.mu.Unlock() + return nil +} + +// respHandler is the global response handler. It will look up +// the appropriate channel based on the last token and place +// the message on the channel if possible. +func (nc *Conn) respHandler(m *Msg) { + nc.mu.Lock() + + // Just return if closed. + if nc.isClosed() { + nc.mu.Unlock() + return + } + + var mch chan *Msg + + // Grab mch + rt := nc.respToken(m.Subject) + if rt != _EMPTY_ { + mch = nc.respMap[rt] + // Delete the key regardless, one response only. + delete(nc.respMap, rt) + } else if len(nc.respMap) == 1 { + // If the server has rewritten the subject, the response token (rt) + // will not match (could be the case with JetStream). If that is the + // case and there is a single entry, use that. + for k, v := range nc.respMap { + mch = v + delete(nc.respMap, k) + break + } + } + nc.mu.Unlock() + + // Don't block, let Request timeout instead, mch is + // buffered and we should delete the key before a + // second response is processed. + select { + case mch <- m: + default: + return + } +} + +// Helper to setup and send new request style requests. Return the chan to receive the response. +func (nc *Conn) createNewRequestAndSend(subj string, hdr, data []byte) (chan *Msg, string, error) { + nc.mu.Lock() + // Do setup for the new style if needed. + if nc.respMap == nil { + nc.initNewResp() + } + // Create new literal Inbox and map to a chan msg. 
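+ // Each request gets its own token suffix under the shared wildcard
+ // subscription, so a single mux subscription serves all requests.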
+ mch := make(chan *Msg, RequestChanLen) + respInbox := nc.newRespInbox() + token := respInbox[nc.respSubLen:] + + nc.respMap[token] = mch + if nc.respMux == nil { + // Create the response subscription we will use for all new style responses. + // This will be on an _INBOX with an additional terminal token. The subscription + // will be on a wildcard. + s, err := nc.subscribeLocked(nc.respSub, _EMPTY_, nc.respHandler, nil, false, nil) + if err != nil { + nc.mu.Unlock() + return nil, token, err + } + nc.respScanf = strings.Replace(nc.respSub, "*", "%s", -1) + nc.respMux = s + } + nc.mu.Unlock() + + if err := nc.publish(subj, respInbox, hdr, data); err != nil { + return nil, token, err + } + + return mch, token, nil +} + +// RequestMsg will send a request payload including optional headers and deliver +// the response message, or an error, including a timeout if no message was received properly. +func (nc *Conn) RequestMsg(msg *Msg, timeout time.Duration) (*Msg, error) { + if msg == nil { + return nil, ErrInvalidMsg + } + hdr, err := msg.headerBytes() + if err != nil { + return nil, err + } + + return nc.request(msg.Subject, hdr, msg.Data, timeout) +} + +// Request will send a request payload and deliver the response message, +// or an error, including a timeout if no message was received properly. +func (nc *Conn) Request(subj string, data []byte, timeout time.Duration) (*Msg, error) { + return nc.request(subj, nil, data, timeout) +} + +func (nc *Conn) useOldRequestStyle() bool { + nc.mu.RLock() + r := nc.Opts.UseOldRequestStyle + nc.mu.RUnlock() + return r +} + +func (nc *Conn) request(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { + if nc == nil { + return nil, ErrInvalidConnection + } + + var m *Msg + var err error + + if nc.useOldRequestStyle() { + m, err = nc.oldRequest(subj, hdr, data, timeout) + } else { + m, err = nc.newRequest(subj, hdr, data, timeout) + } + + // Check for no responder status. + if err == nil && len(m.Data) == 0 && m.Header.Get(statusHdr) == noResponders { + m, err = nil, ErrNoResponders + } + return m, err +} + +func (nc *Conn) newRequest(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { + mch, token, err := nc.createNewRequestAndSend(subj, hdr, data) + if err != nil { + return nil, err + } + + t := globalTimerPool.Get(timeout) + defer globalTimerPool.Put(t) + + var ok bool + var msg *Msg + + select { + case msg, ok = <-mch: + if !ok { + return nil, ErrConnectionClosed + } + case <-t.C: + nc.mu.Lock() + delete(nc.respMap, token) + nc.mu.Unlock() + return nil, ErrTimeout + } + + return msg, nil +} + +// oldRequest will create an Inbox and perform a Request() call +// with the Inbox reply and return the first reply received. +// This is optimized for the case of multiple responses. +func (nc *Conn) oldRequest(subj string, hdr, data []byte, timeout time.Duration) (*Msg, error) { + inbox := nc.NewInbox() + ch := make(chan *Msg, RequestChanLen) + + s, err := nc.subscribe(inbox, _EMPTY_, nil, ch, true, nil) + if err != nil { + return nil, err + } + s.AutoUnsubscribe(1) + defer s.Unsubscribe() + + err = nc.publish(subj, inbox, hdr, data) + if err != nil { + return nil, err + } + + return s.NextMsg(timeout) +} + +// InboxPrefix is the prefix for all inbox subjects. +const ( + InboxPrefix = "_INBOX." 
+ inboxPrefixLen = len(InboxPrefix) + replySuffixLen = 8 // Gives us 62^8 + rdigits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + base = 62 +) + +// NewInbox will return an inbox string which can be used for directed replies from +// subscribers. These are guaranteed to be unique, but can be shared and subscribed +// to by others. +func NewInbox() string { + var b [inboxPrefixLen + nuidSize]byte + pres := b[:inboxPrefixLen] + copy(pres, InboxPrefix) + ns := b[inboxPrefixLen:] + copy(ns, nuid.Next()) + return string(b[:]) +} + +// Create a new inbox that is prefix aware. +func (nc *Conn) NewInbox() string { + if nc.Opts.InboxPrefix == _EMPTY_ { + return NewInbox() + } + + var sb strings.Builder + sb.WriteString(nc.Opts.InboxPrefix) + sb.WriteByte('.') + sb.WriteString(nuid.Next()) + return sb.String() +} + +// Function to init new response structures. +func (nc *Conn) initNewResp() { + nc.respSubPrefix = fmt.Sprintf("%s.", nc.NewInbox()) + nc.respSubLen = len(nc.respSubPrefix) + nc.respSub = fmt.Sprintf("%s*", nc.respSubPrefix) + nc.respMap = make(map[string]chan *Msg) + nc.respRand = rand.New(rand.NewSource(time.Now().UnixNano())) +} + +// newRespInbox creates a new literal response subject +// that will trigger the mux subscription handler. +// Lock should be held. +func (nc *Conn) newRespInbox() string { + if nc.respMap == nil { + nc.initNewResp() + } + + var sb strings.Builder + sb.WriteString(nc.respSubPrefix) + + rn := nc.respRand.Int63() + for i := 0; i < replySuffixLen; i++ { + sb.WriteByte(rdigits[rn%base]) + rn /= base + } + + return sb.String() +} + +// NewRespInbox is the new format used for _INBOX. +func (nc *Conn) NewRespInbox() string { + nc.mu.Lock() + s := nc.newRespInbox() + nc.mu.Unlock() + return s +} + +// respToken will return the last token of a literal response inbox +// which we use for the message channel lookup. This needs to do a +// scan to protect itself against the server changing the subject. +// Lock should be held. +func (nc *Conn) respToken(respInbox string) string { + var token string + n, err := fmt.Sscanf(respInbox, nc.respScanf, &token) + if err != nil || n != 1 { + return "" + } + return token +} + +// Subscribe will express interest in the given subject. The subject +// can have wildcards. +// There are two type of wildcards: * for partial, and > for full. +// A subscription on subject time.*.east would receive messages sent to time.us.east and time.eu.east. +// A subscription on subject time.us.> would receive messages sent to +// time.us.east and time.us.east.atlanta, while time.us.* would only match time.us.east +// since it can't match more than one token. +// Messages will be delivered to the associated MsgHandler. +func (nc *Conn) Subscribe(subj string, cb MsgHandler) (*Subscription, error) { + return nc.subscribe(subj, _EMPTY_, cb, nil, false, nil) +} + +// ChanSubscribe will express interest in the given subject and place +// all messages received on the channel. +// You should not close the channel until sub.Unsubscribe() has been called. +func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error) { + return nc.subscribe(subj, _EMPTY_, nil, ch, false, nil) +} + +// ChanQueueSubscribe will express interest in the given subject. +// All subscribers with the same queue name will form the queue group +// and only one member of the group will be selected to receive any given message, +// which will be placed on the channel. +// You should not close the channel until sub.Unsubscribe() has been called. 
+// Note: This is the same as QueueSubscribeSyncWithChan.
+func (nc *Conn) ChanQueueSubscribe(subj, group string, ch chan *Msg) (*Subscription, error) {
+ return nc.subscribe(subj, group, nil, ch, false, nil)
+}
+
+// SubscribeSync will express interest in the given subject. Messages will
+// be received synchronously using Subscription.NextMsg().
+func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) {
+ if nc == nil {
+ return nil, ErrInvalidConnection
+ }
+ mch := make(chan *Msg, nc.Opts.SubChanLen)
+ return nc.subscribe(subj, _EMPTY_, nil, mch, true, nil)
+}
+
+// QueueSubscribe creates an asynchronous queue subscriber on the given subject.
+// All subscribers with the same queue name will form the queue group and
+// only one member of the group will be selected to receive any given
+// message asynchronously.
+func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription, error) {
+ return nc.subscribe(subj, queue, cb, nil, false, nil)
+}
+
+// QueueSubscribeSync creates a synchronous queue subscriber on the given
+// subject. All subscribers with the same queue name will form the queue
+// group and only one member of the group will be selected to receive any
+// given message synchronously using Subscription.NextMsg().
+func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) {
+ mch := make(chan *Msg, nc.Opts.SubChanLen)
+ return nc.subscribe(subj, queue, nil, mch, true, nil)
+}
+
+// QueueSubscribeSyncWithChan will express interest in the given subject.
+// All subscribers with the same queue name will form the queue group
+// and only one member of the group will be selected to receive any given message,
+// which will be placed on the channel.
+// You should not close the channel until sub.Unsubscribe() has been called.
+// Note: This is the same as ChanQueueSubscribe.
+func (nc *Conn) QueueSubscribeSyncWithChan(subj, queue string, ch chan *Msg) (*Subscription, error) {
+ return nc.subscribe(subj, queue, nil, ch, false, nil)
+}
+
+// badSubject will do a quick test on whether a subject is acceptable.
+// Spaces are not allowed and all tokens should be > 0 in len.
+func badSubject(subj string) bool {
+ if strings.ContainsAny(subj, " \t\r\n") {
+ return true
+ }
+ tokens := strings.Split(subj, ".")
+ for _, t := range tokens {
+ if len(t) == 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// badQueue will check a queue name for whitespace.
+func badQueue(qname string) bool {
+ return strings.ContainsAny(qname, " \t\r\n")
+}
+
+// subscribe is the internal subscribe function that indicates interest in a subject.
+func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool, js *jsSub) (*Subscription, error) {
+ if nc == nil {
+ return nil, ErrInvalidConnection
+ }
+ nc.mu.Lock()
+ defer nc.mu.Unlock()
+ return nc.subscribeLocked(subj, queue, cb, ch, isSync, js)
+}
+
+func (nc *Conn) subscribeLocked(subj, queue string, cb MsgHandler, ch chan *Msg, isSync bool, js *jsSub) (*Subscription, error) {
+ if nc == nil {
+ return nil, ErrInvalidConnection
+ }
+ if badSubject(subj) {
+ return nil, ErrBadSubject
+ }
+ if queue != _EMPTY_ && badQueue(queue) {
+ return nil, ErrBadQueueName
+ }
+
+ // Check for some error conditions.
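+ // Subscriptions are rejected up front on closed or draining
+ // connections, and require either a callback or a channel.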
+ if nc.isClosed() { + return nil, ErrConnectionClosed + } + if nc.isDraining() { + return nil, ErrConnectionDraining + } + + if cb == nil && ch == nil { + return nil, ErrBadSubscription + } + + sub := &Subscription{ + Subject: subj, + Queue: queue, + mcb: cb, + conn: nc, + jsi: js, + } + // Set pending limits. + if ch != nil { + sub.pMsgsLimit = cap(ch) + } else { + sub.pMsgsLimit = DefaultSubPendingMsgsLimit + } + sub.pBytesLimit = DefaultSubPendingBytesLimit + + // If we have an async callback, start up a sub specific + // Go routine to deliver the messages. + var sr bool + if cb != nil { + sub.typ = AsyncSubscription + sub.pCond = sync.NewCond(&sub.mu) + sr = true + } else if !isSync { + sub.typ = ChanSubscription + sub.mch = ch + } else { // Sync Subscription + sub.typ = SyncSubscription + sub.mch = ch + } + + nc.subsMu.Lock() + nc.ssid++ + sub.sid = nc.ssid + nc.subs[sub.sid] = sub + nc.subsMu.Unlock() + + // Let's start the go routine now that it is fully setup and registered. + if sr { + go nc.waitForMsgs(sub) + } + + // We will send these for all subs when we reconnect + // so that we can suppress here if reconnecting. + if !nc.isReconnecting() { + nc.bw.appendString(fmt.Sprintf(subProto, subj, queue, sub.sid)) + nc.kickFlusher() + } + + return sub, nil +} + +// NumSubscriptions returns active number of subscriptions. +func (nc *Conn) NumSubscriptions() int { + nc.mu.RLock() + defer nc.mu.RUnlock() + return len(nc.subs) +} + +// Lock for nc should be held here upon entry +func (nc *Conn) removeSub(s *Subscription) { + nc.subsMu.Lock() + delete(nc.subs, s.sid) + nc.subsMu.Unlock() + s.mu.Lock() + defer s.mu.Unlock() + // Release callers on NextMsg for SyncSubscription only + if s.mch != nil && s.typ == SyncSubscription { + close(s.mch) + } + s.mch = nil + + // If JS subscription then stop HB timer. + if jsi := s.jsi; jsi != nil { + if jsi.hbc != nil { + jsi.hbc.Stop() + jsi.hbc = nil + } + if jsi.csfct != nil { + jsi.csfct.Stop() + jsi.csfct = nil + } + } + + // Mark as invalid + s.closed = true + if s.pCond != nil { + s.pCond.Broadcast() + } +} + +// SubscriptionType is the type of the Subscription. +type SubscriptionType int + +// The different types of subscription types. +const ( + AsyncSubscription = SubscriptionType(iota) + SyncSubscription + ChanSubscription + NilSubscription + PullSubscription +) + +// Type returns the type of Subscription. +func (s *Subscription) Type() SubscriptionType { + if s == nil { + return NilSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + // Pull subscriptions are really a SyncSubscription and we want this + // type to be set internally for all delivered messages management, etc.. + // So check when to return PullSubscription to the user. + if s.jsi != nil && s.jsi.pull { + return PullSubscription + } + return s.typ +} + +// IsValid returns a boolean indicating whether the subscription +// is still active. This will return false if the subscription has +// already been closed. +func (s *Subscription) IsValid() bool { + if s == nil { + return false + } + s.mu.Lock() + defer s.mu.Unlock() + return s.conn != nil && !s.closed +} + +// Drain will remove interest but continue callbacks until all messages +// have been processed. +// +// For a JetStream subscription, if the library has created the JetStream +// consumer, the library will send a DeleteConsumer request to the server +// when the Drain operation completes. If a failure occurs when deleting +// the JetStream consumer, an error will be reported to the asynchronous +// error callback. 
+// If you do not wish the JetStream consumer to be automatically deleted, +// ensure that the consumer is not created by the library, which means +// create the consumer with AddConsumer and bind to this consumer. +func (s *Subscription) Drain() error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + conn := s.conn + s.mu.Unlock() + if conn == nil { + return ErrBadSubscription + } + return conn.unsubscribe(s, 0, true) +} + +// Unsubscribe will remove interest in the given subject. +// +// For a JetStream subscription, if the library has created the JetStream +// consumer, it will send a DeleteConsumer request to the server (if the +// unsubscribe itself was successful). If the delete operation fails, the +// error will be returned. +// If you do not wish the JetStream consumer to be automatically deleted, +// ensure that the consumer is not created by the library, which means +// create the consumer with AddConsumer and bind to this consumer (using +// the nats.Bind() option). +func (s *Subscription) Unsubscribe() error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + conn := s.conn + closed := s.closed + dc := s.jsi != nil && s.jsi.dc + s.mu.Unlock() + if conn == nil || conn.IsClosed() { + return ErrConnectionClosed + } + if closed { + return ErrBadSubscription + } + if conn.IsDraining() { + return ErrConnectionDraining + } + err := conn.unsubscribe(s, 0, false) + if err == nil && dc { + err = s.deleteConsumer() + } + return err +} + +// checkDrained will watch for a subscription to be fully drained +// and then remove it. +func (nc *Conn) checkDrained(sub *Subscription) { + if nc == nil || sub == nil { + return + } + + // This allows us to know that whatever we have in the client pending + // is correct and the server will not send additional information. + nc.Flush() + + sub.mu.Lock() + // For JS subscriptions, check if we are going to delete the + // JS consumer when drain completes. + dc := sub.jsi != nil && sub.jsi.dc + sub.mu.Unlock() + + // Once we are here we just wait for Pending to reach 0 or + // any other state to exit this go routine. + for { + // check connection is still valid. + if nc.IsClosed() { + return + } + + // Check subscription state + sub.mu.Lock() + conn := sub.conn + closed := sub.closed + pMsgs := sub.pMsgs + sub.mu.Unlock() + + if conn == nil || closed || pMsgs == 0 { + nc.mu.Lock() + nc.removeSub(sub) + nc.mu.Unlock() + if dc { + if err := sub.deleteConsumer(); err != nil { + nc.mu.Lock() + if errCB := nc.Opts.AsyncErrorCB; errCB != nil { + nc.ach.push(func() { errCB(nc, sub, err) }) + } + nc.mu.Unlock() + } + } + return + } + + time.Sleep(100 * time.Millisecond) + } +} + +// AutoUnsubscribe will issue an automatic Unsubscribe that is +// processed by the server when max messages have been received. +// This can be useful when sending a request to an unknown number +// of subscribers. +func (s *Subscription) AutoUnsubscribe(max int) error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + conn := s.conn + closed := s.closed + s.mu.Unlock() + if conn == nil || closed { + return ErrBadSubscription + } + return conn.unsubscribe(s, max, false) +} + +// unsubscribe performs the low level unsubscribe to the server. 
+// Use Subscription.Unsubscribe() +func (nc *Conn) unsubscribe(sub *Subscription, max int, drainMode bool) error { + var maxStr string + if max > 0 { + sub.mu.Lock() + sub.max = uint64(max) + if sub.delivered < sub.max { + maxStr = strconv.Itoa(max) + } + sub.mu.Unlock() + } + + nc.mu.Lock() + // ok here, but defer is expensive + defer nc.mu.Unlock() + + if nc.isClosed() { + return ErrConnectionClosed + } + + nc.subsMu.RLock() + s := nc.subs[sub.sid] + nc.subsMu.RUnlock() + // Already unsubscribed + if s == nil { + return nil + } + + if maxStr == _EMPTY_ && !drainMode { + nc.removeSub(s) + } + + if drainMode { + go nc.checkDrained(sub) + } + + // We will send these for all subs when we reconnect + // so that we can suppress here. + if !nc.isReconnecting() { + nc.bw.appendString(fmt.Sprintf(unsubProto, s.sid, maxStr)) + nc.kickFlusher() + } + + // For JetStream subscriptions cancel the attached context if there is any. + var cancel func() + sub.mu.Lock() + jsi := sub.jsi + if jsi != nil { + cancel = jsi.cancel + jsi.cancel = nil + } + sub.mu.Unlock() + if cancel != nil { + cancel() + } + + return nil +} + +// NextMsg will return the next message available to a synchronous subscriber +// or block until one is available. An error is returned if the subscription is invalid (ErrBadSubscription), +// the connection is closed (ErrConnectionClosed), the timeout is reached (ErrTimeout), +// or if there were no responders (ErrNoResponders) when used in the context of a request/reply. +func (s *Subscription) NextMsg(timeout time.Duration) (*Msg, error) { + if s == nil { + return nil, ErrBadSubscription + } + + s.mu.Lock() + err := s.validateNextMsgState(false) + if err != nil { + s.mu.Unlock() + return nil, err + } + + // snapshot + mch := s.mch + s.mu.Unlock() + + var ok bool + var msg *Msg + + // If something is available right away, let's optimize that case. + select { + case msg, ok = <-mch: + if !ok { + return nil, s.getNextMsgErr() + } + if err := s.processNextMsgDelivered(msg); err != nil { + return nil, err + } else { + return msg, nil + } + default: + } + + // If we are here a message was not immediately available, so lets loop + // with a timeout. + + t := globalTimerPool.Get(timeout) + defer globalTimerPool.Put(t) + + select { + case msg, ok = <-mch: + if !ok { + return nil, s.getNextMsgErr() + } + if err := s.processNextMsgDelivered(msg); err != nil { + return nil, err + } + case <-t.C: + return nil, ErrTimeout + } + + return msg, nil +} + +// validateNextMsgState checks whether the subscription is in a valid +// state to call NextMsg and be delivered another message synchronously. +// This should be called while holding the lock. +func (s *Subscription) validateNextMsgState(pullSubInternal bool) error { + if s.connClosed { + return ErrConnectionClosed + } + if s.mch == nil { + if s.max > 0 && s.delivered >= s.max { + return ErrMaxMessages + } else if s.closed { + return ErrBadSubscription + } + } + if s.mcb != nil { + return ErrSyncSubRequired + } + if s.sc { + s.sc = false + return ErrSlowConsumer + } + // Unless this is from an internal call, reject use of this API. + // Users should use Fetch() instead. + if !pullSubInternal && s.jsi != nil && s.jsi.pull { + return ErrTypeSubscription + } + return nil +} + +// This is called when the sync channel has been closed. +// The error returned will be either connection or subscription +// closed depending on what caused NextMsg() to fail. 
+func (s *Subscription) getNextMsgErr() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.connClosed {
+ return ErrConnectionClosed
+ }
+ return ErrBadSubscription
+}
+
+// processNextMsgDelivered takes a message and applies the needed
+// accounting to the stats from the subscription, returning an
+// error in case the maximum number of messages has already been
+// delivered. It should not be called while holding the lock.
+func (s *Subscription) processNextMsgDelivered(msg *Msg) error {
+ s.mu.Lock()
+ nc := s.conn
+ max := s.max
+
+ var fcReply string
+ // Update some stats.
+ s.delivered++
+ delivered := s.delivered
+ if s.jsi != nil {
+ fcReply = s.checkForFlowControlResponse()
+ }
+
+ if s.typ == SyncSubscription {
+ s.pMsgs--
+ s.pBytes -= len(msg.Data)
+ }
+ s.mu.Unlock()
+
+ if fcReply != _EMPTY_ {
+ nc.Publish(fcReply, nil)
+ }
+
+ if max > 0 {
+ if delivered > max {
+ return ErrMaxMessages
+ }
+ // Remove subscription if we have reached max.
+ if delivered == max {
+ nc.mu.Lock()
+ nc.removeSub(s)
+ nc.mu.Unlock()
+ }
+ }
+ if len(msg.Data) == 0 && msg.Header.Get(statusHdr) == noResponders {
+ return ErrNoResponders
+ }
+
+ return nil
+}
+
+// QueuedMsgs returns the number of queued messages in the client for this subscription.
+// DEPRECATED: Use Pending()
+func (s *Subscription) QueuedMsgs() (int, error) {
+ m, _, err := s.Pending()
+ return int(m), err
+}
+
+// Pending returns the number of queued messages and queued bytes in the client for this subscription.
+func (s *Subscription) Pending() (int, int, error) {
+ if s == nil {
+ return -1, -1, ErrBadSubscription
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.conn == nil || s.closed {
+ return -1, -1, ErrBadSubscription
+ }
+ if s.typ == ChanSubscription {
+ return -1, -1, ErrTypeSubscription
+ }
+ return s.pMsgs, s.pBytes, nil
+}
+
+// MaxPending returns the maximum number of queued messages and queued bytes seen so far.
+func (s *Subscription) MaxPending() (int, int, error) {
+ if s == nil {
+ return -1, -1, ErrBadSubscription
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.conn == nil || s.closed {
+ return -1, -1, ErrBadSubscription
+ }
+ if s.typ == ChanSubscription {
+ return -1, -1, ErrTypeSubscription
+ }
+ return s.pMsgsMax, s.pBytesMax, nil
+}
+
+// ClearMaxPending resets the maximums seen so far.
+func (s *Subscription) ClearMaxPending() error {
+ if s == nil {
+ return ErrBadSubscription
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.conn == nil || s.closed {
+ return ErrBadSubscription
+ }
+ if s.typ == ChanSubscription {
+ return ErrTypeSubscription
+ }
+ s.pMsgsMax, s.pBytesMax = 0, 0
+ return nil
+}
+
+// Pending Limits
+const (
+ // DefaultSubPendingMsgsLimit will be 512k msgs.
+ DefaultSubPendingMsgsLimit = 512 * 1024
+ // DefaultSubPendingBytesLimit is 64MB
+ DefaultSubPendingBytesLimit = 64 * 1024 * 1024
+)
+
+// PendingLimits returns the current limits for this subscription.
+// If no error is returned, a negative value indicates that the
+// given metric is not limited.
+func (s *Subscription) PendingLimits() (int, int, error) {
+ if s == nil {
+ return -1, -1, ErrBadSubscription
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.conn == nil || s.closed {
+ return -1, -1, ErrBadSubscription
+ }
+ if s.typ == ChanSubscription {
+ return -1, -1, ErrTypeSubscription
+ }
+ return s.pMsgsLimit, s.pBytesLimit, nil
+}
+
+// SetPendingLimits sets the limits for pending msgs and bytes for this subscription.
+// Zero is not allowed. Any negative value means that the given metric is not limited.
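+//
+// For illustration (a sketch, not from the original docs; sub is assumed
+// to be an existing async subscription):
+//
+//	// Allow up to 1024 pending messages and unlimited pending bytes.
+//	if err := sub.SetPendingLimits(1024, -1); err != nil {
+//		// handle error
+//	}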
+func (s *Subscription) SetPendingLimits(msgLimit, bytesLimit int) error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil || s.closed { + return ErrBadSubscription + } + if s.typ == ChanSubscription { + return ErrTypeSubscription + } + if msgLimit == 0 || bytesLimit == 0 { + return ErrInvalidArg + } + s.pMsgsLimit, s.pBytesLimit = msgLimit, bytesLimit + return nil +} + +// Delivered returns the number of delivered messages for this subscription. +func (s *Subscription) Delivered() (int64, error) { + if s == nil { + return -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil || s.closed { + return -1, ErrBadSubscription + } + return int64(s.delivered), nil +} + +// Dropped returns the number of known dropped messages for this subscription. +// This will correspond to messages dropped by violations of PendingLimits. If +// the server declares the connection a SlowConsumer, this number may not be +// valid. +func (s *Subscription) Dropped() (int, error) { + if s == nil { + return -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil || s.closed { + return -1, ErrBadSubscription + } + return s.dropped, nil +} + +// Respond allows a convenient way to respond to requests in service based subscriptions. +func (m *Msg) Respond(data []byte) error { + if m == nil || m.Sub == nil { + return ErrMsgNotBound + } + if m.Reply == "" { + return ErrMsgNoReply + } + m.Sub.mu.Lock() + nc := m.Sub.conn + m.Sub.mu.Unlock() + // No need to check the connection here since the call to publish will do all the checking. + return nc.Publish(m.Reply, data) +} + +// RespondMsg allows a convenient way to respond to requests in service based subscriptions that might include headers +func (m *Msg) RespondMsg(msg *Msg) error { + if m == nil || m.Sub == nil { + return ErrMsgNotBound + } + if m.Reply == "" { + return ErrMsgNoReply + } + msg.Subject = m.Reply + m.Sub.mu.Lock() + nc := m.Sub.conn + m.Sub.mu.Unlock() + // No need to check the connection here since the call to publish will do all the checking. + return nc.PublishMsg(msg) +} + +// FIXME: This is a hack +// removeFlushEntry is needed when we need to discard queued up responses +// for our pings as part of a flush call. This happens when we have a flush +// call outstanding and we call close. +func (nc *Conn) removeFlushEntry(ch chan struct{}) bool { + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.pongs == nil { + return false + } + for i, c := range nc.pongs { + if c == ch { + nc.pongs[i] = nil + return true + } + } + return false +} + +// The lock must be held entering this function. +func (nc *Conn) sendPing(ch chan struct{}) { + nc.pongs = append(nc.pongs, ch) + nc.bw.appendString(pingProto) + // Flush in place. + nc.bw.flush() +} + +// This will fire periodically and send a client origin +// ping to the server. Will also check that we have received +// responses from the server. +func (nc *Conn) processPingTimer() { + nc.mu.Lock() + + if nc.status != CONNECTED { + nc.mu.Unlock() + return + } + + // Check for violation + nc.pout++ + if nc.pout > nc.Opts.MaxPingsOut { + nc.mu.Unlock() + nc.processOpErr(ErrStaleConnection) + return + } + + nc.sendPing(nil) + nc.ptmr.Reset(nc.Opts.PingInterval) + nc.mu.Unlock() +} + +// FlushTimeout allows a Flush operation to have an associated timeout. 
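+//
+// For example (an illustrative sketch, not from the original docs):
+//
+//	// Wait at most two seconds for the server to process
+//	// everything buffered so far.
+//	if err := nc.FlushTimeout(2 * time.Second); err != nil {
+//		// handle flush timeout or closed connection
+//	}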
+func (nc *Conn) FlushTimeout(timeout time.Duration) (err error) { + if nc == nil { + return ErrInvalidConnection + } + if timeout <= 0 { + return ErrBadTimeout + } + + nc.mu.Lock() + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + t := globalTimerPool.Get(timeout) + defer globalTimerPool.Put(t) + + // Create a buffered channel to prevent chan send to block + // in processPong() if this code here times out just when + // PONG was received. + ch := make(chan struct{}, 1) + nc.sendPing(ch) + nc.mu.Unlock() + + select { + case _, ok := <-ch: + if !ok { + err = ErrConnectionClosed + } else { + close(ch) + } + case <-t.C: + err = ErrTimeout + } + + if err != nil { + nc.removeFlushEntry(ch) + } + return +} + +// RTT calculates the round trip time between this client and the server. +func (nc *Conn) RTT() (time.Duration, error) { + if nc.IsClosed() { + return 0, ErrConnectionClosed + } + if nc.IsReconnecting() { + return 0, ErrDisconnected + } + start := time.Now() + if err := nc.FlushTimeout(10 * time.Second); err != nil { + return 0, err + } + return time.Since(start), nil +} + +// Flush will perform a round trip to the server and return when it +// receives the internal reply. +func (nc *Conn) Flush() error { + return nc.FlushTimeout(10 * time.Second) +} + +// Buffered will return the number of bytes buffered to be sent to the server. +// FIXME(dlc) take into account disconnected state. +func (nc *Conn) Buffered() (int, error) { + nc.mu.RLock() + defer nc.mu.RUnlock() + if nc.isClosed() || nc.bw == nil { + return -1, ErrConnectionClosed + } + return nc.bw.buffered(), nil +} + +// resendSubscriptions will send our subscription state back to the +// server. Used in reconnects +func (nc *Conn) resendSubscriptions() { + // Since we are going to send protocols to the server, we don't want to + // be holding the subsMu lock (which is used in processMsg). So copy + // the subscriptions in a temporary array. + nc.subsMu.RLock() + subs := make([]*Subscription, 0, len(nc.subs)) + for _, s := range nc.subs { + subs = append(subs, s) + } + nc.subsMu.RUnlock() + for _, s := range subs { + adjustedMax := uint64(0) + s.mu.Lock() + if s.max > 0 { + if s.delivered < s.max { + adjustedMax = s.max - s.delivered + } + // adjustedMax could be 0 here if the number of delivered msgs + // reached the max, if so unsubscribe. + if adjustedMax == 0 { + s.mu.Unlock() + nc.bw.writeDirect(fmt.Sprintf(unsubProto, s.sid, _EMPTY_)) + continue + } + } + subj, queue, sid := s.Subject, s.Queue, s.sid + s.mu.Unlock() + + nc.bw.writeDirect(fmt.Sprintf(subProto, subj, queue, sid)) + if adjustedMax > 0 { + maxStr := strconv.Itoa(int(adjustedMax)) + nc.bw.writeDirect(fmt.Sprintf(unsubProto, sid, maxStr)) + } + } +} + +// This will clear any pending flush calls and release pending calls. +// Lock is assumed to be held by the caller. +func (nc *Conn) clearPendingFlushCalls() { + // Clear any queued pongs, e.g. pending flush calls. + for _, ch := range nc.pongs { + if ch != nil { + close(ch) + } + } + nc.pongs = nil +} + +// This will clear any pending Request calls. +// Lock is assumed to be held by the caller. +func (nc *Conn) clearPendingRequestCalls() { + if nc.respMap == nil { + return + } + for key, ch := range nc.respMap { + if ch != nil { + close(ch) + delete(nc.respMap, key) + } + } +} + +// Low level close call that will do correct cleanup and set +// desired status. Also controls whether user defined callbacks +// will be triggered. The lock should not be held entering this +// function. 
This function will handle the locking manually. +func (nc *Conn) close(status Status, doCBs bool, err error) { + nc.mu.Lock() + if nc.isClosed() { + nc.status = status + nc.mu.Unlock() + return + } + nc.status = CLOSED + + // Kick the Go routines so they fall out. + nc.kickFlusher() + + // If the reconnect timer is waiting between a reconnect attempt, + // this will kick it out. + if nc.rqch != nil { + close(nc.rqch) + nc.rqch = nil + } + + // Clear any queued pongs, e.g. pending flush calls. + nc.clearPendingFlushCalls() + + // Clear any queued and blocking Requests. + nc.clearPendingRequestCalls() + + // Stop ping timer if set. + nc.stopPingTimer() + nc.ptmr = nil + + // Need to close and set TCP conn to nil if reconnect loop has stopped, + // otherwise we would incorrectly invoke Disconnect handler (if set) + // down below. + if nc.ar && nc.conn != nil { + nc.conn.Close() + nc.conn = nil + } else if nc.conn != nil { + // Go ahead and make sure we have flushed the outbound + nc.bw.flush() + defer nc.conn.Close() + } + + // Close sync subscriber channels and release any + // pending NextMsg() calls. + nc.subsMu.Lock() + for _, s := range nc.subs { + s.mu.Lock() + + // Release callers on NextMsg for SyncSubscription only + if s.mch != nil && s.typ == SyncSubscription { + close(s.mch) + } + s.mch = nil + // Mark as invalid, for signaling to waitForMsgs + s.closed = true + // Mark connection closed in subscription + s.connClosed = true + // If we have an async subscription, signals it to exit + if s.typ == AsyncSubscription && s.pCond != nil { + s.pCond.Signal() + } + + s.mu.Unlock() + } + nc.subs = nil + nc.subsMu.Unlock() + + nc.status = status + + // Perform appropriate callback if needed for a disconnect. + if doCBs { + if nc.conn != nil { + if nc.Opts.DisconnectedErrCB != nil { + nc.ach.push(func() { nc.Opts.DisconnectedErrCB(nc, err) }) + } else if nc.Opts.DisconnectedCB != nil { + nc.ach.push(func() { nc.Opts.DisconnectedCB(nc) }) + } + } + if nc.Opts.ClosedCB != nil { + nc.ach.push(func() { nc.Opts.ClosedCB(nc) }) + } + } + // If this is terminal, then we have to notify the asyncCB handler that + // it can exit once all async callbacks have been dispatched. + if status == CLOSED { + nc.ach.close() + } + nc.mu.Unlock() +} + +// Close will close the connection to the server. This call will release +// all blocking calls, such as Flush() and NextMsg() +func (nc *Conn) Close() { + if nc != nil { + // This will be a no-op if the connection was not websocket. + // We do this here as opposed to inside close() because we want + // to do this only for the final user-driven close of the client. + // Otherwise, we would need to change close() to pass a boolean + // indicating that this is the case. + nc.wsClose() + nc.close(CLOSED, !nc.Opts.NoCallbacksAfterClientClose, nil) + } +} + +// IsClosed tests if a Conn has been closed. +func (nc *Conn) IsClosed() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.isClosed() +} + +// IsReconnecting tests if a Conn is reconnecting. +func (nc *Conn) IsReconnecting() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.isReconnecting() +} + +// IsConnected tests if a Conn is connected. +func (nc *Conn) IsConnected() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.isConnected() +} + +// drainConnection will run in a separate Go routine and will +// flush all publishes and drain all active subscriptions. +func (nc *Conn) drainConnection() { + // Snapshot subs list. 
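+ // The subscription list is copied under the lock so that draining
+ // can proceed without holding it.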
+ nc.mu.Lock() + + // Check again here if we are in a state to not process. + if nc.isClosed() { + nc.mu.Unlock() + return + } + if nc.isConnecting() || nc.isReconnecting() { + nc.mu.Unlock() + // Move to closed state. + nc.Close() + return + } + + subs := make([]*Subscription, 0, len(nc.subs)) + for _, s := range nc.subs { + if s == nc.respMux { + // Skip since might be in use while messages + // are being processed (can miss responses). + continue + } + subs = append(subs, s) + } + errCB := nc.Opts.AsyncErrorCB + drainWait := nc.Opts.DrainTimeout + respMux := nc.respMux + nc.mu.Unlock() + + // for pushing errors with context. + pushErr := func(err error) { + nc.mu.Lock() + nc.err = err + if errCB != nil { + nc.ach.push(func() { errCB(nc, nil, err) }) + } + nc.mu.Unlock() + } + + // Do subs first, skip request handler if present. + for _, s := range subs { + if err := s.Drain(); err != nil { + // We will notify about these but continue. + pushErr(err) + } + } + + // Wait for the subscriptions to drop to zero. + timeout := time.Now().Add(drainWait) + var min int + if respMux != nil { + min = 1 + } else { + min = 0 + } + for time.Now().Before(timeout) { + if nc.NumSubscriptions() == min { + break + } + time.Sleep(10 * time.Millisecond) + } + + // In case there was a request/response handler + // then need to call drain at the end. + if respMux != nil { + if err := respMux.Drain(); err != nil { + // We will notify about these but continue. + pushErr(err) + } + for time.Now().Before(timeout) { + if nc.NumSubscriptions() == 0 { + break + } + time.Sleep(10 * time.Millisecond) + } + } + + // Check if we timed out. + if nc.NumSubscriptions() != 0 { + pushErr(ErrDrainTimeout) + } + + // Flip State + nc.mu.Lock() + nc.status = DRAINING_PUBS + nc.mu.Unlock() + + // Do publish drain via Flush() call. + err := nc.FlushTimeout(5 * time.Second) + if err != nil { + pushErr(err) + } + + // Move to closed state. + nc.Close() +} + +// Drain will put a connection into a drain state. All subscriptions will +// immediately be put into a drain state. Upon completion, the publishers +// will be drained and can not publish any additional messages. Upon draining +// of the publishers, the connection will be closed. Use the ClosedCB() +// option to know when the connection has moved from draining to closed. +// +// See note in Subscription.Drain for JetStream subscriptions. +func (nc *Conn) Drain() error { + nc.mu.Lock() + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + if nc.isConnecting() || nc.isReconnecting() { + nc.mu.Unlock() + nc.Close() + return ErrConnectionReconnecting + } + if nc.isDraining() { + nc.mu.Unlock() + return nil + } + nc.status = DRAINING_SUBS + go nc.drainConnection() + nc.mu.Unlock() + + return nil +} + +// IsDraining tests if a Conn is in the draining state. +func (nc *Conn) IsDraining() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.isDraining() +} + +// caller must lock +func (nc *Conn) getServers(implicitOnly bool) []string { + poolSize := len(nc.srvPool) + var servers = make([]string, 0) + for i := 0; i < poolSize; i++ { + if implicitOnly && !nc.srvPool[i].isImplicit { + continue + } + url := nc.srvPool[i].url + servers = append(servers, fmt.Sprintf("%s://%s", url.Scheme, url.Host)) + } + return servers +} + +// Servers returns the list of known server urls, including additional +// servers discovered after a connection has been established. If +// authentication is enabled, use UserInfo or Token when connecting with +// these urls. 
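+//
+// For example (sketch, not from the original docs):
+//
+//	for _, u := range nc.Servers() {
+//		log.Println("known server:", u)
+//	}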
+func (nc *Conn) Servers() []string { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.getServers(false) +} + +// DiscoveredServers returns only the server urls that have been discovered +// after a connection has been established. If authentication is enabled, +// use UserInfo or Token when connecting with these urls. +func (nc *Conn) DiscoveredServers() []string { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.getServers(true) +} + +// Status returns the current state of the connection. +func (nc *Conn) Status() Status { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.status +} + +// Test if Conn has been closed Lock is assumed held. +func (nc *Conn) isClosed() bool { + return nc.status == CLOSED +} + +// Test if Conn is in the process of connecting +func (nc *Conn) isConnecting() bool { + return nc.status == CONNECTING +} + +// Test if Conn is being reconnected. +func (nc *Conn) isReconnecting() bool { + return nc.status == RECONNECTING +} + +// Test if Conn is connected or connecting. +func (nc *Conn) isConnected() bool { + return nc.status == CONNECTED || nc.isDraining() +} + +// Test if Conn is in the draining state. +func (nc *Conn) isDraining() bool { + return nc.status == DRAINING_SUBS || nc.status == DRAINING_PUBS +} + +// Test if Conn is in the draining state for pubs. +func (nc *Conn) isDrainingPubs() bool { + return nc.status == DRAINING_PUBS +} + +// Stats will return a race safe copy of the Statistics section for the connection. +func (nc *Conn) Stats() Statistics { + // Stats are updated either under connection's mu or with atomic operations + // for inbound stats in processMsg(). + nc.mu.Lock() + stats := Statistics{ + InMsgs: atomic.LoadUint64(&nc.InMsgs), + InBytes: atomic.LoadUint64(&nc.InBytes), + OutMsgs: nc.OutMsgs, + OutBytes: nc.OutBytes, + Reconnects: nc.Reconnects, + } + nc.mu.Unlock() + return stats +} + +// MaxPayload returns the size limit that a message payload can have. +// This is set by the server configuration and delivered to the client +// upon connect. +func (nc *Conn) MaxPayload() int64 { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.info.MaxPayload +} + +// HeadersSupported will return if the server supports headers +func (nc *Conn) HeadersSupported() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.info.Headers +} + +// AuthRequired will return if the connected server requires authorization. +func (nc *Conn) AuthRequired() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.info.AuthRequired +} + +// TLSRequired will return if the connected server requires TLS connections. +func (nc *Conn) TLSRequired() bool { + nc.mu.RLock() + defer nc.mu.RUnlock() + return nc.info.TLSRequired +} + +// Barrier schedules the given function `f` to all registered asynchronous +// subscriptions. +// Only the last subscription to see this barrier will invoke the function. +// If no subscription is registered at the time of this call, `f()` is invoked +// right away. +// ErrConnectionClosed is returned if the connection is closed prior to +// the call. 
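All of these accessors take the read lock, so they are safe to call from any goroutine. A small sketch of the introspection surface, assuming an established connection:

```go
package example

import (
	"log"

	"github.com/nats-io/nats.go"
)

// logConnState assumes nc is a connected *nats.Conn.
func logConnState(nc *nats.Conn) {
	if !nc.IsConnected() {
		return
	}
	stats := nc.Stats() // race-safe copy, per the implementation above
	log.Printf("in: %d msgs / %d bytes, reconnects: %d",
		stats.InMsgs, stats.InBytes, stats.Reconnects)
	log.Printf("status: %v, max payload: %d bytes",
		nc.Status(), nc.MaxPayload())
	log.Printf("servers: %v, discovered: %v",
		nc.Servers(), nc.DiscoveredServers())
}
```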
+func (nc *Conn) Barrier(f func()) error { + nc.mu.Lock() + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + nc.subsMu.Lock() + // Need to figure out how many non chan subscriptions there are + numSubs := 0 + for _, sub := range nc.subs { + if sub.typ == AsyncSubscription { + numSubs++ + } + } + if numSubs == 0 { + nc.subsMu.Unlock() + nc.mu.Unlock() + f() + return nil + } + barrier := &barrierInfo{refs: int64(numSubs), f: f} + for _, sub := range nc.subs { + sub.mu.Lock() + if sub.mch == nil { + msg := &Msg{barrier: barrier} + // Push onto the async pList + if sub.pTail != nil { + sub.pTail.next = msg + } else { + sub.pHead = msg + sub.pCond.Signal() + } + sub.pTail = msg + } + sub.mu.Unlock() + } + nc.subsMu.Unlock() + nc.mu.Unlock() + return nil +} + +// GetClientIP returns the client IP as known by the server. +// Supported as of server version 2.1.6. +func (nc *Conn) GetClientIP() (net.IP, error) { + nc.mu.RLock() + defer nc.mu.RUnlock() + if nc.isClosed() { + return nil, ErrConnectionClosed + } + if nc.info.ClientIP == "" { + return nil, ErrClientIPNotSupported + } + ip := net.ParseIP(nc.info.ClientIP) + return ip, nil +} + +// GetClientID returns the client ID assigned by the server to which +// the client is currently connected to. Note that the value may change if +// the client reconnects. +// This function returns ErrClientIDNotSupported if the server is of a +// version prior to 1.2.0. +func (nc *Conn) GetClientID() (uint64, error) { + nc.mu.RLock() + defer nc.mu.RUnlock() + if nc.isClosed() { + return 0, ErrConnectionClosed + } + if nc.info.CID == 0 { + return 0, ErrClientIDNotSupported + } + return nc.info.CID, nil +} + +// NkeyOptionFromSeed will load an nkey pair from a seed file. +// It will return the NKey Option and will handle +// signing of nonce challenges from the server. It will take +// care to not hold keys in memory and to wipe memory. +func NkeyOptionFromSeed(seedFile string) (Option, error) { + kp, err := nkeyPairFromSeedFile(seedFile) + if err != nil { + return nil, err + } + // Wipe our key on exit. + defer kp.Wipe() + + pub, err := kp.PublicKey() + if err != nil { + return nil, err + } + if !nkeys.IsValidPublicUserKey(pub) { + return nil, fmt.Errorf("nats: Not a valid nkey user seed") + } + sigCB := func(nonce []byte) ([]byte, error) { + return sigHandler(nonce, seedFile) + } + return Nkey(string(pub), sigCB), nil +} + +// Just wipe slice with 'x', for clearing contents of creds or nkey seed file. 
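Barrier acts as a flush point for async subscription callbacks: the scheduled function runs only after every message queued at call time has been dispatched. A sketch, assuming a connected `nc` with async subscriptions:

```go
package example

import "github.com/nats-io/nats.go"

// waitForPendingCallbacks assumes nc is a connected *nats.Conn.
func waitForPendingCallbacks(nc *nats.Conn) error {
	done := make(chan struct{})
	// Runs immediately if there are no async subscriptions,
	// otherwise after the last subscription drains its queue.
	if err := nc.Barrier(func() { close(done) }); err != nil {
		return err
	}
	<-done
	return nil
}
```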
+func wipeSlice(buf []byte) { + for i := range buf { + buf[i] = 'x' + } +} + +func userFromFile(userFile string) (string, error) { + path, err := expandPath(userFile) + if err != nil { + return _EMPTY_, fmt.Errorf("nats: %w", err) + } + + contents, err := os.ReadFile(path) + if err != nil { + return _EMPTY_, fmt.Errorf("nats: %w", err) + } + defer wipeSlice(contents) + return nkeys.ParseDecoratedJWT(contents) +} + +func homeDir() (string, error) { + if runtime.GOOS == "windows" { + homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH") + userProfile := os.Getenv("USERPROFILE") + + var home string + if homeDrive == "" || homePath == "" { + if userProfile == "" { + return _EMPTY_, errors.New("nats: failed to get home dir, require %HOMEDRIVE% and %HOMEPATH% or %USERPROFILE%") + } + home = userProfile + } else { + home = filepath.Join(homeDrive, homePath) + } + + return home, nil + } + + home := os.Getenv("HOME") + if home == "" { + return _EMPTY_, errors.New("nats: failed to get home dir, require $HOME") + } + return home, nil +} + +func expandPath(p string) (string, error) { + p = os.ExpandEnv(p) + + if !strings.HasPrefix(p, "~") { + return p, nil + } + + home, err := homeDir() + if err != nil { + return _EMPTY_, err + } + + return filepath.Join(home, p[1:]), nil +} + +func nkeyPairFromSeedFile(seedFile string) (nkeys.KeyPair, error) { + contents, err := os.ReadFile(seedFile) + if err != nil { + return nil, fmt.Errorf("nats: %w", err) + } + defer wipeSlice(contents) + return nkeys.ParseDecoratedNKey(contents) +} + +// Sign authentication challenges from the server. +// Do not keep private seed in memory. +func sigHandler(nonce []byte, seedFile string) ([]byte, error) { + kp, err := nkeyPairFromSeedFile(seedFile) + if err != nil { + return nil, fmt.Errorf("unable to extract key pair from file %q: %w", seedFile, err) + } + // Wipe our key on exit. + defer kp.Wipe() + + sig, _ := kp.Sign(nonce) + return sig, nil +} + +type timeoutWriter struct { + timeout time.Duration + conn net.Conn + err error +} + +// Write implements the io.Writer interface. +func (tw *timeoutWriter) Write(p []byte) (int, error) { + if tw.err != nil { + return 0, tw.err + } + + var n int + tw.conn.SetWriteDeadline(time.Now().Add(tw.timeout)) + n, tw.err = tw.conn.Write(p) + tw.conn.SetWriteDeadline(time.Time{}) + return n, tw.err +} diff --git a/vendor/github.com/nats-io/nats.go/netchan.go b/vendor/github.com/nats-io/nats.go/netchan.go new file mode 100644 index 00000000..a1af9e06 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/netchan.go @@ -0,0 +1,111 @@ +// Copyright 2013-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "errors" + "reflect" +) + +// This allows the functionality for network channels by binding send and receive Go chans +// to subjects and optionally queue groups. +// Data will be encoded and decoded via the EncodedConn and its associated encoders. + +// BindSendChan binds a channel for send operations to NATS. 
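A connect sketch using the nkey option above; the seed-file path is hypothetical. As sigHandler shows, the seed is re-read and wiped for every nonce challenge rather than kept in memory:

```go
package main

import (
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	opt, err := nats.NkeyOptionFromSeed("./user.nk") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	nc, err := nats.Connect("nats://127.0.0.1:4222", opt)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()
}
```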
+func (c *EncodedConn) BindSendChan(subject string, channel interface{}) error { + chVal := reflect.ValueOf(channel) + if chVal.Kind() != reflect.Chan { + return ErrChanArg + } + go chPublish(c, chVal, subject) + return nil +} + +// Publish all values that arrive on the channel until it is closed or we +// encounter an error. +func chPublish(c *EncodedConn, chVal reflect.Value, subject string) { + for { + val, ok := chVal.Recv() + if !ok { + // Channel has most likely been closed. + return + } + if e := c.Publish(subject, val.Interface()); e != nil { + // Do this under lock. + c.Conn.mu.Lock() + defer c.Conn.mu.Unlock() + + if c.Conn.Opts.AsyncErrorCB != nil { + // FIXME(dlc) - Not sure this is the right thing to do. + // FIXME(ivan) - If the connection is not yet closed, try to schedule the callback + if c.Conn.isClosed() { + go c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) + } else { + c.Conn.ach.push(func() { c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) }) + } + } + return + } + } +} + +// BindRecvChan binds a channel for receive operations from NATS. +func (c *EncodedConn) BindRecvChan(subject string, channel interface{}) (*Subscription, error) { + return c.bindRecvChan(subject, _EMPTY_, channel) +} + +// BindRecvQueueChan binds a channel for queue-based receive operations from NATS. +func (c *EncodedConn) BindRecvQueueChan(subject, queue string, channel interface{}) (*Subscription, error) { + return c.bindRecvChan(subject, queue, channel) +} + +// Internal function to bind receive operations for a channel. +func (c *EncodedConn) bindRecvChan(subject, queue string, channel interface{}) (*Subscription, error) { + chVal := reflect.ValueOf(channel) + if chVal.Kind() != reflect.Chan { + return nil, ErrChanArg + } + argType := chVal.Type().Elem() + + cb := func(m *Msg) { + var oPtr reflect.Value + if argType.Kind() != reflect.Ptr { + oPtr = reflect.New(argType) + } else { + oPtr = reflect.New(argType.Elem()) + } + if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { + c.Conn.err = errors.New("nats: Got an error trying to unmarshal: " + err.Error()) + if c.Conn.Opts.AsyncErrorCB != nil { + c.Conn.ach.push(func() { c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, c.Conn.err) }) + } + return + } + if argType.Kind() != reflect.Ptr { + oPtr = reflect.Indirect(oPtr) + } + // This is a bit hacky, but in this instance we may be trying to send to a closed channel. + // and the user does not know when it is safe to close the channel. + defer func() { + // If we have panicked, recover and close the subscription. + if r := recover(); r != nil { + m.Sub.Unsubscribe() + } + }() + // Actually do the send to the channel. + chVal.Send(oPtr) + } + + return c.Conn.subscribe(subject, queue, cb, nil, false, nil) +} diff --git a/vendor/github.com/nats-io/nats.go/object.go b/vendor/github.com/nats-io/nats.go/object.go new file mode 100644 index 00000000..133001fe --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/object.go @@ -0,0 +1,1358 @@ +// Copyright 2021-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
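Binding channels end to end looks like the following sketch, assuming a reachable server; the subject and payloads are illustrative, and JSON_ENCODER is one of the built-in encoders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	ec, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
	if err != nil {
		log.Fatal(err)
	}
	defer ec.Close()

	send := make(chan string)
	recv := make(chan string, 16)
	ec.BindSendChan("greet", send) // values sent here are published
	ec.BindRecvChan("greet", recv) // decoded messages arrive here

	send <- "hello"
	fmt.Println(<-recv)
}
```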
+// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "hash" + "io" + "net" + "os" + "strings" + "sync" + "time" + + "github.com/nats-io/nuid" +) + +// ObjectStoreManager creates, loads and deletes Object Stores +// +// Notice: Experimental Preview +// +// This functionality is EXPERIMENTAL and may be changed in later releases. +type ObjectStoreManager interface { + // ObjectStore will look up and bind to an existing object store instance. + ObjectStore(bucket string) (ObjectStore, error) + // CreateObjectStore will create an object store. + CreateObjectStore(cfg *ObjectStoreConfig) (ObjectStore, error) + // DeleteObjectStore will delete the underlying stream for the named object. + DeleteObjectStore(bucket string) error + // ObjectStoreNames is used to retrieve a list of bucket names + ObjectStoreNames(opts ...ObjectOpt) <-chan string + // ObjectStores is used to retrieve a list of bucket statuses + ObjectStores(opts ...ObjectOpt) <-chan ObjectStoreStatus +} + +// ObjectStore is a blob store capable of storing large objects efficiently in +// JetStream streams +// +// Notice: Experimental Preview +// +// This functionality is EXPERIMENTAL and may be changed in later releases. +type ObjectStore interface { + // Put will place the contents from the reader into a new object. + Put(obj *ObjectMeta, reader io.Reader, opts ...ObjectOpt) (*ObjectInfo, error) + // Get will pull the named object from the object store. + Get(name string, opts ...GetObjectOpt) (ObjectResult, error) + + // PutBytes is convenience function to put a byte slice into this object store. + PutBytes(name string, data []byte, opts ...ObjectOpt) (*ObjectInfo, error) + // GetBytes is a convenience function to pull an object from this object store and return it as a byte slice. + GetBytes(name string, opts ...GetObjectOpt) ([]byte, error) + + // PutString is convenience function to put a string into this object store. + PutString(name string, data string, opts ...ObjectOpt) (*ObjectInfo, error) + // GetString is a convenience function to pull an object from this object store and return it as a string. + GetString(name string, opts ...GetObjectOpt) (string, error) + + // PutFile is convenience function to put a file into this object store. + PutFile(file string, opts ...ObjectOpt) (*ObjectInfo, error) + // GetFile is a convenience function to pull an object from this object store and place it in a file. + GetFile(name, file string, opts ...GetObjectOpt) error + + // GetInfo will retrieve the current information for the object. + GetInfo(name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) + // UpdateMeta will update the metadata for the object. + UpdateMeta(name string, meta *ObjectMeta) error + + // Delete will delete the named object. + Delete(name string) error + + // AddLink will add a link to another object. + AddLink(name string, obj *ObjectInfo) (*ObjectInfo, error) + + // AddBucketLink will add a link to another object store. + AddBucketLink(name string, bucket ObjectStore) (*ObjectInfo, error) + + // Seal will seal the object store, no further modifications will be allowed. + Seal() error + + // Watch for changes in the underlying store and receive meta information updates. + Watch(opts ...WatchOpt) (ObjectWatcher, error) + + // List will list all the objects in this store. 
+ List(opts ...ListObjectsOpt) ([]*ObjectInfo, error) + + // Status retrieves run-time status about the backing store of the bucket. + Status() (ObjectStoreStatus, error) +} + +type ObjectOpt interface { + configureObject(opts *objOpts) error +} + +type objOpts struct { + ctx context.Context +} + +// For nats.Context() support. +func (ctx ContextOpt) configureObject(opts *objOpts) error { + opts.ctx = ctx + return nil +} + +// ObjectWatcher is what is returned when doing a watch. +type ObjectWatcher interface { + // Updates returns a channel to read any updates to entries. + Updates() <-chan *ObjectInfo + // Stop will stop this watcher. + Stop() error +} + +var ( + ErrObjectConfigRequired = errors.New("nats: object-store config required") + ErrBadObjectMeta = errors.New("nats: object-store meta information invalid") + ErrObjectNotFound = errors.New("nats: object not found") + ErrInvalidStoreName = errors.New("nats: invalid object-store name") + ErrDigestMismatch = errors.New("nats: received a corrupt object, digests do not match") + ErrInvalidDigestFormat = errors.New("nats: object digest hash has invalid format") + ErrNoObjectsFound = errors.New("nats: no objects found") + ErrObjectAlreadyExists = errors.New("nats: an object already exists with that name") + ErrNameRequired = errors.New("nats: name is required") + ErrNeeds262 = errors.New("nats: object-store requires at least server version 2.6.2") + ErrLinkNotAllowed = errors.New("nats: link cannot be set when putting the object in bucket") + ErrObjectRequired = errors.New("nats: object required") + ErrNoLinkToDeleted = errors.New("nats: not allowed to link to a deleted object") + ErrNoLinkToLink = errors.New("nats: not allowed to link to another link") + ErrCantGetBucket = errors.New("nats: invalid Get, object is a link to a bucket") + ErrBucketRequired = errors.New("nats: bucket required") + ErrBucketMalformed = errors.New("nats: bucket malformed") + ErrUpdateMetaDeleted = errors.New("nats: cannot update meta for a deleted object") +) + +// ObjectStoreConfig is the config for the object store. +type ObjectStoreConfig struct { + Bucket string + Description string + TTL time.Duration + MaxBytes int64 + Storage StorageType + Replicas int + Placement *Placement +} + +type ObjectStoreStatus interface { + // Bucket is the name of the bucket + Bucket() string + // Description is the description supplied when creating the bucket + Description() string + // TTL indicates how long objects are kept in the bucket + TTL() time.Duration + // Storage indicates the underlying JetStream storage technology used to store data + Storage() StorageType + // Replicas indicates how many storage replicas are kept for the data in the bucket + Replicas() int + // Sealed indicates the stream is sealed and cannot be modified in any way + Sealed() bool + // Size is the combined size of all data in the bucket including metadata, in bytes + Size() uint64 + // BackingStore provides details about the underlying storage + BackingStore() string +} + +// ObjectMetaOptions +type ObjectMetaOptions struct { + Link *ObjectLink `json:"link,omitempty"` + ChunkSize uint32 `json:"max_chunk_size,omitempty"` +} + +// ObjectMeta is high level information about an object. +type ObjectMeta struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + Headers Header `json:"headers,omitempty"` + + // Optional options. + Opts *ObjectMetaOptions `json:"options,omitempty"` +} + +// ObjectInfo is meta plus instance information. 
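The sentinel errors above are plain errors.New values, so callers can branch on them with errors.Is. A sketch using GetInfo and PutBytes, both defined further down in this file; the object name and payload are hypothetical:

```go
package example

import (
	"errors"

	"github.com/nats-io/nats.go"
)

// ensureObject assumes store is a bound nats.ObjectStore.
func ensureObject(store nats.ObjectStore, name string, data []byte) error {
	_, err := store.GetInfo(name)
	if errors.Is(err, nats.ErrObjectNotFound) {
		_, err = store.PutBytes(name, data)
	}
	return err
}
```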
+type ObjectInfo struct { + ObjectMeta + Bucket string `json:"bucket"` + NUID string `json:"nuid"` + Size uint64 `json:"size"` + ModTime time.Time `json:"mtime"` + Chunks uint32 `json:"chunks"` + Digest string `json:"digest,omitempty"` + Deleted bool `json:"deleted,omitempty"` +} + +// ObjectLink is used to embed links to other buckets and objects. +type ObjectLink struct { + // Bucket is the name of the other object store. + Bucket string `json:"bucket"` + // Name can be used to link to a single object. + // If empty means this is a link to the whole store, like a directory. + Name string `json:"name,omitempty"` +} + +// ObjectResult will return the underlying stream info and also be an io.ReadCloser. +type ObjectResult interface { + io.ReadCloser + Info() (*ObjectInfo, error) + Error() error +} + +const ( + objNameTmpl = "OBJ_%s" // OBJ_ // stream name + objAllChunksPreTmpl = "$O.%s.C.>" // $O..C.> // chunk stream subject + objAllMetaPreTmpl = "$O.%s.M.>" // $O..M.> // meta stream subject + objChunksPreTmpl = "$O.%s.C.%s" // $O..C. // chunk message subject + objMetaPreTmpl = "$O.%s.M.%s" // $O..M. // meta message subject + objNoPending = "0" + objDefaultChunkSize = uint32(128 * 1024) // 128k + objDigestType = "SHA-256=" + objDigestTmpl = objDigestType + "%s" +) + +type obs struct { + name string + stream string + js *js +} + +// CreateObjectStore will create an object store. +func (js *js) CreateObjectStore(cfg *ObjectStoreConfig) (ObjectStore, error) { + if !js.nc.serverMinVersion(2, 6, 2) { + return nil, ErrNeeds262 + } + if cfg == nil { + return nil, ErrObjectConfigRequired + } + if !validBucketRe.MatchString(cfg.Bucket) { + return nil, ErrInvalidStoreName + } + + name := cfg.Bucket + chunks := fmt.Sprintf(objAllChunksPreTmpl, name) + meta := fmt.Sprintf(objAllMetaPreTmpl, name) + + // We will set explicitly some values so that we can do comparison + // if we get an "already in use" error and need to check if it is same. + // See kv + replicas := cfg.Replicas + if replicas == 0 { + replicas = 1 + } + maxBytes := cfg.MaxBytes + if maxBytes == 0 { + maxBytes = -1 + } + + scfg := &StreamConfig{ + Name: fmt.Sprintf(objNameTmpl, name), + Description: cfg.Description, + Subjects: []string{chunks, meta}, + MaxAge: cfg.TTL, + MaxBytes: maxBytes, + Storage: cfg.Storage, + Replicas: replicas, + Placement: cfg.Placement, + Discard: DiscardNew, + AllowRollup: true, + AllowDirect: true, + } + + // Create our stream. + _, err := js.AddStream(scfg) + if err != nil { + return nil, err + } + + return &obs{name: name, stream: scfg.Name, js: js}, nil +} + +// ObjectStore will look up and bind to an existing object store instance. +func (js *js) ObjectStore(bucket string) (ObjectStore, error) { + if !validBucketRe.MatchString(bucket) { + return nil, ErrInvalidStoreName + } + if !js.nc.serverMinVersion(2, 6, 2) { + return nil, ErrNeeds262 + } + + stream := fmt.Sprintf(objNameTmpl, bucket) + si, err := js.StreamInfo(stream) + if err != nil { + return nil, err + } + return &obs{name: bucket, stream: si.Config.Name, js: js}, nil +} + +// DeleteObjectStore will delete the underlying stream for the named object. +func (js *js) DeleteObjectStore(bucket string) error { + stream := fmt.Sprintf(objNameTmpl, bucket) + return js.DeleteStream(stream) +} + +func encodeName(name string) string { + return base64.URLEncoding.EncodeToString([]byte(name)) +} + +// Put will place the contents from the reader into this object-store. 
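Creating and using a bucket with the constructors above. A sketch that assumes JetStream is enabled on the server; the bucket name and payload are illustrative:

```go
package main

import (
	"log"
	"time"

	"github.com/nats-io/nats.go"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	js, err := nc.JetStream()
	if err != nil {
		log.Fatal(err)
	}

	// Backed by a stream named OBJ_configs, per objNameTmpl above.
	store, err := js.CreateObjectStore(&nats.ObjectStoreConfig{
		Bucket:  "configs", // hypothetical bucket name
		TTL:     24 * time.Hour,
		Storage: nats.FileStorage,
	})
	if err != nil {
		log.Fatal(err)
	}

	if _, err := store.PutBytes("site.yaml", []byte("replicas: 3\n")); err != nil {
		log.Fatal(err)
	}
	data, err := store.GetBytes("site.yaml")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", data)
}
```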
+func (obs *obs) Put(meta *ObjectMeta, r io.Reader, opts ...ObjectOpt) (*ObjectInfo, error) { + if meta == nil || meta.Name == "" { + return nil, ErrBadObjectMeta + } + + if meta.Opts == nil { + meta.Opts = &ObjectMetaOptions{ChunkSize: objDefaultChunkSize} + } else if meta.Opts.Link != nil { + return nil, ErrLinkNotAllowed + } else if meta.Opts.ChunkSize == 0 { + meta.Opts.ChunkSize = objDefaultChunkSize + } + + var o objOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureObject(&o); err != nil { + return nil, err + } + } + } + ctx := o.ctx + + // Create the new nuid so chunks go on a new subject if the name is re-used + newnuid := nuid.Next() + + // These will be used in more than one place + chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, newnuid) + + // Grab existing meta info (einfo). Ok to be found or not found, any other error is a problem + // Chunks on the old nuid can be cleaned up at the end + einfo, err := obs.GetInfo(meta.Name, GetObjectInfoShowDeleted()) // GetInfo will encode the name + if err != nil && err != ErrObjectNotFound { + return nil, err + } + + // For async error handling + var perr error + var mu sync.Mutex + setErr := func(err error) { + mu.Lock() + defer mu.Unlock() + perr = err + } + getErr := func() error { + mu.Lock() + defer mu.Unlock() + return perr + } + + purgePartial := func() { obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: chunkSubj}) } + + // Create our own JS context to handle errors etc. + js, err := obs.js.nc.JetStream(PublishAsyncErrHandler(func(js JetStream, _ *Msg, err error) { setErr(err) })) + if err != nil { + return nil, err + } + + m, h := NewMsg(chunkSubj), sha256.New() + chunk, sent, total := make([]byte, meta.Opts.ChunkSize), 0, uint64(0) + + // set up the info object. The chunk upload sets the size and digest + info := &ObjectInfo{Bucket: obs.name, NUID: newnuid, ObjectMeta: *meta} + + for r != nil { + if ctx != nil { + select { + case <-ctx.Done(): + if ctx.Err() == context.Canceled { + err = ctx.Err() + } else { + err = ErrTimeout + } + default: + } + if err != nil { + purgePartial() + return nil, err + } + } + + // Actual read. + // TODO(dlc) - Deadline? + n, readErr := r.Read(chunk) + + // Handle all non EOF errors + if readErr != nil && readErr != io.EOF { + purgePartial() + return nil, readErr + } + + // Add chunk only if we received data + if n > 0 { + // Chunk processing. + m.Data = chunk[:n] + h.Write(m.Data) + + // Send msg itself. + if _, err := js.PublishMsgAsync(m); err != nil { + purgePartial() + return nil, err + } + if err := getErr(); err != nil { + purgePartial() + return nil, err + } + // Update totals. + sent++ + total += uint64(n) + } + + // EOF Processing. + if readErr == io.EOF { + // Place meta info. + info.Size, info.Chunks = uint64(total), uint32(sent) + info.Digest = GetObjectDigestValue(h) + break + } + } + + // Prepare the meta message + metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(meta.Name)) + mm := NewMsg(metaSubj) + mm.Header.Set(MsgRollup, MsgRollupSubject) + mm.Data, err = json.Marshal(info) + if err != nil { + if r != nil { + purgePartial() + } + return nil, err + } + + // Publish the meta message. + _, err = js.PublishMsgAsync(mm) + if err != nil { + if r != nil { + purgePartial() + } + return nil, err + } + + // Wait for all to be processed. 
+ select { + case <-js.PublishAsyncComplete(): + if err := getErr(); err != nil { + if r != nil { + purgePartial() + } + return nil, err + } + case <-time.After(obs.js.opts.wait): + return nil, ErrTimeout + } + + info.ModTime = time.Now().UTC() // This time is not actually the correct time + + // Delete any original chunks. + if einfo != nil && !einfo.Deleted { + echunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, einfo.NUID) + obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: echunkSubj}) + } + + // TODO would it be okay to do this to return the info with the correct time? + // With the understanding that it is an extra call to the server. + // Otherwise the time the user gets back is the client time, not the server time. + // return obs.GetInfo(info.Name) + + return info, nil +} + +// GetObjectDigestValue calculates the base64 value of hashed data +func GetObjectDigestValue(data hash.Hash) string { + sha := data.Sum(nil) + return fmt.Sprintf(objDigestTmpl, base64.URLEncoding.EncodeToString(sha[:])) +} + +// DecodeObjectDigest decodes base64 hash +func DecodeObjectDigest(data string) ([]byte, error) { + digest := strings.SplitN(data, "=", 2) + if len(digest) != 2 { + return nil, ErrInvalidDigestFormat + } + return base64.URLEncoding.DecodeString(digest[1]) +} + +// ObjectResult impl. +type objResult struct { + sync.Mutex + info *ObjectInfo + r io.ReadCloser + err error + ctx context.Context + digest hash.Hash +} + +func (info *ObjectInfo) isLink() bool { + return info.ObjectMeta.Opts != nil && info.ObjectMeta.Opts.Link != nil +} + +type GetObjectOpt interface { + configureGetObject(opts *getObjectOpts) error +} +type getObjectOpts struct { + ctx context.Context + // Include deleted object in the result. + showDeleted bool +} + +type getObjectFn func(opts *getObjectOpts) error + +func (opt getObjectFn) configureGetObject(opts *getObjectOpts) error { + return opt(opts) +} + +// GetObjectShowDeleted makes Get() return object if it was marked as deleted. +func GetObjectShowDeleted() GetObjectOpt { + return getObjectFn(func(opts *getObjectOpts) error { + opts.showDeleted = true + return nil + }) +} + +// For nats.Context() support. +func (ctx ContextOpt) configureGetObject(opts *getObjectOpts) error { + opts.ctx = ctx + return nil +} + +// Get will pull the object from the underlying stream. +func (obs *obs) Get(name string, opts ...GetObjectOpt) (ObjectResult, error) { + var o getObjectOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureGetObject(&o); err != nil { + return nil, err + } + } + } + ctx := o.ctx + infoOpts := make([]GetObjectInfoOpt, 0) + if ctx != nil { + infoOpts = append(infoOpts, Context(ctx)) + } + if o.showDeleted { + infoOpts = append(infoOpts, GetObjectInfoShowDeleted()) + } + + // Grab meta info. + info, err := obs.GetInfo(name, infoOpts...) + if err != nil { + return nil, err + } + if info.NUID == _EMPTY_ { + return nil, ErrBadObjectMeta + } + + // Check for object links. If single objects we do a pass through. + if info.isLink() { + if info.ObjectMeta.Opts.Link.Name == _EMPTY_ { + return nil, ErrCantGetBucket + } + + // is the link in the same bucket? 
+ lbuck := info.ObjectMeta.Opts.Link.Bucket + if lbuck == obs.name { + return obs.Get(info.ObjectMeta.Opts.Link.Name) + } + + // different bucket + lobs, err := obs.js.ObjectStore(lbuck) + if err != nil { + return nil, err + } + return lobs.Get(info.ObjectMeta.Opts.Link.Name) + } + + result := &objResult{info: info, ctx: ctx} + if info.Size == 0 { + return result, nil + } + + pr, pw := net.Pipe() + result.r = pr + + gotErr := func(m *Msg, err error) { + pw.Close() + m.Sub.Unsubscribe() + result.setErr(err) + } + + // For calculating sum256 + result.digest = sha256.New() + + processChunk := func(m *Msg) { + if ctx != nil { + select { + case <-ctx.Done(): + if ctx.Err() == context.Canceled { + err = ctx.Err() + } else { + err = ErrTimeout + } + default: + } + if err != nil { + gotErr(m, err) + return + } + } + + tokens, err := getMetadataFields(m.Reply) + if err != nil { + gotErr(m, err) + return + } + + // Write to our pipe. + for b := m.Data; len(b) > 0; { + n, err := pw.Write(b) + if err != nil { + gotErr(m, err) + return + } + b = b[n:] + } + // Update sha256 + result.digest.Write(m.Data) + + // Check if we are done. + if tokens[ackNumPendingTokenPos] == objNoPending { + pw.Close() + m.Sub.Unsubscribe() + } + } + + chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID) + _, err = obs.js.Subscribe(chunkSubj, processChunk, OrderedConsumer()) + if err != nil { + return nil, err + } + + return result, nil +} + +// Delete will delete the object. +func (obs *obs) Delete(name string) error { + // Grab meta info. + info, err := obs.GetInfo(name, GetObjectInfoShowDeleted()) + if err != nil { + return err + } + if info.NUID == _EMPTY_ { + return ErrBadObjectMeta + } + + // Place a rollup delete marker and publish the info + info.Deleted = true + info.Size, info.Chunks, info.Digest = 0, 0, _EMPTY_ + + if err = publishMeta(info, obs.js); err != nil { + return err + } + + // Purge chunks for the object. + chunkSubj := fmt.Sprintf(objChunksPreTmpl, obs.name, info.NUID) + return obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: chunkSubj}) +} + +func publishMeta(info *ObjectInfo, js JetStreamContext) error { + // marshal the object into json, don't store an actual time + info.ModTime = time.Time{} + data, err := json.Marshal(info) + if err != nil { + return err + } + + // Prepare and publish the message. + mm := NewMsg(fmt.Sprintf(objMetaPreTmpl, info.Bucket, encodeName(info.ObjectMeta.Name))) + mm.Header.Set(MsgRollup, MsgRollupSubject) + mm.Data = data + if _, err := js.PublishMsg(mm); err != nil { + return err + } + + // set the ModTime in case it's returned to the user, even though it's not the correct time. + info.ModTime = time.Now().UTC() + return nil +} + +// AddLink will add a link to another object if it's not deleted and not another link +// name is the name of this link object +// obj is what is being linked too +func (obs *obs) AddLink(name string, obj *ObjectInfo) (*ObjectInfo, error) { + if name == "" { + return nil, ErrNameRequired + } + + // TODO Handle stale info + + if obj == nil || obj.Name == "" { + return nil, ErrObjectRequired + } + if obj.Deleted { + return nil, ErrNoLinkToDeleted + } + if obj.isLink() { + return nil, ErrNoLinkToLink + } + + // If object with link's name is found, error. + // If link with link's name is found, that's okay to overwrite. + // If there was an error that was not ErrObjectNotFound, error. 
+ einfo, err := obs.GetInfo(name, GetObjectInfoShowDeleted()) + if einfo != nil { + if !einfo.isLink() { + return nil, ErrObjectAlreadyExists + } + } else if err != ErrObjectNotFound { + return nil, err + } + + // create the meta for the link + meta := &ObjectMeta{ + Name: name, + Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: obj.Bucket, Name: obj.Name}}, + } + info := &ObjectInfo{Bucket: obs.name, NUID: nuid.Next(), ModTime: time.Now().UTC(), ObjectMeta: *meta} + + // put the link object + if err = publishMeta(info, obs.js); err != nil { + return nil, err + } + + return info, nil +} + +// AddBucketLink will add a link to another object store. +func (ob *obs) AddBucketLink(name string, bucket ObjectStore) (*ObjectInfo, error) { + if name == "" { + return nil, ErrNameRequired + } + if bucket == nil { + return nil, ErrBucketRequired + } + bos, ok := bucket.(*obs) + if !ok { + return nil, ErrBucketMalformed + } + + // If object with link's name is found, error. + // If link with link's name is found, that's okay to overwrite. + // If there was an error that was not ErrObjectNotFound, error. + einfo, err := ob.GetInfo(name, GetObjectInfoShowDeleted()) + if einfo != nil { + if !einfo.isLink() { + return nil, ErrObjectAlreadyExists + } + } else if err != ErrObjectNotFound { + return nil, err + } + + // create the meta for the link + meta := &ObjectMeta{ + Name: name, + Opts: &ObjectMetaOptions{Link: &ObjectLink{Bucket: bos.name}}, + } + info := &ObjectInfo{Bucket: ob.name, NUID: nuid.Next(), ObjectMeta: *meta} + + // put the link object + err = publishMeta(info, ob.js) + if err != nil { + return nil, err + } + + return info, nil +} + +// PutBytes is convenience function to put a byte slice into this object store. +func (obs *obs) PutBytes(name string, data []byte, opts ...ObjectOpt) (*ObjectInfo, error) { + return obs.Put(&ObjectMeta{Name: name}, bytes.NewReader(data), opts...) +} + +// GetBytes is a convenience function to pull an object from this object store and return it as a byte slice. +func (obs *obs) GetBytes(name string, opts ...GetObjectOpt) ([]byte, error) { + result, err := obs.Get(name, opts...) + if err != nil { + return nil, err + } + defer result.Close() + + var b bytes.Buffer + if _, err := b.ReadFrom(result); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// PutString is convenience function to put a string into this object store. +func (obs *obs) PutString(name string, data string, opts ...ObjectOpt) (*ObjectInfo, error) { + return obs.Put(&ObjectMeta{Name: name}, strings.NewReader(data), opts...) +} + +// GetString is a convenience function to pull an object from this object store and return it as a string. +func (obs *obs) GetString(name string, opts ...GetObjectOpt) (string, error) { + result, err := obs.Get(name, opts...) + if err != nil { + return _EMPTY_, err + } + defer result.Close() + + var b bytes.Buffer + if _, err := b.ReadFrom(result); err != nil { + return _EMPTY_, err + } + return b.String(), nil +} + +// PutFile is convenience function to put a file into an object store. +func (obs *obs) PutFile(file string, opts ...ObjectOpt) (*ObjectInfo, error) { + f, err := os.Open(file) + if err != nil { + return nil, err + } + defer f.Close() + return obs.Put(&ObjectMeta{Name: file}, f, opts...) +} + +// GetFile is a convenience function to pull and object and place in a file. +func (obs *obs) GetFile(name, file string, opts ...GetObjectOpt) error { + // Expect file to be new. 
+ f, err := os.OpenFile(file, os.O_WRONLY|os.O_CREATE, 0600) + if err != nil { + return err + } + defer f.Close() + + result, err := obs.Get(name, opts...) + if err != nil { + os.Remove(f.Name()) + return err + } + defer result.Close() + + // Stream copy to the file. + _, err = io.Copy(f, result) + return err +} + +type GetObjectInfoOpt interface { + configureGetInfo(opts *getObjectInfoOpts) error +} +type getObjectInfoOpts struct { + ctx context.Context + // Include deleted object in the result. + showDeleted bool +} + +type getObjectInfoFn func(opts *getObjectInfoOpts) error + +func (opt getObjectInfoFn) configureGetInfo(opts *getObjectInfoOpts) error { + return opt(opts) +} + +// GetObjectInfoShowDeleted makes GetInfo() return object if it was marked as deleted. +func GetObjectInfoShowDeleted() GetObjectInfoOpt { + return getObjectInfoFn(func(opts *getObjectInfoOpts) error { + opts.showDeleted = true + return nil + }) +} + +// For nats.Context() support. +func (ctx ContextOpt) configureGetInfo(opts *getObjectInfoOpts) error { + opts.ctx = ctx + return nil +} + +// GetInfo will retrieve the current information for the object. +func (obs *obs) GetInfo(name string, opts ...GetObjectInfoOpt) (*ObjectInfo, error) { + // Grab last meta value we have. + if name == "" { + return nil, ErrNameRequired + } + var o getObjectInfoOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureGetInfo(&o); err != nil { + return nil, err + } + } + } + + metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) // used as data in a JS API call + stream := fmt.Sprintf(objNameTmpl, obs.name) + + m, err := obs.js.GetLastMsg(stream, metaSubj) + if err != nil { + if err == ErrMsgNotFound { + err = ErrObjectNotFound + } + return nil, err + } + var info ObjectInfo + if err := json.Unmarshal(m.Data, &info); err != nil { + return nil, ErrBadObjectMeta + } + if !o.showDeleted && info.Deleted { + return nil, ErrObjectNotFound + } + info.ModTime = m.Time + return &info, nil +} + +// UpdateMeta will update the meta for the object. +func (obs *obs) UpdateMeta(name string, meta *ObjectMeta) error { + if meta == nil { + return ErrBadObjectMeta + } + + // Grab the current meta. + info, err := obs.GetInfo(name) + if err != nil { + if errors.Is(err, ErrObjectNotFound) { + return ErrUpdateMetaDeleted + } + return err + } + + // If the new name is different from the old, and it exists, error + // If there was an error that was not ErrObjectNotFound, error. + if name != meta.Name { + existingInfo, err := obs.GetInfo(meta.Name, GetObjectInfoShowDeleted()) + if err != nil && !errors.Is(err, ErrObjectNotFound) { + return err + } + if err == nil && !existingInfo.Deleted { + return ErrObjectAlreadyExists + } + } + + // Update Meta prevents update of ObjectMetaOptions (Link, ChunkSize) + // These should only be updated internally when appropriate. + info.Name = meta.Name + info.Description = meta.Description + info.Headers = meta.Headers + + // Prepare the meta message + if err = publishMeta(info, obs.js); err != nil { + return err + } + + // did the name of this object change? We just stored the meta under the new name + // so delete the meta from the old name via purge stream for subject + if name != meta.Name { + metaSubj := fmt.Sprintf(objMetaPreTmpl, obs.name, encodeName(name)) + return obs.js.purgeStream(obs.stream, &StreamPurgeRequest{Subject: metaSubj}) + } + + return nil +} + +// Seal will seal the object store, no further modifications will be allowed. 
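Streaming a larger object through the chunked Put/Get path is what PutFile and GetFile above wrap. A sketch with hypothetical file names; the Get reader verifies the SHA-256 digest when it reaches EOF:

```go
package example

import (
	"io"
	"os"

	"github.com/nats-io/nats.go"
)

// roundTripFile assumes store is a bound nats.ObjectStore.
func roundTripFile(store nats.ObjectStore) error {
	f, err := os.Open("backup.tar") // hypothetical input
	if err != nil {
		return err
	}
	defer f.Close()

	// Uploaded in 128k chunks by default (objDefaultChunkSize).
	if _, err := store.Put(&nats.ObjectMeta{Name: "backup.tar"}, f); err != nil {
		return err
	}

	res, err := store.Get("backup.tar")
	if err != nil {
		return err
	}
	defer res.Close()

	out, err := os.Create("restore.tar") // hypothetical output
	if err != nil {
		return err
	}
	defer out.Close()
	_, err = io.Copy(out, res) // fails with ErrDigestMismatch on corruption
	return err
}
```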
+func (obs *obs) Seal() error { + stream := fmt.Sprintf(objNameTmpl, obs.name) + si, err := obs.js.StreamInfo(stream) + if err != nil { + return err + } + // Seal the stream from being able to take on more messages. + cfg := si.Config + cfg.Sealed = true + _, err = obs.js.UpdateStream(&cfg) + return err +} + +// Implementation for Watch +type objWatcher struct { + updates chan *ObjectInfo + sub *Subscription +} + +// Updates returns the interior channel. +func (w *objWatcher) Updates() <-chan *ObjectInfo { + if w == nil { + return nil + } + return w.updates +} + +// Stop will unsubscribe from the watcher. +func (w *objWatcher) Stop() error { + if w == nil { + return nil + } + return w.sub.Unsubscribe() +} + +// Watch for changes in the underlying store and receive meta information updates. +func (obs *obs) Watch(opts ...WatchOpt) (ObjectWatcher, error) { + var o watchOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureWatcher(&o); err != nil { + return nil, err + } + } + } + + var initDoneMarker bool + + w := &objWatcher{updates: make(chan *ObjectInfo, 32)} + + update := func(m *Msg) { + var info ObjectInfo + if err := json.Unmarshal(m.Data, &info); err != nil { + return // TODO(dlc) - Communicate this upwards? + } + meta, err := m.Metadata() + if err != nil { + return + } + + if !o.ignoreDeletes || !info.Deleted { + info.ModTime = meta.Timestamp + w.updates <- &info + } + + if !initDoneMarker && meta.NumPending == 0 { + initDoneMarker = true + w.updates <- nil + } + } + + allMeta := fmt.Sprintf(objAllMetaPreTmpl, obs.name) + _, err := obs.js.GetLastMsg(obs.stream, allMeta) + if err == ErrMsgNotFound { + initDoneMarker = true + w.updates <- nil + } + + // Used ordered consumer to deliver results. + subOpts := []SubOpt{OrderedConsumer()} + if !o.includeHistory { + subOpts = append(subOpts, DeliverLastPerSubject()) + } + sub, err := obs.js.Subscribe(allMeta, update, subOpts...) + if err != nil { + return nil, err + } + w.sub = sub + return w, nil +} + +type ListObjectsOpt interface { + configureListObjects(opts *listObjectOpts) error +} +type listObjectOpts struct { + ctx context.Context + // Include deleted objects in the result channel. + showDeleted bool +} + +type listObjectsFn func(opts *listObjectOpts) error + +func (opt listObjectsFn) configureListObjects(opts *listObjectOpts) error { + return opt(opts) +} + +// ListObjectsShowDeleted makes ListObjects() return deleted objects. +func ListObjectsShowDeleted() ListObjectsOpt { + return listObjectsFn(func(opts *listObjectOpts) error { + opts.showDeleted = true + return nil + }) +} + +// For nats.Context() support. +func (ctx ContextOpt) configureListObjects(opts *listObjectOpts) error { + opts.ctx = ctx + return nil +} + +// List will list all the objects in this store. +func (obs *obs) List(opts ...ListObjectsOpt) ([]*ObjectInfo, error) { + var o listObjectOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureListObjects(&o); err != nil { + return nil, err + } + } + } + watchOpts := make([]WatchOpt, 0) + if !o.showDeleted { + watchOpts = append(watchOpts, IgnoreDeletes()) + } + watcher, err := obs.Watch(watchOpts...) 
+ if err != nil { + return nil, err + } + defer watcher.Stop() + if o.ctx == nil { + o.ctx = context.Background() + } + + var objs []*ObjectInfo + updates := watcher.Updates() +Updates: + for { + select { + case entry := <-updates: + if entry == nil { + break Updates + } + objs = append(objs, entry) + case <-o.ctx.Done(): + return nil, o.ctx.Err() + } + } + if len(objs) == 0 { + return nil, ErrNoObjectsFound + } + return objs, nil +} + +// ObjectBucketStatus represents status of a Bucket, implements ObjectStoreStatus +type ObjectBucketStatus struct { + nfo *StreamInfo + bucket string +} + +// Bucket is the name of the bucket +func (s *ObjectBucketStatus) Bucket() string { return s.bucket } + +// Description is the description supplied when creating the bucket +func (s *ObjectBucketStatus) Description() string { return s.nfo.Config.Description } + +// TTL indicates how long objects are kept in the bucket +func (s *ObjectBucketStatus) TTL() time.Duration { return s.nfo.Config.MaxAge } + +// Storage indicates the underlying JetStream storage technology used to store data +func (s *ObjectBucketStatus) Storage() StorageType { return s.nfo.Config.Storage } + +// Replicas indicates how many storage replicas are kept for the data in the bucket +func (s *ObjectBucketStatus) Replicas() int { return s.nfo.Config.Replicas } + +// Sealed indicates the stream is sealed and cannot be modified in any way +func (s *ObjectBucketStatus) Sealed() bool { return s.nfo.Config.Sealed } + +// Size is the combined size of all data in the bucket including metadata, in bytes +func (s *ObjectBucketStatus) Size() uint64 { return s.nfo.State.Bytes } + +// BackingStore indicates what technology is used for storage of the bucket +func (s *ObjectBucketStatus) BackingStore() string { return "JetStream" } + +// StreamInfo is the stream info retrieved to create the status +func (s *ObjectBucketStatus) StreamInfo() *StreamInfo { return s.nfo } + +// Status retrieves run-time status about a bucket +func (obs *obs) Status() (ObjectStoreStatus, error) { + nfo, err := obs.js.StreamInfo(obs.stream) + if err != nil { + return nil, err + } + + status := &ObjectBucketStatus{ + nfo: nfo, + bucket: obs.name, + } + + return status, nil +} + +// Read impl. +func (o *objResult) Read(p []byte) (n int, err error) { + o.Lock() + defer o.Unlock() + if ctx := o.ctx; ctx != nil { + select { + case <-ctx.Done(): + if ctx.Err() == context.Canceled { + o.err = ctx.Err() + } else { + o.err = ErrTimeout + } + default: + } + } + if o.err != nil { + return 0, err + } + if o.r == nil { + return 0, io.EOF + } + + r := o.r.(net.Conn) + r.SetReadDeadline(time.Now().Add(2 * time.Second)) + n, err = r.Read(p) + if err, ok := err.(net.Error); ok && err.Timeout() { + if ctx := o.ctx; ctx != nil { + select { + case <-ctx.Done(): + if ctx.Err() == context.Canceled { + return 0, ctx.Err() + } else { + return 0, ErrTimeout + } + default: + err = nil + } + } + } + if err == io.EOF { + // Make sure the digest matches. + sha := o.digest.Sum(nil) + rsha, decodeErr := DecodeObjectDigest(o.info.Digest) + if decodeErr != nil { + o.err = decodeErr + return 0, o.err + } + if !bytes.Equal(sha[:], rsha) { + o.err = ErrDigestMismatch + return 0, o.err + } + } + return n, err +} + +// Close impl. 
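List above is built on exactly this watcher plus the nil marker. Consuming one directly looks like the sketch below; the bucket contents are hypothetical:

```go
package example

import (
	"log"

	"github.com/nats-io/nats.go"
)

// watchBucket assumes store is a bound nats.ObjectStore.
func watchBucket(store nats.ObjectStore) error {
	w, err := store.Watch(nats.IgnoreDeletes())
	if err != nil {
		return err
	}
	defer w.Stop()

	for info := range w.Updates() {
		if info == nil {
			// Initial replay done; later entries are live updates.
			continue
		}
		log.Printf("%s: %d bytes in %d chunks", info.Name, info.Size, info.Chunks)
	}
	return nil
}
```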
+func (o *objResult) Close() error { + o.Lock() + defer o.Unlock() + if o.r == nil { + return nil + } + return o.r.Close() +} + +func (o *objResult) setErr(err error) { + o.Lock() + defer o.Unlock() + o.err = err +} + +func (o *objResult) Info() (*ObjectInfo, error) { + o.Lock() + defer o.Unlock() + return o.info, o.err +} + +func (o *objResult) Error() error { + o.Lock() + defer o.Unlock() + return o.err +} + +// ObjectStoreNames is used to retrieve a list of bucket names +func (js *js) ObjectStoreNames(opts ...ObjectOpt) <-chan string { + var o objOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureObject(&o); err != nil { + return nil + } + } + } + ch := make(chan string) + var cancel context.CancelFunc + if o.ctx == nil { + o.ctx, cancel = context.WithTimeout(context.Background(), defaultRequestWait) + } + l := &streamLister{js: js} + l.js.opts.streamListSubject = fmt.Sprintf(objAllChunksPreTmpl, "*") + l.js.opts.ctx = o.ctx + go func() { + if cancel != nil { + defer cancel() + } + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + if !strings.HasPrefix(info.Config.Name, "OBJ_") { + continue + } + select { + case ch <- info.Config.Name: + case <-o.ctx.Done(): + return + } + } + } + }() + + return ch +} + +// ObjectStores is used to retrieve a list of bucket statuses +func (js *js) ObjectStores(opts ...ObjectOpt) <-chan ObjectStoreStatus { + var o objOpts + for _, opt := range opts { + if opt != nil { + if err := opt.configureObject(&o); err != nil { + return nil + } + } + } + ch := make(chan ObjectStoreStatus) + var cancel context.CancelFunc + if o.ctx == nil { + o.ctx, cancel = context.WithTimeout(context.Background(), defaultRequestWait) + } + l := &streamLister{js: js} + l.js.opts.streamListSubject = fmt.Sprintf(objAllChunksPreTmpl, "*") + l.js.opts.ctx = o.ctx + go func() { + if cancel != nil { + defer cancel() + } + defer close(ch) + for l.Next() { + for _, info := range l.Page() { + if !strings.HasPrefix(info.Config.Name, "OBJ_") { + continue + } + select { + case ch <- &ObjectBucketStatus{ + nfo: info, + bucket: strings.TrimPrefix(info.Config.Name, "OBJ_"), + }: + case <-o.ctx.Done(): + return + } + } + } + }() + + return ch +} diff --git a/vendor/github.com/nats-io/nats.go/parser.go b/vendor/github.com/nats-io/nats.go/parser.go new file mode 100644 index 00000000..a0f0cca2 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/parser.go @@ -0,0 +1,554 @@ +// Copyright 2012-2122 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
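Enumerating buckets with the channel-based listers at the end of object.go looks like this sketch. Note that in this version ObjectStoreNames yields the raw stream names (OBJ_-prefixed), while ObjectStores trims the prefix for Bucket():

```go
package example

import (
	"fmt"

	"github.com/nats-io/nats.go"
)

// printBuckets assumes js is a nats.JetStreamContext.
func printBuckets(js nats.JetStreamContext) {
	for name := range js.ObjectStoreNames() {
		fmt.Println(name) // stream name, OBJ_-prefixed here
	}
	for status := range js.ObjectStores() {
		fmt.Printf("%s: %d bytes\n", status.Bucket(), status.Size())
	}
}
```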
+ +package nats + +import ( + "fmt" +) + +type msgArg struct { + subject []byte + reply []byte + sid int64 + hdr int + size int +} + +const MAX_CONTROL_LINE_SIZE = 4096 + +type parseState struct { + state int + as int + drop int + hdr int + ma msgArg + argBuf []byte + msgBuf []byte + msgCopied bool + scratch [MAX_CONTROL_LINE_SIZE]byte +} + +const ( + OP_START = iota + OP_PLUS + OP_PLUS_O + OP_PLUS_OK + OP_MINUS + OP_MINUS_E + OP_MINUS_ER + OP_MINUS_ERR + OP_MINUS_ERR_SPC + MINUS_ERR_ARG + OP_M + OP_MS + OP_MSG + OP_MSG_SPC + MSG_ARG + MSG_PAYLOAD + MSG_END + OP_H + OP_P + OP_PI + OP_PIN + OP_PING + OP_PO + OP_PON + OP_PONG + OP_I + OP_IN + OP_INF + OP_INFO + OP_INFO_SPC + INFO_ARG +) + +// parse is the fast protocol parser engine. +func (nc *Conn) parse(buf []byte) error { + var i int + var b byte + + // Move to loop instead of range syntax to allow jumping of i + for i = 0; i < len(buf); i++ { + b = buf[i] + + switch nc.ps.state { + case OP_START: + switch b { + case 'M', 'm': + nc.ps.state = OP_M + nc.ps.hdr = -1 + nc.ps.ma.hdr = -1 + case 'H', 'h': + nc.ps.state = OP_H + nc.ps.hdr = 0 + nc.ps.ma.hdr = 0 + case 'P', 'p': + nc.ps.state = OP_P + case '+': + nc.ps.state = OP_PLUS + case '-': + nc.ps.state = OP_MINUS + case 'I', 'i': + nc.ps.state = OP_I + default: + goto parseErr + } + case OP_H: + switch b { + case 'M', 'm': + nc.ps.state = OP_M + default: + goto parseErr + } + case OP_M: + switch b { + case 'S', 's': + nc.ps.state = OP_MS + default: + goto parseErr + } + case OP_MS: + switch b { + case 'G', 'g': + nc.ps.state = OP_MSG + default: + goto parseErr + } + case OP_MSG: + switch b { + case ' ', '\t': + nc.ps.state = OP_MSG_SPC + default: + goto parseErr + } + case OP_MSG_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = MSG_ARG + nc.ps.as = i + } + case MSG_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + if err := nc.processMsgArgs(arg); err != nil { + return err + } + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, MSG_PAYLOAD + + // jump ahead with the index. If this overruns + // what is left we fall out and process a split buffer. + i = nc.ps.as + nc.ps.ma.size - 1 + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + case MSG_PAYLOAD: + if nc.ps.msgBuf != nil { + if len(nc.ps.msgBuf) >= nc.ps.ma.size { + nc.processMsg(nc.ps.msgBuf) + nc.ps.argBuf, nc.ps.msgBuf, nc.ps.msgCopied, nc.ps.state = nil, nil, false, MSG_END + } else { + // copy as much as we can to the buffer and skip ahead. + toCopy := nc.ps.ma.size - len(nc.ps.msgBuf) + avail := len(buf) - i + + if avail < toCopy { + toCopy = avail + } + + if toCopy > 0 { + start := len(nc.ps.msgBuf) + // This is needed for copy to work. 
+ nc.ps.msgBuf = nc.ps.msgBuf[:start+toCopy] + copy(nc.ps.msgBuf[start:], buf[i:i+toCopy]) + // Update our index + i = (i + toCopy) - 1 + } else { + nc.ps.msgBuf = append(nc.ps.msgBuf, b) + } + } + } else if i-nc.ps.as >= nc.ps.ma.size { + nc.processMsg(buf[nc.ps.as:i]) + nc.ps.argBuf, nc.ps.msgBuf, nc.ps.msgCopied, nc.ps.state = nil, nil, false, MSG_END + } + case MSG_END: + switch b { + case '\n': + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + continue + } + case OP_PLUS: + switch b { + case 'O', 'o': + nc.ps.state = OP_PLUS_O + default: + goto parseErr + } + case OP_PLUS_O: + switch b { + case 'K', 'k': + nc.ps.state = OP_PLUS_OK + default: + goto parseErr + } + case OP_PLUS_OK: + switch b { + case '\n': + nc.processOK() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_MINUS: + switch b { + case 'E', 'e': + nc.ps.state = OP_MINUS_E + default: + goto parseErr + } + case OP_MINUS_E: + switch b { + case 'R', 'r': + nc.ps.state = OP_MINUS_ER + default: + goto parseErr + } + case OP_MINUS_ER: + switch b { + case 'R', 'r': + nc.ps.state = OP_MINUS_ERR + default: + goto parseErr + } + case OP_MINUS_ERR: + switch b { + case ' ', '\t': + nc.ps.state = OP_MINUS_ERR_SPC + default: + goto parseErr + } + case OP_MINUS_ERR_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = MINUS_ERR_ARG + nc.ps.as = i + } + case MINUS_ERR_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + nc.ps.argBuf = nil + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + nc.processErr(string(arg)) + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + case OP_P: + switch b { + case 'I', 'i': + nc.ps.state = OP_PI + case 'O', 'o': + nc.ps.state = OP_PO + default: + goto parseErr + } + case OP_PO: + switch b { + case 'N', 'n': + nc.ps.state = OP_PON + default: + goto parseErr + } + case OP_PON: + switch b { + case 'G', 'g': + nc.ps.state = OP_PONG + default: + goto parseErr + } + case OP_PONG: + switch b { + case '\n': + nc.processPong() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_PI: + switch b { + case 'N', 'n': + nc.ps.state = OP_PIN + default: + goto parseErr + } + case OP_PIN: + switch b { + case 'G', 'g': + nc.ps.state = OP_PING + default: + goto parseErr + } + case OP_PING: + switch b { + case '\n': + nc.processPing() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_I: + switch b { + case 'N', 'n': + nc.ps.state = OP_IN + default: + goto parseErr + } + case OP_IN: + switch b { + case 'F', 'f': + nc.ps.state = OP_INF + default: + goto parseErr + } + case OP_INF: + switch b { + case 'O', 'o': + nc.ps.state = OP_INFO + default: + goto parseErr + } + case OP_INFO: + switch b { + case ' ', '\t': + nc.ps.state = OP_INFO_SPC + default: + goto parseErr + } + case OP_INFO_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = INFO_ARG + nc.ps.as = i + } + case INFO_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + nc.ps.argBuf = nil + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + nc.processAsyncInfo(arg) + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + default: + goto parseErr + } + } + // Check for split buffer scenarios + if (nc.ps.state == MSG_ARG || nc.ps.state == MINUS_ERR_ARG || nc.ps.state == INFO_ARG) && 
nc.ps.argBuf == nil { + nc.ps.argBuf = nc.ps.scratch[:0] + nc.ps.argBuf = append(nc.ps.argBuf, buf[nc.ps.as:i-nc.ps.drop]...) + // FIXME, check max len + } + // Check for split msg + if nc.ps.state == MSG_PAYLOAD && nc.ps.msgBuf == nil { + // We need to clone the msgArg if it is still referencing the + // read buffer and we are not able to process the msg. + if nc.ps.argBuf == nil { + nc.cloneMsgArg() + } + + // If we will overflow the scratch buffer, just create a + // new buffer to hold the split message. + if nc.ps.ma.size > cap(nc.ps.scratch)-len(nc.ps.argBuf) { + lrem := len(buf[nc.ps.as:]) + + nc.ps.msgBuf = make([]byte, lrem, nc.ps.ma.size) + copy(nc.ps.msgBuf, buf[nc.ps.as:]) + nc.ps.msgCopied = true + } else { + nc.ps.msgBuf = nc.ps.scratch[len(nc.ps.argBuf):len(nc.ps.argBuf)] + nc.ps.msgBuf = append(nc.ps.msgBuf, (buf[nc.ps.as:])...) + } + } + + return nil + +parseErr: + return fmt.Errorf("nats: Parse Error [%d]: '%s'", nc.ps.state, buf[i:]) +} + +// cloneMsgArg is used when the split buffer scenario has the pubArg in the existing read buffer, but +// we need to hold onto it into the next read. +func (nc *Conn) cloneMsgArg() { + nc.ps.argBuf = nc.ps.scratch[:0] + nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.subject...) + nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.reply...) + nc.ps.ma.subject = nc.ps.argBuf[:len(nc.ps.ma.subject)] + if nc.ps.ma.reply != nil { + nc.ps.ma.reply = nc.ps.argBuf[len(nc.ps.ma.subject):] + } +} + +const argsLenMax = 4 + +func (nc *Conn) processMsgArgs(arg []byte) error { + // Use separate function for header based messages. + if nc.ps.hdr >= 0 { + return nc.processHeaderMsgArgs(arg) + } + + // Unroll splitArgs to avoid runtime/heap issues + a := [argsLenMax][]byte{} + args := a[:0] + start := -1 + for i, b := range arg { + switch b { + case ' ', '\t', '\r', '\n': + if start >= 0 { + args = append(args, arg[start:i]) + start = -1 + } + default: + if start < 0 { + start = i + } + } + } + if start >= 0 { + args = append(args, arg[start:]) + } + + switch len(args) { + case 3: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = nil + nc.ps.ma.size = int(parseInt64(args[2])) + case 4: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = args[2] + nc.ps.ma.size = int(parseInt64(args[3])) + default: + return fmt.Errorf("nats: processMsgArgs Parse Error: '%s'", arg) + } + if nc.ps.ma.sid < 0 { + return fmt.Errorf("nats: processMsgArgs Bad or Missing Sid: '%s'", arg) + } + if nc.ps.ma.size < 0 { + return fmt.Errorf("nats: processMsgArgs Bad or Missing Size: '%s'", arg) + } + return nil +} + +// processHeaderMsgArgs is for a header based message. 
+func (nc *Conn) processHeaderMsgArgs(arg []byte) error { + // Unroll splitArgs to avoid runtime/heap issues + a := [argsLenMax][]byte{} + args := a[:0] + start := -1 + for i, b := range arg { + switch b { + case ' ', '\t', '\r', '\n': + if start >= 0 { + args = append(args, arg[start:i]) + start = -1 + } + default: + if start < 0 { + start = i + } + } + } + if start >= 0 { + args = append(args, arg[start:]) + } + + switch len(args) { + case 4: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = nil + nc.ps.ma.hdr = int(parseInt64(args[2])) + nc.ps.ma.size = int(parseInt64(args[3])) + case 5: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = args[2] + nc.ps.ma.hdr = int(parseInt64(args[3])) + nc.ps.ma.size = int(parseInt64(args[4])) + default: + return fmt.Errorf("nats: processHeaderMsgArgs Parse Error: '%s'", arg) + } + if nc.ps.ma.sid < 0 { + return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Sid: '%s'", arg) + } + if nc.ps.ma.hdr < 0 || nc.ps.ma.hdr > nc.ps.ma.size { + return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Header Size: '%s'", arg) + } + if nc.ps.ma.size < 0 { + return fmt.Errorf("nats: processHeaderMsgArgs Bad or Missing Size: '%s'", arg) + } + return nil +} + +// ASCII numbers 0-9 +const ( + ascii_0 = 48 + ascii_9 = 57 +) + +// parseInt64 expects decimal positive numbers. We +// return -1 to signal error +func parseInt64(d []byte) (n int64) { + if len(d) == 0 { + return -1 + } + for _, dec := range d { + if dec < ascii_0 || dec > ascii_9 { + return -1 + } + n = n*10 + (int64(dec) - ascii_0) + } + return n +} diff --git a/vendor/github.com/nats-io/nats.go/rand.go b/vendor/github.com/nats-io/nats.go/rand.go new file mode 100644 index 00000000..0cdee0ac --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/rand.go @@ -0,0 +1,29 @@ +// Copyright 2023 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.20 +// +build !go1.20 + +// A Go client for the NATS messaging system (https://nats.io). +package nats + +import ( + "math/rand" + "time" +) + +func init() { + // This is not needed since Go 1.20 because now rand.Seed always happens + // by default (uses runtime.fastrand64 instead as source). + rand.Seed(time.Now().UnixNano()) +} diff --git a/vendor/github.com/nats-io/nats.go/timer.go b/vendor/github.com/nats-io/nats.go/timer.go new file mode 100644 index 00000000..4fb02ecb --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/timer.go @@ -0,0 +1,56 @@ +// Copyright 2017-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "sync" + "time" +) + +// global pool of *time.Timer's. can be used by multiple goroutines concurrently. +var globalTimerPool timerPool + +// timerPool provides GC-able pooling of *time.Timer's. +// can be used by multiple goroutines concurrently. +type timerPool struct { + p sync.Pool +} + +// Get returns a timer that completes after the given duration. +func (tp *timerPool) Get(d time.Duration) *time.Timer { + if t, _ := tp.p.Get().(*time.Timer); t != nil { + t.Reset(d) + return t + } + + return time.NewTimer(d) +} + +// Put pools the given timer. +// +// There is no need to call t.Stop() before calling Put. +// +// Put will try to stop the timer before pooling. If the +// given timer already expired, Put will read the unreceived +// value if there is one. +func (tp *timerPool) Put(t *time.Timer) { + if !t.Stop() { + select { + case <-t.C: + default: + } + } + + tp.p.Put(t) +} diff --git a/vendor/github.com/nats-io/nats.go/util/tls.go b/vendor/github.com/nats-io/nats.go/util/tls.go new file mode 100644 index 00000000..af9f51f0 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/util/tls.go @@ -0,0 +1,28 @@ +// Copyright 2017-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.8 +// +build go1.8 + +package util + +import "crypto/tls" + +// CloneTLSConfig returns a copy of c. +func CloneTLSConfig(c *tls.Config) *tls.Config { + if c == nil { + return &tls.Config{} + } + + return c.Clone() +} diff --git a/vendor/github.com/nats-io/nats.go/util/tls_go17.go b/vendor/github.com/nats-io/nats.go/util/tls_go17.go new file mode 100644 index 00000000..44d46b42 --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/util/tls_go17.go @@ -0,0 +1,50 @@ +// Copyright 2016-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.7 && !go1.8 +// +build go1.7,!go1.8 + +package util + +import ( + "crypto/tls" +) + +// CloneTLSConfig returns a copy of c. Only the exported fields are copied. +// This is temporary, until this is provided by the language. 
+// https://go-review.googlesource.com/#/c/28075/ +func CloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/github.com/nats-io/nats.go/ws.go b/vendor/github.com/nats-io/nats.go/ws.go new file mode 100644 index 00000000..c4919a3a --- /dev/null +++ b/vendor/github.com/nats-io/nats.go/ws.go @@ -0,0 +1,772 @@ +// Copyright 2021-2022 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nats + +import ( + "bufio" + "bytes" + "compress/flate" + "crypto/rand" + "crypto/sha1" + "encoding/base64" + "encoding/binary" + "errors" + "fmt" + "io" + mrand "math/rand" + "net/http" + "net/url" + "strings" + "time" + "unicode/utf8" +) + +type wsOpCode int + +const ( + // From https://tools.ietf.org/html/rfc6455#section-5.2 + wsTextMessage = wsOpCode(1) + wsBinaryMessage = wsOpCode(2) + wsCloseMessage = wsOpCode(8) + wsPingMessage = wsOpCode(9) + wsPongMessage = wsOpCode(10) + + wsFinalBit = 1 << 7 + wsRsv1Bit = 1 << 6 // Used for compression, from https://tools.ietf.org/html/rfc7692#section-6 + wsRsv2Bit = 1 << 5 + wsRsv3Bit = 1 << 4 + + wsMaskBit = 1 << 7 + + wsContinuationFrame = 0 + wsMaxFrameHeaderSize = 14 + wsMaxControlPayloadSize = 125 + wsCloseSatusSize = 2 + + // From https://tools.ietf.org/html/rfc6455#section-11.7 + wsCloseStatusNormalClosure = 1000 + wsCloseStatusNoStatusReceived = 1005 + wsCloseStatusAbnormalClosure = 1006 + wsCloseStatusInvalidPayloadData = 1007 + + wsScheme = "ws" + wsSchemeTLS = "wss" + + wsPMCExtension = "permessage-deflate" // per-message compression + wsPMCSrvNoCtx = "server_no_context_takeover" + wsPMCCliNoCtx = "client_no_context_takeover" + wsPMCReqHeaderValue = wsPMCExtension + "; " + wsPMCSrvNoCtx + "; " + wsPMCCliNoCtx +) + +// From https://tools.ietf.org/html/rfc6455#section-1.3 +var wsGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +var compressFinalBlock = []byte{0x00, 0x00, 0xff, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff} + +type websocketReader struct { + r io.Reader + pending [][]byte + ib []byte + ff bool + fc bool + nl bool + dc *wsDecompressor + nc *Conn +} + +type wsDecompressor struct { + flate io.ReadCloser + bufs [][]byte + off int +} + +type websocketWriter struct { + w io.Writer + compress bool + compressor *flate.Writer + ctrlFrames [][]byte // pending frames that 
should be sent at the next Write() + cm []byte // close message that needs to be sent when everything else has been sent + cmDone bool // a close message has been added or sent (never going back to false) + noMoreSend bool // if true, even if there is a Write() call, we should not send anything +} + +func (d *wsDecompressor) Read(dst []byte) (int, error) { + if len(dst) == 0 { + return 0, nil + } + if len(d.bufs) == 0 { + return 0, io.EOF + } + copied := 0 + rem := len(dst) + for buf := d.bufs[0]; buf != nil && rem > 0; { + n := len(buf[d.off:]) + if n > rem { + n = rem + } + copy(dst[copied:], buf[d.off:d.off+n]) + copied += n + rem -= n + d.off += n + buf = d.nextBuf() + } + return copied, nil +} + +func (d *wsDecompressor) nextBuf() []byte { + // We still have remaining data in the first buffer + if d.off != len(d.bufs[0]) { + return d.bufs[0] + } + // We read the full first buffer. Reset offset. + d.off = 0 + // We were at the last buffer, so we are done. + if len(d.bufs) == 1 { + d.bufs = nil + return nil + } + // Here we move to the next buffer. + d.bufs = d.bufs[1:] + return d.bufs[0] +} + +func (d *wsDecompressor) ReadByte() (byte, error) { + if len(d.bufs) == 0 { + return 0, io.EOF + } + b := d.bufs[0][d.off] + d.off++ + d.nextBuf() + return b, nil +} + +func (d *wsDecompressor) addBuf(b []byte) { + d.bufs = append(d.bufs, b) +} + +func (d *wsDecompressor) decompress() ([]byte, error) { + d.off = 0 + // As per https://tools.ietf.org/html/rfc7692#section-7.2.2 + // add 0x00, 0x00, 0xff, 0xff and then a final block so that flate reader + // does not report unexpected EOF. + d.bufs = append(d.bufs, compressFinalBlock) + // Create or reset the decompressor with his object (wsDecompressor) + // that provides Read() and ReadByte() APIs that will consume from + // the compressed buffers (d.bufs). + if d.flate == nil { + d.flate = flate.NewReader(d) + } else { + d.flate.(flate.Resetter).Reset(d, nil) + } + b, err := io.ReadAll(d.flate) + // Now reset the compressed buffers list + d.bufs = nil + return b, err +} + +func wsNewReader(r io.Reader) *websocketReader { + return &websocketReader{r: r, ff: true} +} + +// From now on, reads will be from the readLoop and we will need to +// acquire the connection lock should we have to send/write a control +// message from handleControlFrame. +// +// Note: this runs under the connection lock. +func (r *websocketReader) doneWithConnect() { + r.nl = true +} + +func (r *websocketReader) Read(p []byte) (int, error) { + var err error + var buf []byte + + if l := len(r.ib); l > 0 { + buf = r.ib + r.ib = nil + } else { + if len(r.pending) > 0 { + return r.drainPending(p), nil + } + + // Get some data from the underlying reader. + n, err := r.r.Read(p) + if err != nil { + return 0, err + } + buf = p[:n] + } + + // Now parse this and decode frames. We will possibly read more to + // ensure that we get a full frame. 
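+	// Per RFC 6455, the first header byte carries the FIN/RSV bits and the
+	// opcode; the second carries the mask bit and a 7-bit length, where the
+	// values 126 and 127 signal 16-bit and 64-bit extended lengths.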
+ var ( + tmpBuf []byte + pos int + max = len(buf) + rem = 0 + ) + for pos < max { + b0 := buf[pos] + frameType := wsOpCode(b0 & 0xF) + final := b0&wsFinalBit != 0 + compressed := b0&wsRsv1Bit != 0 + pos++ + + tmpBuf, pos, err = wsGet(r.r, buf, pos, 1) + if err != nil { + return 0, err + } + b1 := tmpBuf[0] + + // Store size in case it is < 125 + rem = int(b1 & 0x7F) + + switch frameType { + case wsPingMessage, wsPongMessage, wsCloseMessage: + if rem > wsMaxControlPayloadSize { + return 0, fmt.Errorf( + fmt.Sprintf("control frame length bigger than maximum allowed of %v bytes", + wsMaxControlPayloadSize)) + } + if compressed { + return 0, errors.New("control frame should not be compressed") + } + if !final { + return 0, errors.New("control frame does not have final bit set") + } + case wsTextMessage, wsBinaryMessage: + if !r.ff { + return 0, errors.New("new message started before final frame for previous message was received") + } + r.ff = final + r.fc = compressed + case wsContinuationFrame: + // Compressed bit must be only set in the first frame + if r.ff || compressed { + return 0, errors.New("invalid continuation frame") + } + r.ff = final + default: + return 0, fmt.Errorf("unknown opcode %v", frameType) + } + + // If the encoded size is <= 125, then `rem` is simply the remainder size of the + // frame. If it is 126, then the actual size is encoded as a uint16. For larger + // frames, `rem` will initially be 127 and the actual size is encoded as a uint64. + switch rem { + case 126: + tmpBuf, pos, err = wsGet(r.r, buf, pos, 2) + if err != nil { + return 0, err + } + rem = int(binary.BigEndian.Uint16(tmpBuf)) + case 127: + tmpBuf, pos, err = wsGet(r.r, buf, pos, 8) + if err != nil { + return 0, err + } + rem = int(binary.BigEndian.Uint64(tmpBuf)) + } + + // Handle control messages in place... + if wsIsControlFrame(frameType) { + pos, err = r.handleControlFrame(frameType, buf, pos, rem) + if err != nil { + return 0, err + } + rem = 0 + continue + } + + var b []byte + // This ensures that we get the full payload for this frame. + b, pos, err = wsGet(r.r, buf, pos, rem) + if err != nil { + return 0, err + } + // We read the full frame. + rem = 0 + addToPending := true + if r.fc { + // Don't add to pending if we are not dealing with the final frame. + addToPending = r.ff + // Add the compressed payload buffer to the list. + r.addCBuf(b) + // Decompress only when this is the final frame. + if r.ff { + b, err = r.dc.decompress() + if err != nil { + return 0, err + } + r.fc = false + } + } + // Add to the pending list if dealing with uncompressed frames or + // after we have received the full compressed message and decompressed it. + if addToPending { + r.pending = append(r.pending, b) + } + } + // In case of compression, there may be nothing to drain + if len(r.pending) > 0 { + return r.drainPending(p), nil + } + return 0, nil +} + +func (r *websocketReader) addCBuf(b []byte) { + if r.dc == nil { + r.dc = &wsDecompressor{} + } + // Add a copy of the incoming buffer to the list of compressed buffers. + r.dc.addBuf(append([]byte(nil), b...)) +} + +func (r *websocketReader) drainPending(p []byte) int { + var n int + var max = len(p) + + for i, buf := range r.pending { + if n+len(buf) <= max { + copy(p[n:], buf) + n += len(buf) + } else { + // Is there room left? + if n < max { + // Write the partial and update this slice. + rem := max - n + copy(p[n:], buf[:rem]) + n += rem + r.pending[i] = buf[rem:] + } + // These are the remaining slices that will need to be used at + // the next Read() call. 
+ r.pending = r.pending[i:] + return n + } + } + r.pending = r.pending[:0] + return n +} + +func wsGet(r io.Reader, buf []byte, pos, needed int) ([]byte, int, error) { + avail := len(buf) - pos + if avail >= needed { + return buf[pos : pos+needed], pos + needed, nil + } + b := make([]byte, needed) + start := copy(b, buf[pos:]) + for start != needed { + n, err := r.Read(b[start:cap(b)]) + start += n + if err != nil { + return b, start, err + } + } + return b, pos + avail, nil +} + +func (r *websocketReader) handleControlFrame(frameType wsOpCode, buf []byte, pos, rem int) (int, error) { + var payload []byte + var err error + + if rem > 0 { + payload, pos, err = wsGet(r.r, buf, pos, rem) + if err != nil { + return pos, err + } + } + switch frameType { + case wsCloseMessage: + status := wsCloseStatusNoStatusReceived + var body string + lp := len(payload) + // If there is a payload, the status is represented as a 2-byte + // unsigned integer (in network byte order). Then, there may be an + // optional body. + hasStatus, hasBody := lp >= wsCloseSatusSize, lp > wsCloseSatusSize + if hasStatus { + // Decode the status + status = int(binary.BigEndian.Uint16(payload[:wsCloseSatusSize])) + // Now if there is a body, capture it and make sure this is a valid UTF-8. + if hasBody { + body = string(payload[wsCloseSatusSize:]) + if !utf8.ValidString(body) { + // https://tools.ietf.org/html/rfc6455#section-5.5.1 + // If body is present, it must be a valid utf8 + status = wsCloseStatusInvalidPayloadData + body = "invalid utf8 body in close frame" + } + } + } + r.nc.wsEnqueueCloseMsg(r.nl, status, body) + // Return io.EOF so that readLoop will close the connection as client closed + // after processing pending buffers. + return pos, io.EOF + case wsPingMessage: + r.nc.wsEnqueueControlMsg(r.nl, wsPongMessage, payload) + case wsPongMessage: + // Nothing to do.. + } + return pos, nil +} + +func (w *websocketWriter) Write(p []byte) (int, error) { + if w.noMoreSend { + return 0, nil + } + var total int + var n int + var err error + // If there are control frames, they can be sent now. Actually spec says + // that they should be sent ASAP, so we will send before any application data. + if len(w.ctrlFrames) > 0 { + n, err = w.writeCtrlFrames() + if err != nil { + return n, err + } + total += n + } + // Do the following only if there is something to send. + // We will end with checking for need to send close message. 
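+	// Application data is sent as a single masked binary frame,
+	// deflate-compressed when compression was negotiated.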
+ if len(p) > 0 { + if w.compress { + buf := &bytes.Buffer{} + if w.compressor == nil { + w.compressor, _ = flate.NewWriter(buf, flate.BestSpeed) + } else { + w.compressor.Reset(buf) + } + w.compressor.Write(p) + w.compressor.Close() + b := buf.Bytes() + p = b[:len(b)-4] + } + fh, key := wsCreateFrameHeader(w.compress, wsBinaryMessage, len(p)) + wsMaskBuf(key, p) + n, err = w.w.Write(fh) + total += n + if err == nil { + n, err = w.w.Write(p) + total += n + } + } + if err == nil && w.cm != nil { + n, err = w.writeCloseMsg() + total += n + } + return total, err +} + +func (w *websocketWriter) writeCtrlFrames() (int, error) { + var ( + n int + total int + i int + err error + ) + for ; i < len(w.ctrlFrames); i++ { + buf := w.ctrlFrames[i] + n, err = w.w.Write(buf) + total += n + if err != nil { + break + } + } + if i != len(w.ctrlFrames) { + w.ctrlFrames = w.ctrlFrames[i+1:] + } else { + w.ctrlFrames = w.ctrlFrames[:0] + } + return total, err +} + +func (w *websocketWriter) writeCloseMsg() (int, error) { + n, err := w.w.Write(w.cm) + w.cm, w.noMoreSend = nil, true + return n, err +} + +func wsMaskBuf(key, buf []byte) { + for i := 0; i < len(buf); i++ { + buf[i] ^= key[i&3] + } +} + +// Create the frame header. +// Encodes the frame type and optional compression flag, and the size of the payload. +func wsCreateFrameHeader(compressed bool, frameType wsOpCode, l int) ([]byte, []byte) { + fh := make([]byte, wsMaxFrameHeaderSize) + n, key := wsFillFrameHeader(fh, compressed, frameType, l) + return fh[:n], key +} + +func wsFillFrameHeader(fh []byte, compressed bool, frameType wsOpCode, l int) (int, []byte) { + var n int + b := byte(frameType) + b |= wsFinalBit + if compressed { + b |= wsRsv1Bit + } + b1 := byte(wsMaskBit) + switch { + case l <= 125: + n = 2 + fh[0] = b + fh[1] = b1 | byte(l) + case l < 65536: + n = 4 + fh[0] = b + fh[1] = b1 | 126 + binary.BigEndian.PutUint16(fh[2:], uint16(l)) + default: + n = 10 + fh[0] = b + fh[1] = b1 | 127 + binary.BigEndian.PutUint64(fh[2:], uint64(l)) + } + var key []byte + var keyBuf [4]byte + if _, err := io.ReadFull(rand.Reader, keyBuf[:4]); err != nil { + kv := mrand.Int31() + binary.LittleEndian.PutUint32(keyBuf[:4], uint32(kv)) + } + copy(fh[n:], keyBuf[:4]) + key = fh[n : n+4] + n += 4 + return n, key +} + +func (nc *Conn) wsInitHandshake(u *url.URL) error { + compress := nc.Opts.Compression + tlsRequired := u.Scheme == wsSchemeTLS || nc.Opts.Secure || nc.Opts.TLSConfig != nil + // Do TLS here as needed. + if tlsRequired { + if err := nc.makeTLSConn(); err != nil { + return err + } + } else { + nc.bindToNewConn() + } + + var err error + + // For http request, we need the passed URL to contain either http or https scheme. 
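+	// e.g. a wss:// server URL becomes https://<host> for the upgrade request.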
+ scheme := "http" + if tlsRequired { + scheme = "https" + } + ustr := fmt.Sprintf("%s://%s", scheme, u.Host) + + if nc.Opts.ProxyPath != "" { + proxyPath := nc.Opts.ProxyPath + if !strings.HasPrefix(proxyPath, "/") { + proxyPath = "/" + proxyPath + } + ustr += proxyPath + } + + u, err = url.Parse(ustr) + if err != nil { + return err + } + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + wsKey, err := wsMakeChallengeKey() + if err != nil { + return err + } + + req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{wsKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if compress { + req.Header.Add("Sec-WebSocket-Extensions", wsPMCReqHeaderValue) + } + if err := req.Write(nc.conn); err != nil { + return err + } + + var resp *http.Response + + br := bufio.NewReaderSize(nc.conn, 4096) + nc.conn.SetReadDeadline(time.Now().Add(nc.Opts.Timeout)) + resp, err = http.ReadResponse(br, req) + if err == nil && + (resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != wsAcceptKey(wsKey)) { + + err = fmt.Errorf("invalid websocket connection") + } + // Check compression extension... + if err == nil && compress { + // Check that not only permessage-deflate extension is present, but that + // we also have server and client no context take over. + srvCompress, noCtxTakeover := wsPMCExtensionSupport(resp.Header) + + // If server does not support compression, then simply disable it in our side. + if !srvCompress { + compress = false + } else if !noCtxTakeover { + err = fmt.Errorf("compression negotiation error") + } + } + if resp != nil { + resp.Body.Close() + } + nc.conn.SetReadDeadline(time.Time{}) + if err != nil { + return err + } + + wsr := wsNewReader(nc.br.r) + wsr.nc = nc + // We have to slurp whatever is in the bufio reader and copy to br.r + if n := br.Buffered(); n != 0 { + wsr.ib, _ = br.Peek(n) + } + nc.br.r = wsr + nc.bw.w = &websocketWriter{w: nc.bw.w, compress: compress} + nc.ws = true + return nil +} + +func (nc *Conn) wsClose() { + nc.mu.Lock() + defer nc.mu.Unlock() + if !nc.ws { + return + } + nc.wsEnqueueCloseMsgLocked(wsCloseStatusNormalClosure, _EMPTY_) +} + +func (nc *Conn) wsEnqueueCloseMsg(needsLock bool, status int, payload string) { + // In some low-level unit tests it will happen... + if nc == nil { + return + } + if needsLock { + nc.mu.Lock() + defer nc.mu.Unlock() + } + nc.wsEnqueueCloseMsgLocked(status, payload) +} + +func (nc *Conn) wsEnqueueCloseMsgLocked(status int, payload string) { + wr, ok := nc.bw.w.(*websocketWriter) + if !ok || wr.cmDone { + return + } + statusAndPayloadLen := 2 + len(payload) + frame := make([]byte, 2+4+statusAndPayloadLen) + n, key := wsFillFrameHeader(frame, false, wsCloseMessage, statusAndPayloadLen) + // Set the status + binary.BigEndian.PutUint16(frame[n:], uint16(status)) + // If there is a payload, copy + if len(payload) > 0 { + copy(frame[n+2:], payload) + } + // Mask status + payload + wsMaskBuf(key, frame[n:n+statusAndPayloadLen]) + wr.cm = frame + wr.cmDone = true + nc.bw.flush() +} + +func (nc *Conn) wsEnqueueControlMsg(needsLock bool, frameType wsOpCode, payload []byte) { + // In some low-level unit tests it will happen... 
+ if nc == nil { + return + } + if needsLock { + nc.mu.Lock() + defer nc.mu.Unlock() + } + wr, ok := nc.bw.w.(*websocketWriter) + if !ok { + return + } + fh, key := wsCreateFrameHeader(false, frameType, len(payload)) + wr.ctrlFrames = append(wr.ctrlFrames, fh) + if len(payload) > 0 { + wsMaskBuf(key, payload) + wr.ctrlFrames = append(wr.ctrlFrames, payload) + } + nc.bw.flush() +} + +func wsPMCExtensionSupport(header http.Header) (bool, bool) { + for _, extensionList := range header["Sec-Websocket-Extensions"] { + extensions := strings.Split(extensionList, ",") + for _, extension := range extensions { + extension = strings.Trim(extension, " \t") + params := strings.Split(extension, ";") + for i, p := range params { + p = strings.Trim(p, " \t") + if strings.EqualFold(p, wsPMCExtension) { + var snc bool + var cnc bool + for j := i + 1; j < len(params); j++ { + p = params[j] + p = strings.Trim(p, " \t") + if strings.EqualFold(p, wsPMCSrvNoCtx) { + snc = true + } else if strings.EqualFold(p, wsPMCCliNoCtx) { + cnc = true + } + if snc && cnc { + return true, true + } + } + return true, false + } + } + } + } + return false, false +} + +func wsMakeChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +func wsAcceptKey(key string) string { + h := sha1.New() + h.Write([]byte(key)) + h.Write(wsGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +// Returns true if the op code corresponds to a control frame. +func wsIsControlFrame(frameType wsOpCode) bool { + return frameType >= wsCloseMessage +} + +func isWebsocketScheme(u *url.URL) bool { + return u.Scheme == wsScheme || u.Scheme == wsSchemeTLS +} diff --git a/vendor/github.com/nats-io/nkeys/.gitignore b/vendor/github.com/nats-io/nkeys/.gitignore new file mode 100644 index 00000000..9dca5eb4 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/.gitignore @@ -0,0 +1,15 @@ +# Binaries for programs and plugins +*.exe +*.dll +*.so +*.dylib +build/ + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736 +.glide/ diff --git a/vendor/github.com/nats-io/nkeys/.goreleaser.yml b/vendor/github.com/nats-io/nkeys/.goreleaser.yml new file mode 100644 index 00000000..6df08bec --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/.goreleaser.yml @@ -0,0 +1,38 @@ +project_name: nkeys +release: + github: + owner: nats-io + name: nkeys + name_template: '{{.Tag}}' + draft: true +builds: + - main: ./nk/main.go + ldflags: "-X main.Version={{.Tag}}_{{.Commit}}" + binary: nk + goos: + - linux + - darwin + goarch: + - amd64 + + +dist: build + +archive: + wrap_in_directory: true + name_template: '{{ .ProjectName }}-v{{ .Version }}-{{ .Os }}-{{ .Arch }}{{ if .Arm + }}v{{ .Arm }}{{ end }}' + format: zip + +checksum: + name_template: '{{ .ProjectName }}-v{{ .Version }}-checksums.txt' + +snapshot: + name_template: 'dev' + +nfpm: + formats: + - deb + bindir: /usr/local/bin + description: NKeys utility cli program + vendor: nats-io diff --git a/vendor/github.com/nats-io/nkeys/.travis.yml b/vendor/github.com/nats-io/nkeys/.travis.yml new file mode 100644 index 00000000..c13448e2 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/.travis.yml @@ -0,0 +1,35 @@ +language: go +sudo: false + +arch: +- amd64 +- ppc64le +go: +- 1.16.x +- 1.15.x + +install: +- go get -t ./... 
+- go get github.com/mattn/goveralls +- go get -u honnef.co/go/tools/cmd/staticcheck +- go get -u github.com/client9/misspell/cmd/misspell + +before_script: +- $(exit $(go fmt ./... | wc -l)) +- go vet ./... +- misspell -error -locale US . +- staticcheck ./... + +script: +- go test -v +- go test -v --race +- go test -v -covermode=count -coverprofile=coverage.out +- $HOME/gopath/bin/goveralls -coverprofile coverage.out -service travis-ci + +#deploy: +#- provider: script +# skip_cleanup: true +# script: curl -sL http://git.io/goreleaser | bash +# on: +# tags: true +# condition: $TRAVIS_OS_NAME = linux diff --git a/vendor/github.com/nats-io/nkeys/GOVERNANCE.md b/vendor/github.com/nats-io/nkeys/GOVERNANCE.md new file mode 100644 index 00000000..744d3bc2 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/GOVERNANCE.md @@ -0,0 +1,3 @@ +# NATS NKEYS Governance + +NATS NKEYS is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md). \ No newline at end of file diff --git a/vendor/github.com/nats-io/nkeys/LICENSE b/vendor/github.com/nats-io/nkeys/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/nats-io/nkeys/MAINTAINERS.md b/vendor/github.com/nats-io/nkeys/MAINTAINERS.md new file mode 100644 index 00000000..23214655 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/MAINTAINERS.md @@ -0,0 +1,8 @@ +# Maintainers + +Maintainership is on a per project basis. 
+ +### Maintainers + - Derek Collison [@derekcollison](https://github.com/derekcollison) + - Ivan Kozlovic [@kozlovic](https://github.com/kozlovic) + - Waldemar Quevedo [@wallyqs](https://github.com/wallyqs) diff --git a/vendor/github.com/nats-io/nkeys/README.md b/vendor/github.com/nats-io/nkeys/README.md new file mode 100644 index 00000000..13032c56 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/README.md @@ -0,0 +1,69 @@ +# NKEYS + +[![License Apache 2](https://img.shields.io/badge/License-Apache2-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0) +[![ReportCard](http://goreportcard.com/badge/nats-io/nkeys)](http://goreportcard.com/report/nats-io/nkeys) +[![Build Status](https://travis-ci.com/nats-io/nkeys.svg?branch=master)](http://travis-ci.com/nats-io/nkeys) +[![GoDoc](http://godoc.org/github.com/nats-io/nkeys?status.svg)](http://godoc.org/github.com/nats-io/nkeys) +[![Coverage Status](https://coveralls.io/repos/github/nats-io/nkeys/badge.svg?branch=master&service=github)](https://coveralls.io/github/nats-io/nkeys?branch=master) + +A public-key signature system based on [Ed25519](https://ed25519.cr.yp.to/) for the NATS ecosystem. + +## About + +The NATS ecosystem will be moving to [Ed25519](https://ed25519.cr.yp.to/) keys for identity, authentication and authorization for entities such as Accounts, Users, Servers and Clusters. + +Ed25519 is fast and resistant to side channel attacks. Generation of a seed key is all that is needed to be stored and kept safe, as the seed can generate both the public and private keys. + +The NATS system will utilize Ed25519 keys, meaning that NATS systems will never store or even have access to any private keys. Authentication will utilize a random challenge response mechanism. + +Dealing with 32 byte and 64 byte raw keys can be challenging. NKEYS is designed to formulate keys in a much friendlier fashion and references work done in cryptocurrencies, specifically [Stellar](https://www.stellar.org/). Bitcoin and others used a form of Base58 (or Base58Check) to encode raw keys. Stellar utilized a more traditional Base32 with a CRC16 and a version or prefix byte. NKEYS utilizes a similar format where the prefix will be 1 byte for public and private keys and will be 2 bytes for seeds. The base32 encoding of these prefixes will yield friendly human readable prefixes, e.g. '**N**' = server, '**C**' = cluster, '**O**' = operator, '**A**' = account, and '**U**' = user. '**P**' is used for private keys. For seeds, the first encoded prefix is '**S**', and the second character will be the type for the public key, e.g. "**SU**" is a seed for a user key pair, "**SA**" is a seed for an account key pair. + +## Installation + +Use the `go` command: + + $ go get github.com/nats-io/nkeys + +## nk - Command Line Utility + +Located under the nk [directory](https://github.com/nats-io/nkeys/tree/master/nk). + +## Basic API Usage +```go + +// Create a new User KeyPair +user, _ := nkeys.CreateUser() + +// Sign some data with a full key pair user. +data := []byte("Hello World") +sig, _ := user.Sign(data) + +// Verify the signature. +err = user.Verify(data, sig) + +// Access the seed, the only thing that needs to be stored and kept safe. +// seed = "SUAKYRHVIOREXV7EUZTBHUHL7NUMHPMAS7QMDU3GTIUWEI5LDNOXD43IZY" +seed, _ := user.Seed() + +// Access the public key which can be shared. +// publicKey = "UD466L6EBCM3YY5HEGHJANNTN4LSKTSUXTH7RILHCKEQMQHTBNLHJJXT" +publicKey, _ := user.PublicKey() + +// Create a full User who can sign and verify from a private seed. 
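+// (FromSeed decodes and validates the seed before re-deriving the key pair.)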
+user, _ = nkeys.FromSeed(seed) + +// Create a User who can only verify signatures via a public key. +user, _ = nkeys.FromPublicKey(publicKey) + +// Create a User KeyPair with our own random data. +var rawSeed [32]byte +_, err := io.ReadFull(rand.Reader, rawSeed[:]) // Or some other random source. +user2, _ := nkeys.FromRawSeed(PrefixByteUser, rawSeed) + +``` + +## License + +Unless otherwise noted, the NATS source files are distributed +under the Apache Version 2.0 license found in the LICENSE file. + diff --git a/vendor/github.com/nats-io/nkeys/TODO.md b/vendor/github.com/nats-io/nkeys/TODO.md new file mode 100644 index 00000000..2649c9e5 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/TODO.md @@ -0,0 +1,5 @@ + +# General + +- [ ] Child key derivation +- [ ] Hardware support, e.g. YubiHSM diff --git a/vendor/github.com/nats-io/nkeys/crc16.go b/vendor/github.com/nats-io/nkeys/crc16.go new file mode 100644 index 00000000..c3356cf9 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/crc16.go @@ -0,0 +1,75 @@ +// Copyright 2018 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nkeys + +import ( + "errors" +) + +// An implementation of crc16 according to CCITT standards for XMODEM. + +// ErrInvalidChecksum indicates a failed verification. 
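+// It is returned when a decoded key's trailing CRC16 does not match its payload.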
+var ErrInvalidChecksum = errors.New("nkeys: invalid checksum") + +var crc16tab = [256]uint16{ + 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7, + 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef, + 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6, + 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de, + 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485, + 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d, + 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4, + 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc, + 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823, + 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b, + 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12, + 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a, + 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41, + 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49, + 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70, + 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78, + 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f, + 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067, + 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e, + 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256, + 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d, + 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405, + 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c, + 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634, + 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab, + 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3, + 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a, + 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92, + 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9, + 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1, + 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8, + 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0, +} + +// crc16 returns the 2-byte crc for the data provided. +func crc16(data []byte) uint16 { + var crc uint16 + for _, b := range data { + crc = ((crc << 8) & 0xffff) ^ crc16tab[((crc>>8)^uint16(b))&0x00FF] + } + return crc +} + +// validate will check the calculated crc16 checksum for data against the expected. +func validate(data []byte, expected uint16) error { + if crc16(data) != expected { + return ErrInvalidChecksum + } + return nil +} diff --git a/vendor/github.com/nats-io/nkeys/creds_utils.go b/vendor/github.com/nats-io/nkeys/creds_utils.go new file mode 100644 index 00000000..e1c3d941 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/creds_utils.go @@ -0,0 +1,78 @@ +package nkeys + +import ( + "bytes" + "errors" + "regexp" +) + +var userConfigRE = regexp.MustCompile(`\s*(?:(?:[-]{3,}.*[-]{3,}\r?\n)([\w\-.=]+)(?:\r?\n[-]{3,}.*[-]{3,}\r?\n))`) + +// ParseDecoratedJWT takes a creds file and returns the JWT portion. +func ParseDecoratedJWT(contents []byte) (string, error) { + items := userConfigRE.FindAllSubmatch(contents, -1) + if len(items) == 0 { + return string(contents), nil + } + // First result should be the user JWT. + // We copy here so that if the file contained a seed file too we wipe appropriately. 
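+	// Returning a copy lets callers wipe the original contents without
+	// corrupting the returned JWT.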
+ raw := items[0][1] + tmp := make([]byte, len(raw)) + copy(tmp, raw) + return string(tmp), nil +} + +// ParseDecoratedNKey takes a creds file, finds the NKey portion and creates a +// key pair from it. +func ParseDecoratedNKey(contents []byte) (KeyPair, error) { + var seed []byte + + items := userConfigRE.FindAllSubmatch(contents, -1) + if len(items) > 1 { + seed = items[1][1] + } else { + lines := bytes.Split(contents, []byte("\n")) + for _, line := range lines { + if bytes.HasPrefix(bytes.TrimSpace(line), []byte("SO")) || + bytes.HasPrefix(bytes.TrimSpace(line), []byte("SA")) || + bytes.HasPrefix(bytes.TrimSpace(line), []byte("SU")) { + seed = line + break + } + } + } + if seed == nil { + return nil, errors.New("no nkey seed found") + } + if !bytes.HasPrefix(seed, []byte("SO")) && + !bytes.HasPrefix(seed, []byte("SA")) && + !bytes.HasPrefix(seed, []byte("SU")) { + return nil, errors.New("doesn't contain a seed nkey") + } + kp, err := FromSeed(seed) + if err != nil { + return nil, err + } + return kp, nil +} + +// ParseDecoratedUserNKey takes a creds file, finds the NKey portion and creates a +// key pair from it. Similar to ParseDecoratedNKey but fails for non-user keys. +func ParseDecoratedUserNKey(contents []byte) (KeyPair, error) { + nk, err := ParseDecoratedNKey(contents) + if err != nil { + return nil, err + } + seed, err := nk.Seed() + if err != nil { + return nil, err + } + if !bytes.HasPrefix(seed, []byte("SU")) { + return nil, errors.New("doesn't contain an user seed nkey") + } + kp, err := FromSeed(seed) + if err != nil { + return nil, err + } + return kp, nil +} diff --git a/vendor/github.com/nats-io/nkeys/keypair.go b/vendor/github.com/nats-io/nkeys/keypair.go new file mode 100644 index 00000000..acd86743 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/keypair.go @@ -0,0 +1,117 @@ +// Copyright 2018 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nkeys + +import ( + "bytes" + "crypto/rand" + "io" + + "golang.org/x/crypto/ed25519" +) + +// kp is the internal struct for a kepypair using seed. +type kp struct { + seed []byte +} + +// CreatePair will create a KeyPair based on the rand entropy and a type/prefix byte. rand can be nil. +func CreatePair(prefix PrefixByte) (KeyPair, error) { + var rawSeed [32]byte + + _, err := io.ReadFull(rand.Reader, rawSeed[:]) + if err != nil { + return nil, err + } + + seed, err := EncodeSeed(prefix, rawSeed[:]) + if err != nil { + return nil, err + } + return &kp{seed}, nil +} + +// rawSeed will return the raw, decoded 64 byte seed. +func (pair *kp) rawSeed() ([]byte, error) { + _, raw, err := DecodeSeed(pair.seed) + return raw, err +} + +// keys will return a 32 byte public key and a 64 byte private key utilizing the seed. 
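+// Both keys are derived deterministically from the raw seed via ed25519.GenerateKey.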
+func (pair *kp) keys() (ed25519.PublicKey, ed25519.PrivateKey, error) { + raw, err := pair.rawSeed() + if err != nil { + return nil, nil, err + } + return ed25519.GenerateKey(bytes.NewReader(raw)) +} + +// Wipe will randomize the contents of the seed key +func (pair *kp) Wipe() { + io.ReadFull(rand.Reader, pair.seed) + pair.seed = nil +} + +// Seed will return the encoded seed. +func (pair *kp) Seed() ([]byte, error) { + return pair.seed, nil +} + +// PublicKey will return the encoded public key associated with the KeyPair. +// All KeyPairs have a public key. +func (pair *kp) PublicKey() (string, error) { + public, raw, err := DecodeSeed(pair.seed) + if err != nil { + return "", err + } + pub, _, err := ed25519.GenerateKey(bytes.NewReader(raw)) + if err != nil { + return "", err + } + pk, err := Encode(public, pub) + if err != nil { + return "", err + } + return string(pk), nil +} + +// PrivateKey will return the encoded private key for KeyPair. +func (pair *kp) PrivateKey() ([]byte, error) { + _, priv, err := pair.keys() + if err != nil { + return nil, err + } + return Encode(PrefixBytePrivate, priv) +} + +// Sign will sign the input with KeyPair's private key. +func (pair *kp) Sign(input []byte) ([]byte, error) { + _, priv, err := pair.keys() + if err != nil { + return nil, err + } + return ed25519.Sign(priv, input), nil +} + +// Verify will verify the input against a signature utilizing the public key. +func (pair *kp) Verify(input []byte, sig []byte) error { + pub, _, err := pair.keys() + if err != nil { + return err + } + if !ed25519.Verify(pub, input, sig) { + return ErrInvalidSignature + } + return nil +} diff --git a/vendor/github.com/nats-io/nkeys/main.go b/vendor/github.com/nats-io/nkeys/main.go new file mode 100644 index 00000000..47d01c56 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/main.go @@ -0,0 +1,104 @@ +// Copyright 2018-2019 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package nkeys is an Ed25519 based public-key signature system that simplifies keys and seeds +// and performs signing and verification. +package nkeys + +import ( + "errors" +) + +// Version is our current version +const Version = "0.3.0" + +// Errors +var ( + ErrInvalidPrefixByte = errors.New("nkeys: invalid prefix byte") + ErrInvalidKey = errors.New("nkeys: invalid key") + ErrInvalidPublicKey = errors.New("nkeys: invalid public key") + ErrInvalidSeedLen = errors.New("nkeys: invalid seed length") + ErrInvalidSeed = errors.New("nkeys: invalid seed") + ErrInvalidEncoding = errors.New("nkeys: invalid encoded key") + ErrInvalidSignature = errors.New("nkeys: signature verification failed") + ErrCannotSign = errors.New("nkeys: can not sign, no private key available") + ErrPublicKeyOnly = errors.New("nkeys: no seed or private key available") + ErrIncompatibleKey = errors.New("nkeys: incompatible key") +) + +// KeyPair provides the central interface to nkeys. 
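+// A typical round trip, sketched here with illustrative data:
+//
+//	kp, _ := CreateUser()
+//	sig, _ := kp.Sign([]byte("payload"))
+//	err := kp.Verify([]byte("payload"), sig)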
+type KeyPair interface { + Seed() ([]byte, error) + PublicKey() (string, error) + PrivateKey() ([]byte, error) + Sign(input []byte) ([]byte, error) + Verify(input []byte, sig []byte) error + Wipe() +} + +// CreateUser will create a User typed KeyPair. +func CreateUser() (KeyPair, error) { + return CreatePair(PrefixByteUser) +} + +// CreateAccount will create an Account typed KeyPair. +func CreateAccount() (KeyPair, error) { + return CreatePair(PrefixByteAccount) +} + +// CreateServer will create a Server typed KeyPair. +func CreateServer() (KeyPair, error) { + return CreatePair(PrefixByteServer) +} + +// CreateCluster will create a Cluster typed KeyPair. +func CreateCluster() (KeyPair, error) { + return CreatePair(PrefixByteCluster) +} + +// CreateOperator will create an Operator typed KeyPair. +func CreateOperator() (KeyPair, error) { + return CreatePair(PrefixByteOperator) +} + +// FromPublicKey will create a KeyPair capable of verifying signatures. +func FromPublicKey(public string) (KeyPair, error) { + raw, err := decode([]byte(public)) + if err != nil { + return nil, err + } + pre := PrefixByte(raw[0]) + if err := checkValidPublicPrefixByte(pre); err != nil { + return nil, ErrInvalidPublicKey + } + return &pub{pre, raw[1:]}, nil +} + +// FromSeed will create a KeyPair capable of signing and verifying signatures. +func FromSeed(seed []byte) (KeyPair, error) { + _, _, err := DecodeSeed(seed) + if err != nil { + return nil, err + } + copy := append([]byte{}, seed...) + return &kp{copy}, nil +} + +// FromRawSeed will create a KeyPair from the raw 32 byte seed for a given type. +func FromRawSeed(prefix PrefixByte, rawSeed []byte) (KeyPair, error) { + seed, err := EncodeSeed(prefix, rawSeed) + if err != nil { + return nil, err + } + return &kp{seed}, nil +} diff --git a/vendor/github.com/nats-io/nkeys/public.go b/vendor/github.com/nats-io/nkeys/public.go new file mode 100644 index 00000000..cb7927a6 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/public.go @@ -0,0 +1,66 @@ +// Copyright 2018 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nkeys + +import ( + "crypto/rand" + "io" + + "golang.org/x/crypto/ed25519" +) + +// A KeyPair from a public key capable of verifying only. +type pub struct { + pre PrefixByte + pub ed25519.PublicKey +} + +// PublicKey will return the encoded public key associated with the KeyPair. +// All KeyPairs have a public key. +func (p *pub) PublicKey() (string, error) { + pk, err := Encode(p.pre, p.pub) + if err != nil { + return "", err + } + return string(pk), nil +} + +// Seed will return an error since this is not available for public key only KeyPairs. +func (p *pub) Seed() ([]byte, error) { + return nil, ErrPublicKeyOnly +} + +// PrivateKey will return an error since this is not available for public key only KeyPairs. +func (p *pub) PrivateKey() ([]byte, error) { + return nil, ErrPublicKeyOnly +} + +// Sign will return an error since this is not available for public key only KeyPairs. 
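+// The error returned is always ErrCannotSign.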
+func (p *pub) Sign(input []byte) ([]byte, error) { + return nil, ErrCannotSign +} + +// Verify will verify the input against a signature utilizing the public key. +func (p *pub) Verify(input []byte, sig []byte) error { + if !ed25519.Verify(p.pub, input, sig) { + return ErrInvalidSignature + } + return nil +} + +// Wipe will randomize the public key and erase the pre byte. +func (p *pub) Wipe() { + p.pre = '0' + io.ReadFull(rand.Reader, p.pub) +} diff --git a/vendor/github.com/nats-io/nkeys/strkey.go b/vendor/github.com/nats-io/nkeys/strkey.go new file mode 100644 index 00000000..324ea638 --- /dev/null +++ b/vendor/github.com/nats-io/nkeys/strkey.go @@ -0,0 +1,306 @@ +// Copyright 2018 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nkeys + +import ( + "bytes" + "encoding/base32" + "encoding/binary" + "golang.org/x/crypto/ed25519" +) + +// PrefixByte is a lead byte representing the type. +type PrefixByte byte + +const ( + // PrefixByteSeed is the version byte used for encoded NATS Seeds + PrefixByteSeed PrefixByte = 18 << 3 // Base32-encodes to 'S...' + + // PrefixBytePrivate is the version byte used for encoded NATS Private keys + PrefixBytePrivate PrefixByte = 15 << 3 // Base32-encodes to 'P...' + + // PrefixByteServer is the version byte used for encoded NATS Servers + PrefixByteServer PrefixByte = 13 << 3 // Base32-encodes to 'N...' + + // PrefixByteCluster is the version byte used for encoded NATS Clusters + PrefixByteCluster PrefixByte = 2 << 3 // Base32-encodes to 'C...' + + // PrefixByteOperator is the version byte used for encoded NATS Operators + PrefixByteOperator PrefixByte = 14 << 3 // Base32-encodes to 'O...' + + // PrefixByteAccount is the version byte used for encoded NATS Accounts + PrefixByteAccount PrefixByte = 0 // Base32-encodes to 'A...' + + // PrefixByteUser is the version byte used for encoded NATS Users + PrefixByteUser PrefixByte = 20 << 3 // Base32-encodes to 'U...' + + // PrefixByteUnknown is for unknown prefixes. + PrefixByteUnknown PrefixByte = 23 << 3 // Base32-encodes to 'X...' +) + +// Set our encoding to not include padding '==' +var b32Enc = base32.StdEncoding.WithPadding(base32.NoPadding) + +// Encode will encode a raw key or seed with the prefix and crc16 and then base32 encoded. +func Encode(prefix PrefixByte, src []byte) ([]byte, error) { + if err := checkValidPrefixByte(prefix); err != nil { + return nil, err + } + + var raw bytes.Buffer + + // write prefix byte + if err := raw.WriteByte(byte(prefix)); err != nil { + return nil, err + } + + // write payload + if _, err := raw.Write(src); err != nil { + return nil, err + } + + // Calculate and write crc16 checksum + err := binary.Write(&raw, binary.LittleEndian, crc16(raw.Bytes())) + if err != nil { + return nil, err + } + + data := raw.Bytes() + buf := make([]byte, b32Enc.EncodedLen(len(data))) + b32Enc.Encode(buf, data) + return buf[:], nil +} + +// EncodeSeed will encode a raw key with the prefix and then seed prefix and crc16 and then base32 encoded. 
+func EncodeSeed(public PrefixByte, src []byte) ([]byte, error) { + if err := checkValidPublicPrefixByte(public); err != nil { + return nil, err + } + + if len(src) != ed25519.SeedSize { + return nil, ErrInvalidSeedLen + } + + // In order to make this human printable for both bytes, we need to do a little + // bit manipulation to setup for base32 encoding which takes 5 bits at a time. + b1 := byte(PrefixByteSeed) | (byte(public) >> 5) + b2 := (byte(public) & 31) << 3 // 31 = 00011111 + + var raw bytes.Buffer + + raw.WriteByte(b1) + raw.WriteByte(b2) + + // write payload + if _, err := raw.Write(src); err != nil { + return nil, err + } + + // Calculate and write crc16 checksum + err := binary.Write(&raw, binary.LittleEndian, crc16(raw.Bytes())) + if err != nil { + return nil, err + } + + data := raw.Bytes() + buf := make([]byte, b32Enc.EncodedLen(len(data))) + b32Enc.Encode(buf, data) + return buf, nil +} + +// IsValidEncoding will tell you if the encoding is a valid key. +func IsValidEncoding(src []byte) bool { + _, err := decode(src) + return err == nil +} + +// decode will decode the base32 and check crc16 and the prefix for validity. +func decode(src []byte) ([]byte, error) { + raw := make([]byte, b32Enc.DecodedLen(len(src))) + n, err := b32Enc.Decode(raw, src) + if err != nil { + return nil, err + } + raw = raw[:n] + + if len(raw) < 4 { + return nil, ErrInvalidEncoding + } + + var crc uint16 + checksum := bytes.NewReader(raw[len(raw)-2:]) + if err := binary.Read(checksum, binary.LittleEndian, &crc); err != nil { + return nil, err + } + + // ensure checksum is valid + if err := validate(raw[0:len(raw)-2], crc); err != nil { + return nil, err + } + + return raw[:len(raw)-2], nil +} + +// Decode will decode the base32 string and check crc16 and enforce the prefix is what is expected. +func Decode(expectedPrefix PrefixByte, src []byte) ([]byte, error) { + if err := checkValidPrefixByte(expectedPrefix); err != nil { + return nil, err + } + raw, err := decode(src) + if err != nil { + return nil, err + } + if prefix := PrefixByte(raw[0]); prefix != expectedPrefix { + return nil, ErrInvalidPrefixByte + } + return raw[1:], nil +} + +// DecodeSeed will decode the base32 string and check crc16 and enforce the prefix is a seed +// and the subsequent type is a valid type. +func DecodeSeed(src []byte) (PrefixByte, []byte, error) { + raw, err := decode(src) + if err != nil { + return PrefixByteSeed, nil, err + } + // Need to do the reverse here to get back to internal representation. + b1 := raw[0] & 248 // 248 = 11111000 + b2 := (raw[0]&7)<<5 | ((raw[1] & 248) >> 3) // 7 = 00000111 + + if PrefixByte(b1) != PrefixByteSeed { + return PrefixByteSeed, nil, ErrInvalidSeed + } + if checkValidPublicPrefixByte(PrefixByte(b2)) != nil { + return PrefixByteSeed, nil, ErrInvalidSeed + } + return PrefixByte(b2), raw[2:], nil +} + +// Prefix returns PrefixBytes of its input +func Prefix(src string) PrefixByte { + b, err := decode([]byte(src)) + if err != nil { + return PrefixByteUnknown + } + prefix := PrefixByte(b[0]) + err = checkValidPrefixByte(prefix) + if err == nil { + return prefix + } + // Might be a seed. + b1 := b[0] & 248 + if PrefixByte(b1) == PrefixByteSeed { + return PrefixByteSeed + } + return PrefixByteUnknown +} + +// IsValidPublicKey will decode and verify that the string is a valid encoded public key. 
+func IsValidPublicKey(src string) bool { + b, err := decode([]byte(src)) + if err != nil { + return false + } + if prefix := PrefixByte(b[0]); checkValidPublicPrefixByte(prefix) != nil { + return false + } + return true +} + +// IsValidPublicUserKey will decode and verify the string is a valid encoded Public User Key. +func IsValidPublicUserKey(src string) bool { + _, err := Decode(PrefixByteUser, []byte(src)) + return err == nil +} + +// IsValidPublicAccountKey will decode and verify the string is a valid encoded Public Account Key. +func IsValidPublicAccountKey(src string) bool { + _, err := Decode(PrefixByteAccount, []byte(src)) + return err == nil +} + +// IsValidPublicServerKey will decode and verify the string is a valid encoded Public Server Key. +func IsValidPublicServerKey(src string) bool { + _, err := Decode(PrefixByteServer, []byte(src)) + return err == nil +} + +// IsValidPublicClusterKey will decode and verify the string is a valid encoded Public Cluster Key. +func IsValidPublicClusterKey(src string) bool { + _, err := Decode(PrefixByteCluster, []byte(src)) + return err == nil +} + +// IsValidPublicOperatorKey will decode and verify the string is a valid encoded Public Operator Key. +func IsValidPublicOperatorKey(src string) bool { + _, err := Decode(PrefixByteOperator, []byte(src)) + return err == nil +} + +// checkValidPrefixByte returns an error if the provided value +// is not one of the defined valid prefix byte constants. +func checkValidPrefixByte(prefix PrefixByte) error { + switch prefix { + case PrefixByteOperator, PrefixByteServer, PrefixByteCluster, + PrefixByteAccount, PrefixByteUser, PrefixByteSeed, PrefixBytePrivate: + return nil + } + return ErrInvalidPrefixByte +} + +// checkValidPublicPrefixByte returns an error if the provided value +// is not one of the public defined valid prefix byte constants. 
+func checkValidPublicPrefixByte(prefix PrefixByte) error { + switch prefix { + case PrefixByteServer, PrefixByteCluster, PrefixByteOperator, PrefixByteAccount, PrefixByteUser: + return nil + } + return ErrInvalidPrefixByte +} + +func (p PrefixByte) String() string { + switch p { + case PrefixByteOperator: + return "operator" + case PrefixByteServer: + return "server" + case PrefixByteCluster: + return "cluster" + case PrefixByteAccount: + return "account" + case PrefixByteUser: + return "user" + case PrefixByteSeed: + return "seed" + case PrefixBytePrivate: + return "private" + } + return "unknown" +} + +// CompatibleKeyPair returns an error if the KeyPair doesn't match expected PrefixByte(s) +func CompatibleKeyPair(kp KeyPair, expected ...PrefixByte) error { + pk, err := kp.PublicKey() + if err != nil { + return err + } + pkType := Prefix(pk) + for _, k := range expected { + if pkType == k { + return nil + } + } + + return ErrIncompatibleKey +} diff --git a/vendor/github.com/nats-io/nuid/.gitignore b/vendor/github.com/nats-io/nuid/.gitignore new file mode 100644 index 00000000..daf913b1 --- /dev/null +++ b/vendor/github.com/nats-io/nuid/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/nats-io/nuid/.travis.yml b/vendor/github.com/nats-io/nuid/.travis.yml new file mode 100644 index 00000000..52be7265 --- /dev/null +++ b/vendor/github.com/nats-io/nuid/.travis.yml @@ -0,0 +1,17 @@ +language: go +sudo: false +go: +- 1.9.x +- 1.10.x + +install: +- go get -t ./... +- go get github.com/mattn/goveralls + +script: +- go fmt ./... +- go vet ./... +- go test -v +- go test -v --race +- go test -v -covermode=count -coverprofile=coverage.out +- $HOME/gopath/bin/goveralls -coverprofile coverage.out -service travis-ci diff --git a/vendor/github.com/nats-io/nuid/GOVERNANCE.md b/vendor/github.com/nats-io/nuid/GOVERNANCE.md new file mode 100644 index 00000000..01aee70d --- /dev/null +++ b/vendor/github.com/nats-io/nuid/GOVERNANCE.md @@ -0,0 +1,3 @@ +# NATS NUID Governance + +NATS NUID is part of the NATS project and is subject to the [NATS Governance](https://github.com/nats-io/nats-general/blob/master/GOVERNANCE.md). \ No newline at end of file diff --git a/vendor/github.com/nats-io/nuid/LICENSE b/vendor/github.com/nats-io/nuid/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/nats-io/nuid/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/nats-io/nuid/MAINTAINERS.md b/vendor/github.com/nats-io/nuid/MAINTAINERS.md new file mode 100644 index 00000000..6d0ed3e3 --- /dev/null +++ b/vendor/github.com/nats-io/nuid/MAINTAINERS.md @@ -0,0 +1,6 @@ +# Maintainers + +Maintainership is on a per project basis. + +### Core-maintainers + - Derek Collison [@derekcollison](https://github.com/derekcollison) \ No newline at end of file diff --git a/vendor/github.com/nats-io/nuid/README.md b/vendor/github.com/nats-io/nuid/README.md new file mode 100644 index 00000000..16e53948 --- /dev/null +++ b/vendor/github.com/nats-io/nuid/README.md @@ -0,0 +1,47 @@ +# NUID + +[![License Apache 2](https://img.shields.io/badge/License-Apache2-blue.svg)](https://www.apache.org/licenses/LICENSE-2.0) +[![ReportCard](http://goreportcard.com/badge/nats-io/nuid)](http://goreportcard.com/report/nats-io/nuid) +[![Build Status](https://travis-ci.org/nats-io/nuid.svg?branch=master)](http://travis-ci.org/nats-io/nuid) +[![Release](https://img.shields.io/badge/release-v1.0.1-1eb0fc.svg)](https://github.com/nats-io/nuid/releases/tag/v1.0.1) +[![GoDoc](http://godoc.org/github.com/nats-io/nuid?status.png)](http://godoc.org/github.com/nats-io/nuid) +[![Coverage Status](https://coveralls.io/repos/github/nats-io/nuid/badge.svg?branch=master)](https://coveralls.io/github/nats-io/nuid?branch=master) + +A highly performant unique identifier generator. + +## Installation + +Use the `go` command: + + $ go get github.com/nats-io/nuid + +## Basic Usage +```go + +// Utilize the global locked instance +nuid := nuid.Next() + +// Create an instance, these are not locked. +n := nuid.New() +nuid = n.Next() + +// Generate a new crypto/rand seeded prefix. +// Generally not needed, happens automatically. +n.RandomizePrefix() +``` + +## Performance +NUID needs to be very fast to generate and be truly unique, all while being entropy pool friendly. +NUID uses 12 bytes of crypto generated data (entropy draining), and 10 bytes of pseudo-random +sequential data that increments with a pseudo-random increment. + +Total length of a NUID string is 22 bytes of base 62 ascii text, so 62^22 or +2707803647802660400290261537185326956544 possibilities. + +NUID can generate identifiers as fast as 60ns, or ~16 million per second. There is an associated +benchmark you can use to test performance on your own hardware. + +## License + +Unless otherwise noted, the NATS source files are distributed +under the Apache Version 2.0 license found in the LICENSE file. diff --git a/vendor/github.com/nats-io/nuid/nuid.go b/vendor/github.com/nats-io/nuid/nuid.go new file mode 100644 index 00000000..8134c764 --- /dev/null +++ b/vendor/github.com/nats-io/nuid/nuid.go @@ -0,0 +1,135 @@ +// Copyright 2016-2019 The NATS Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// A unique identifier generator that is high performance, very fast, and tries to be entropy pool friendly. 
+package nuid + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "sync" + "time" + + prand "math/rand" +) + +// NUID needs to be very fast to generate and truly unique, all while being entropy pool friendly. +// We will use 12 bytes of crypto generated data (entropy draining), and 10 bytes of sequential data +// that is started at a pseudo random number and increments with a pseudo-random increment. +// Total is 22 bytes of base 62 ascii text :) + +// Version of the library +const Version = "1.0.1" + +const ( + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + base = 62 + preLen = 12 + seqLen = 10 + maxSeq = int64(839299365868340224) // base^seqLen == 62^10 + minInc = int64(33) + maxInc = int64(333) + totalLen = preLen + seqLen +) + +type NUID struct { + pre []byte + seq int64 + inc int64 +} + +type lockedNUID struct { + sync.Mutex + *NUID +} + +// Global NUID +var globalNUID *lockedNUID + +// Seed sequential random with crypto or math/random and current time +// and generate crypto prefix. +func init() { + r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + prand.Seed(time.Now().UnixNano()) + } else { + prand.Seed(r.Int64()) + } + globalNUID = &lockedNUID{NUID: New()} + globalNUID.RandomizePrefix() +} + +// New will generate a new NUID and properly initialize the prefix, sequential start, and sequential increment. +func New() *NUID { + n := &NUID{ + seq: prand.Int63n(maxSeq), + inc: minInc + prand.Int63n(maxInc-minInc), + pre: make([]byte, preLen), + } + n.RandomizePrefix() + return n +} + +// Generate the next NUID string from the global locked NUID instance. +func Next() string { + globalNUID.Lock() + nuid := globalNUID.Next() + globalNUID.Unlock() + return nuid +} + +// Generate the next NUID string. +func (n *NUID) Next() string { + // Increment and capture. + n.seq += n.inc + if n.seq >= maxSeq { + n.RandomizePrefix() + n.resetSequential() + } + seq := n.seq + + // Copy prefix + var b [totalLen]byte + bs := b[:preLen] + copy(bs, n.pre) + + // copy in the seq in base62. + for i, l := len(b), seq; i > preLen; l /= base { + i -= 1 + b[i] = digits[l%base] + } + return string(b[:]) +} + +// Resets the sequential portion of the NUID. +func (n *NUID) resetSequential() { + n.seq = prand.Int63n(maxSeq) + n.inc = minInc + prand.Int63n(maxInc-minInc) +} + +// Generate a new prefix from crypto/rand. +// This call *can* drain entropy and will be called automatically when we exhaust the sequential range. 
+// Will panic if it gets an error from rand.Int() +func (n *NUID) RandomizePrefix() { + var cb [preLen]byte + cbs := cb[:] + if nb, err := rand.Read(cbs); nb != preLen || err != nil { + panic(fmt.Sprintf("nuid: failed generating crypto random number: %v\n", err)) + } + + for i := 0; i < preLen; i++ { + n.pre[i] = digits[int(cbs[i])%base] + } +} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore new file mode 100644 index 00000000..daf913b1 --- /dev/null +++ b/vendor/github.com/pkg/errors/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml new file mode 100644 index 00000000..9159de03 --- /dev/null +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -0,0 +1,10 @@ +language: go +go_import_path: github.com/pkg/errors +go: + - 1.11.x + - 1.12.x + - 1.13.x + - tip + +script: + - make check diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 00000000..835ba3e7 --- /dev/null +++ b/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile new file mode 100644 index 00000000..ce9d7cde --- /dev/null +++ b/vendor/github.com/pkg/errors/Makefile @@ -0,0 +1,44 @@ +PKGS := github.com/pkg/errors +SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) +GO := go + +check: test vet gofmt misspell unconvert staticcheck ineffassign unparam + +test: + $(GO) test $(PKGS) + +vet: | test + $(GO) vet $(PKGS) + +staticcheck: + $(GO) get honnef.co/go/tools/cmd/staticcheck + staticcheck -checks all $(PKGS) + +misspell: + $(GO) get github.com/client9/misspell/cmd/misspell + misspell \ + -locale GB \ + -error \ + *.md *.go + +unconvert: + $(GO) get github.com/mdempsky/unconvert + unconvert -v $(PKGS) + +ineffassign: + $(GO) get github.com/gordonklaus/ineffassign + find $(SRCDIRS) -name '*.go' | xargs ineffassign + +pedantic: check errcheck + +unparam: + $(GO) get mvdan.cc/unparam + unparam ./... + +errcheck: + $(GO) get github.com/kisielk/errcheck + errcheck $(PKGS) + +gofmt: + @echo Checking code is gofmted + @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md new file mode 100644 index 00000000..54dfdcb1 --- /dev/null +++ b/vendor/github.com/pkg/errors/README.md @@ -0,0 +1,59 @@ +# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) + +Package errors provides simple error handling primitives. + +`go get github.com/pkg/errors` + +The traditional error handling idiom in Go is roughly akin to +```go +if err != nil { + return err +} +``` +which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. + +## Adding context to an error + +The errors.Wrap function returns a new error that adds context to the original error. For example +```go +_, err := ioutil.ReadAll(r) +if err != nil { + return errors.Wrap(err, "read failed") +} +``` +## Retrieving the cause of an error + +Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. +```go +type causer interface { + Cause() error +} +``` +`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: +```go +switch err := errors.Cause(err).(type) { +case *MyError: + // handle specifically +default: + // unknown error +} +``` + +[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). 
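+
+As a hedged, self-contained sketch tying `errors.Wrap` and `errors.Cause` together (the `openConfig` helper and the file path are invented for illustration, not part of this package):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+// openConfig adds context to the underlying *os.PathError.
+func openConfig(path string) error {
+	_, err := os.Open(path)
+	if err != nil {
+		return errors.Wrap(err, "read config failed")
+	}
+	return nil
+}
+
+func main() {
+	err := openConfig("/no/such/file")
+	fmt.Println(err)               // read config failed: open /no/such/file: ...
+	fmt.Println(errors.Cause(err)) // the original *os.PathError
+	fmt.Printf("%+v\n", err)       // message plus the stack trace recorded by Wrap
+}
+```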
+ +## Roadmap + +With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: + +- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) +- 1.0. Final release. + +## Contributing + +Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. + +Before sending a PR, please discuss your change by raising an issue. + +## License + +BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml new file mode 100644 index 00000000..a932eade --- /dev/null +++ b/vendor/github.com/pkg/errors/appveyor.yml @@ -0,0 +1,32 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\pkg\errors +shallow_clone: true # for startup speed + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +# http://www.appveyor.com/docs/installed-software +install: + # some helpful output for debugging builds + - go version + - go env + # pre-installed MinGW at C:\MinGW is 32bit only + # but MSYS2 at C:\msys64 has mingw64 + - set PATH=C:\msys64\mingw64\bin;%PATH% + - gcc --version + - g++ --version + +build_script: + - go install -v ./... + +test_script: + - set PATH=C:\gopath\bin;%PATH% + - go test -v ./... + +#artifacts: +# - path: '%GOPATH%\bin\*.exe' +deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 00000000..161aea25 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,288 @@ +// Package errors provides simple error handling primitives. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which when applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. +// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error by recording a stack trace at the point Wrap is called, +// together with the supplied message. For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// If additional control is required, the errors.WithStack and +// errors.WithMessage functions destructure errors.Wrap into its component +// operations: annotating an error with a stack trace and with a message, +// respectively. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error that does not implement causer, which is assumed to be +// the original cause. 
For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// Although the causer interface is not exported by this package, it is +// considered a part of its stable public interface. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported: +// +// %s print the error. If the error has a Cause it will be +// printed recursively. +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are +// invoked. This information can be retrieved with the following interface: +// +// type stackTracer interface { +// StackTrace() errors.StackTrace +// } +// +// The returned errors.StackTrace type is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if err, ok := err.(stackTracer); ok { +// for _, f := range err.StackTrace() { +// fmt.Printf("%+s:%d\n", f, f) +// } +// } +// +// Although the stackTracer interface is not exported by this package, it is +// considered a part of its stable public interface. +// +// See the documentation for Frame.Format for more details. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. +type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + err, + callers(), + } +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withStack) Unwrap() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. 
+func Wrap(err error, message string) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is called, and the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. +// If err is nil, WithMessage returns nil. +func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + +// WithMessagef annotates err with the format specifier. +// If err is nil, WithMessagef returns nil. +func WithMessagef(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withMessage) Unwrap() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go new file mode 100644 index 00000000..be0d10d0 --- /dev/null +++ b/vendor/github.com/pkg/errors/go113.go @@ -0,0 +1,38 @@ +// +build go1.13 + +package errors + +import ( + stderrors "errors" +) + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { return stderrors.Is(err, target) } + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. 
+// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +func As(err error, target interface{}) bool { return stderrors.As(err, target) } + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return stderrors.Unwrap(err) +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go new file mode 100644 index 00000000..779a8348 --- /dev/null +++ b/vendor/github.com/pkg/errors/stack.go @@ -0,0 +1,177 @@ +package errors + +import ( + "fmt" + "io" + "path" + "runtime" + "strconv" + "strings" +) + +// Frame represents a program counter inside a stack frame. +// For historical reasons if Frame is interpreted as a uintptr +// its value represents the program counter + 1. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// name returns the name of this function, if known. +func (f Frame) name() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + return fn.Name() +} + +// Format formats the frame according to the fmt.Formatter interface. +// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s function name and path of source file relative to the compile time +// GOPATH separated by \n\t (\n\t) +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + io.WriteString(s, f.name()) + io.WriteString(s, "\n\t") + io.WriteString(s, f.file()) + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + io.WriteString(s, strconv.Itoa(f.line())) + case 'n': + io.WriteString(s, funcname(f.name())) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// MarshalText formats a stacktrace Frame as a text string. The output is the +// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. +func (f Frame) MarshalText() ([]byte, error) { + name := f.name() + if name == "unknown" { + return []byte(name), nil + } + return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). +type StackTrace []Frame + +// Format formats the stack of Frames according to the fmt.Formatter interface. +// +// %s lists source files for each Frame in the stack +// %v lists the source file and line number for each Frame in the stack +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+v Prints filename, function, and line number for each Frame in the stack. 
+func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for _, f := range st { + io.WriteString(s, "\n") + f.Format(s, verb) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + st.formatSlice(s, verb) + } + case 's': + st.formatSlice(s, verb) + } +} + +// formatSlice will format this StackTrace into the given buffer as a slice of +// Frame, only valid when called with '%s' or '%v'. +func (st StackTrace) formatSlice(s fmt.State, verb rune) { + io.WriteString(s, "[") + for i, f := range st { + if i > 0 { + io.WriteString(s, " ") + } + f.Format(s, verb) + } + io.WriteString(s, "]") +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). +func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} diff --git a/vendor/github.com/spf13/afero/.gitignore b/vendor/github.com/spf13/afero/.gitignore new file mode 100644 index 00000000..9c1d9861 --- /dev/null +++ b/vendor/github.com/spf13/afero/.gitignore @@ -0,0 +1,2 @@ +sftpfs/file1 +sftpfs/test/ diff --git a/vendor/github.com/spf13/afero/LICENSE.txt b/vendor/github.com/spf13/afero/LICENSE.txt new file mode 100644 index 00000000..298f0e26 --- /dev/null +++ b/vendor/github.com/spf13/afero/LICENSE.txt @@ -0,0 +1,174 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md new file mode 100644 index 00000000..3bafbfdf --- /dev/null +++ b/vendor/github.com/spf13/afero/README.md @@ -0,0 +1,442 @@ +![afero logo-sm](https://cloud.githubusercontent.com/assets/173412/11490338/d50e16dc-97a5-11e5-8b12-019a300d0fcb.png) + +A FileSystem Abstraction System for Go + +[![Test](https://github.com/spf13/afero/actions/workflows/test.yml/badge.svg)](https://github.com/spf13/afero/actions/workflows/test.yml) [![GoDoc](https://godoc.org/github.com/spf13/afero?status.svg)](https://godoc.org/github.com/spf13/afero) [![Join the chat at https://gitter.im/spf13/afero](https://badges.gitter.im/Dev%20Chat.svg)](https://gitter.im/spf13/afero?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + +# Overview + +Afero is a filesystem framework providing a simple, uniform and universal API +interacting with any filesystem, as an abstraction layer providing interfaces, +types and methods. Afero has an exceptionally clean interface and simple design +without needless constructors or initialization methods. + +Afero is also a library providing a base set of interoperable backend +filesystems that make it easy to work with afero while retaining all the power +and benefit of the os and ioutil packages. + +Afero provides significant improvements over using the os package alone, most +notably the ability to create mock and testing filesystems without relying on the disk. + +It is suitable for use in any situation where you would consider using the OS +package as it provides an additional abstraction that makes it easy to use a +memory backed file system during testing. It also adds support for the http +filesystem for full interoperability. 
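+
+To make the abstraction concrete, here is a minimal, illustrative sketch (not
+part of the upstream README): the same code works against an in-memory
+filesystem or the real OS filesystem, depending only on which constructor you
+pick.
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/spf13/afero"
+)
+
+func main() {
+	// Swap in afero.NewOsFs() here and nothing else needs to change.
+	var fs afero.Fs = afero.NewMemMapFs()
+
+	if err := afero.WriteFile(fs, "/greeting.txt", []byte("hello"), 0o644); err != nil {
+		panic(err)
+	}
+	data, err := afero.ReadFile(fs, "/greeting.txt")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(data)) // prints "hello"
+}
+```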
+
+
+## Afero Features
+
+* A single consistent API for accessing a variety of filesystems
+* Interoperation between a variety of file system types
+* A set of interfaces to encourage and enforce interoperability between backends
+* An atomic cross platform memory backed file system
+* Support for compositional (union) file systems by combining multiple file systems acting as one
+* Specialized backends which modify existing filesystems (Read Only, Regexp filtered)
+* A set of utility functions ported from io, ioutil & hugo to be afero aware
+* Wrapper for Go 1.16 filesystem abstraction `io/fs.FS`
+
+# Using Afero
+
+Afero is easy to use and easier to adopt.
+
+A few different ways you could use Afero:
+
+* Use the interfaces alone to define your own file system.
+* Use it as a wrapper around the os package.
+* Define different filesystems for different parts of your application.
+* Use Afero for mock filesystems while testing.
+
+## Step 1: Install Afero
+
+First use `go get` to install the latest version of the library.
+
+    $ go get github.com/spf13/afero
+
+Next include Afero in your application.
+```go
+import "github.com/spf13/afero"
+```
+
+## Step 2: Declare a backend
+
+First define a package variable and set it to a filesystem backend.
+```go
+var AppFs = afero.NewMemMapFs()
+```
+or
+```go
+var AppFs = afero.NewOsFs()
+```
+It is important to note that if you repeat the constructor call you
+will be using a completely new and isolated filesystem. In the case of
+OsFs it will still use the same underlying filesystem, but doing so reduces
+the ability to drop in other filesystems as desired.
+
+## Step 3: Use it like you would the OS package
+
+Throughout your application use any function and method like you normally
+would.
+
+So if your application previously had:
+```go
+os.Open("/tmp/foo")
+```
+you would replace it with:
+```go
+AppFs.Open("/tmp/foo")
+```
+
+`AppFs` is the variable we defined above.
+
+
+## List of all available functions
+
+File System Methods Available:
+```go
+Chmod(name string, mode os.FileMode) : error
+Chown(name string, uid, gid int) : error
+Chtimes(name string, atime time.Time, mtime time.Time) : error
+Create(name string) : File, error
+Mkdir(name string, perm os.FileMode) : error
+MkdirAll(path string, perm os.FileMode) : error
+Name() : string
+Open(name string) : File, error
+OpenFile(name string, flag int, perm os.FileMode) : File, error
+Remove(name string) : error
+RemoveAll(path string) : error
+Rename(oldname, newname string) : error
+Stat(name string) : os.FileInfo, error
+```
+File Interfaces and Methods Available:
+```go
+io.Closer
+io.Reader
+io.ReaderAt
+io.Seeker
+io.Writer
+io.WriterAt
+
+Name() : string
+Readdir(count int) : []os.FileInfo, error
+Readdirnames(n int) : []string, error
+Stat() : os.FileInfo, error
+Sync() : error
+Truncate(size int64) : error
+WriteString(s string) : ret int, err error
+```
+In some applications it may make sense to define a new package that
+simply exports the file system variable for easy access from anywhere.
+
+## Using Afero's utility functions
+
+Afero provides a set of functions to make it easier to use the underlying file systems.
+These functions have been primarily ported from io & ioutil with some developed for Hugo.
+
+The afero utilities support all afero compatible backends.
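+
+For instance (an illustrative sketch, not from the upstream docs), a helper
+such as `DirExists` takes the filesystem as its first argument and works
+unchanged against an in-memory backend:
+
+```go
+fs := afero.NewMemMapFs()
+_ = fs.Mkdir("/data", 0o755)
+ok, err := afero.DirExists(fs, "/data") // ok == true, err == nil
+```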
+
+The list of utilities includes:
+
+```go
+DirExists(path string) (bool, error)
+Exists(path string) (bool, error)
+FileContainsBytes(filename string, subslice []byte) (bool, error)
+GetTempDir(subPath string) string
+IsDir(path string) (bool, error)
+IsEmpty(path string) (bool, error)
+ReadDir(dirname string) ([]os.FileInfo, error)
+ReadFile(filename string) ([]byte, error)
+SafeWriteReader(path string, r io.Reader) (err error)
+TempDir(dir, prefix string) (name string, err error)
+TempFile(dir, prefix string) (f File, err error)
+Walk(root string, walkFn filepath.WalkFunc) error
+WriteFile(filename string, data []byte, perm os.FileMode) error
+WriteReader(path string, r io.Reader) (err error)
+```
+For a complete list see [Afero's GoDoc](https://godoc.org/github.com/spf13/afero).
+
+They can be used in two different ways. You can either call
+them directly, where the first parameter of each function is the file
+system, or you can declare a new `Afero`, a custom type used to bind these
+functions as methods to a given filesystem.
+
+### Calling utilities directly
+
+```go
+fs := new(afero.MemMapFs)
+f, err := afero.TempFile(fs, "", "ioutil-test")
+```
+
+### Calling via Afero
+
+```go
+fs := afero.NewMemMapFs()
+afs := &afero.Afero{Fs: fs}
+f, err := afs.TempFile("", "ioutil-test")
+```
+
+## Using Afero for Testing
+
+There is a large benefit to using a mock filesystem for testing. It has a
+completely blank state every time it is initialized and is easily
+reproducible regardless of OS. You could create files to your heart’s content,
+and file access would be fast while also saving you from all the annoying
+issues with deleting temporary files, Windows file locking, etc. The MemMapFs
+backend is perfect for testing.
+
+* Much faster than performing I/O operations on disk
+* Avoids security and permission issues
+* Far more control. 'rm -rf /' with confidence
+* Test setup is far easier to do
+* No test cleanup needed
+
+One way to accomplish this is to define a variable as mentioned above.
+In your application this will be set to afero.NewOsFs(); during testing you
+can set it to afero.NewMemMapFs().
+
+It wouldn't be uncommon to have each test initialize a blank slate memory
+backend. To do this, define `appFS = afero.NewOsFs()` somewhere
+appropriate in your application code. This approach ensures that tests are order
+independent, with no test relying on the state left by an earlier test.
+
+Then in your tests, initialize a new MemMapFs for each test:
+```go
+func TestExist(t *testing.T) {
+	appFS := afero.NewMemMapFs()
+	// create test files and directories
+	appFS.MkdirAll("src/a", 0755)
+	afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644)
+	afero.WriteFile(appFS, "src/c", []byte("file c"), 0644)
+	name := "src/c"
+	_, err := appFS.Stat(name)
+	if os.IsNotExist(err) {
+		t.Errorf("file \"%s\" does not exist.\n", name)
+	}
+}
+```
+
+# Available Backends
+
+## Operating System Native
+
+### OsFs
+
+The first is simply a wrapper around the native OS calls. This makes it
+very easy to use, as all of the calls are the same as the existing OS
+calls. It also makes it trivial to have your code use the OS during
+operation and a mock filesystem during testing or as needed.
+
+```go
+appfs := afero.NewOsFs()
+appfs.MkdirAll("src/a", 0755)
+```
+
+## Memory Backed Storage
+
+### MemMapFs
+
+Afero also provides a fully atomic memory backed filesystem, perfect for use in
+mocking and for avoiding unnecessary disk I/O when persistence isn't
+necessary. It is fully concurrent and safe to use from multiple
+goroutines.
+
+```go
+mm := afero.NewMemMapFs()
+mm.MkdirAll("src/a", 0755)
+```
+
+#### InMemoryFile
+
+As part of MemMapFs, Afero also provides an atomic, fully concurrent memory
+backed file implementation. This can be used in other memory backed file
+systems with ease. Plans are to add a radix tree memory stored file
+system using InMemoryFile.
+
+## Network Interfaces
+
+### SftpFs
+
+Afero has experimental support for the secure file transfer protocol (SFTP), which can
+be used to perform file operations over an encrypted channel.
+
+### GCSFs
+
+Afero has experimental support for Google Cloud Storage (GCS). You can either set the
+`GOOGLE_APPLICATION_CREDENTIALS_JSON` env variable to your JSON credentials or use `opts` in
+`NewGcsFS` to configure access to your GCS bucket.
+
+Some known limitations of the existing implementation:
+* No Chmod support - The GCS ACL could probably be mapped to *nix style permissions, but that would add another level of complexity and is ignored in this version.
+* No Chtimes support - Could be simulated with attributes (GCS a/m-times are set implicitly), but that is left for another version.
+* Not thread safe - Also assumes all file operations are done through the same instance of the GcsFs. File operations between different GcsFs instances are not guaranteed to be consistent.
+
+
+## Filtering Backends
+
+### BasePathFs
+
+The BasePathFs restricts all operations to a given path within an Fs.
+File names passed to operations on this Fs have the base path prepended
+before the source Fs is called.
+
+```go
+bp := afero.NewBasePathFs(afero.NewOsFs(), "/base/path")
+```
+
+### ReadOnlyFs
+
+A thin wrapper around the source Fs providing a read only view.
+
+```go
+fs := afero.NewReadOnlyFs(afero.NewOsFs())
+_, err := fs.Create("/file.txt")
+// err = syscall.EPERM
+```
+
+### RegexpFs
+
+A filtered view on file names: any file NOT matching
+the passed regexp will be treated as non-existing.
+Files not matching the regexp provided will not be created.
+Directories are not filtered.
+
+```go
+fs := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))
+_, err := fs.Create("/file.html")
+// err = syscall.ENOENT
+```
+
+### HttpFs
+
+Afero provides an http compatible backend which can wrap any of the existing
+backends.
+
+The net/http package requires a slightly different version of Open, one
+which returns an http.File type.
+
+Afero provides an HttpFs file system which satisfies this requirement.
+Any Afero FileSystem can be used as an HttpFs.
+
+```go
+httpFs := afero.NewHttpFs(afero.NewOsFs()) // wrap any existing backend
+fileserver := http.FileServer(httpFs.Dir("/srv/static")) // illustrative path
+http.Handle("/", fileserver)
+```
+
+## Composite Backends
+
+Afero provides the ability to have two filesystems (or more) act as a single
+file system.
+
+### CacheOnReadFs
+
+The CacheOnReadFs will lazily make copies of any accessed files from the base
+layer into the overlay. Subsequent reads will be pulled from the overlay
+directly, provided the request falls within the cache duration of when the
+copy was created in the overlay.
+
+If the base filesystem is writeable, any changes to files will be
+done first to the base, then to the overlay layer.
+Write calls on open file
+handles like `Write()` or `Truncate()`, however, go to the overlay first.
+
+To write files to the overlay only, you can use the overlay Fs directly (not
+via the union Fs).
+
+Files are cached in the layer for the given time.Duration; a cache duration of 0
+means "forever", meaning the file will never be re-requested from the base.
+
+A read-only base will make the overlay also read-only, but it will still copy files
+from the base to the overlay when they're not present (or outdated) in the
+caching layer.
+
+```go
+base := afero.NewOsFs()
+layer := afero.NewMemMapFs()
+ufs := afero.NewCacheOnReadFs(base, layer, 100 * time.Second)
+```
+
+### CopyOnWriteFs
+
+The CopyOnWriteFs is a read only base file system with a potentially
+writeable layer on top.
+
+Read operations will first look in the overlay and, if not found there, will
+serve the file from the base.
+
+Changes to the file system will only be made in the overlay.
+
+Any attempt to modify a file found only in the base will copy the file to the
+overlay layer before modification (including opening a file with a writable
+handle).
+
+Removing and renaming files present only in the base layer is not currently
+permitted. If a file is present in the base layer and the overlay, only the
+overlay will be removed/renamed.
+
+```go
+	base := afero.NewOsFs()
+	roBase := afero.NewReadOnlyFs(base)
+	ufs := afero.NewCopyOnWriteFs(roBase, afero.NewMemMapFs())
+
+	fh, _ := ufs.Create("/home/test/file2.txt")
+	fh.WriteString("This is a test")
+	fh.Close()
+```
+
+In this example all write operations will only occur in memory (MemMapFs),
+leaving the base filesystem (OsFs) untouched.
+
+
+## Desired/possible backends
+
+The following is a short list of possible backends we hope someone will
+implement:
+
+* SSH
+* S3
+
+# About the project
+
+## What's in the name
+
+Afero comes from the Latin roots Ad-Facere.
+
+**"Ad"** is a prefix meaning "to".
+
+**"Facere"** is a form of the root "faciō", meaning "to make or do".
+
+The literal meaning of afero is "to make" or "to do", which seems very fitting
+for a library that allows one to make files and directories and do things with them.
+
+The English word that shares the same roots as Afero is "affair". Affair shares
+the same concept but as a noun it means "something that is made or done" or "an
+object of a particular type".
+
+It's also nice that unlike some of my other libraries (hugo, cobra, viper) it
+Googles very well.
+
+## Release Notes
+
+See the [Releases Page](https://github.com/spf13/afero/releases).
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create a new Pull Request
+
+## Contributors
+
+Names in no particular order:
+
+* [spf13](https://github.com/spf13)
+* [jaqx0r](https://github.com/jaqx0r)
+* [mbertschler](https://github.com/mbertschler)
+* [xor-gate](https://github.com/xor-gate)
+
+## License
+
+Afero is released under the Apache 2.0 license. See
+[LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt).
diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go
new file mode 100644
index 00000000..39f65852
--- /dev/null
+++ b/vendor/github.com/spf13/afero/afero.go
@@ -0,0 +1,111 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2013 tsuru authors. All rights reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package afero provides types and methods for interacting with the filesystem, +// as an abstraction layer. + +// Afero also provides a few implementations that are mostly interoperable. One that +// uses the operating system filesystem, one that uses memory to store files +// (cross platform) and an interface that should be implemented if you want to +// provide your own filesystem. + +package afero + +import ( + "errors" + "io" + "os" + "time" +) + +type Afero struct { + Fs +} + +// File represents a file in the filesystem. +type File interface { + io.Closer + io.Reader + io.ReaderAt + io.Seeker + io.Writer + io.WriterAt + + Name() string + Readdir(count int) ([]os.FileInfo, error) + Readdirnames(n int) ([]string, error) + Stat() (os.FileInfo, error) + Sync() error + Truncate(size int64) error + WriteString(s string) (ret int, err error) +} + +// Fs is the filesystem interface. +// +// Any simulated or real filesystem should implement this interface. +type Fs interface { + // Create creates a file in the filesystem, returning the file and an + // error, if any happens. + Create(name string) (File, error) + + // Mkdir creates a directory in the filesystem, return an error if any + // happens. + Mkdir(name string, perm os.FileMode) error + + // MkdirAll creates a directory path and all parents that does not exist + // yet. + MkdirAll(path string, perm os.FileMode) error + + // Open opens a file, returning it or an error, if any happens. + Open(name string) (File, error) + + // OpenFile opens a file using the given flags and the given mode. + OpenFile(name string, flag int, perm os.FileMode) (File, error) + + // Remove removes a file identified by name, returning an error, if any + // happens. + Remove(name string) error + + // RemoveAll removes a directory path and any children it contains. It + // does not fail if the path does not exist (return nil). + RemoveAll(path string) error + + // Rename renames a file. + Rename(oldname, newname string) error + + // Stat returns a FileInfo describing the named file, or an error, if any + // happens. + Stat(name string) (os.FileInfo, error) + + // The name of this FileSystem + Name() string + + // Chmod changes the mode of the named file to mode. + Chmod(name string, mode os.FileMode) error + + // Chown changes the uid and gid of the named file. 
+ Chown(name string, uid, gid int) error + + // Chtimes changes the access and modification times of the named file + Chtimes(name string, atime time.Time, mtime time.Time) error +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("out of range") + ErrTooLarge = errors.New("too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml new file mode 100644 index 00000000..65e20e8c --- /dev/null +++ b/vendor/github.com/spf13/afero/appveyor.yml @@ -0,0 +1,10 @@ +# This currently does nothing. We have moved to GitHub action, but this is kept +# until spf13 has disabled this project in AppVeyor. +version: '{build}' +clone_folder: C:\gopath\src\github.com\spf13\afero +environment: + GOPATH: C:\gopath +build_script: +- cmd: >- + go version + diff --git a/vendor/github.com/spf13/afero/basepath.go b/vendor/github.com/spf13/afero/basepath.go new file mode 100644 index 00000000..2e72793a --- /dev/null +++ b/vendor/github.com/spf13/afero/basepath.go @@ -0,0 +1,222 @@ +package afero + +import ( + "io/fs" + "os" + "path/filepath" + "runtime" + "strings" + "time" +) + +var ( + _ Lstater = (*BasePathFs)(nil) + _ fs.ReadDirFile = (*BasePathFile)(nil) +) + +// The BasePathFs restricts all operations to a given path within an Fs. +// The given file name to the operations on this Fs will be prepended with +// the base path before calling the base Fs. +// Any file name (after filepath.Clean()) outside this base path will be +// treated as non existing file. +// +// Note that it does not clean the error messages on return, so you may +// reveal the real path on errors. +type BasePathFs struct { + source Fs + path string +} + +type BasePathFile struct { + File + path string +} + +func (f *BasePathFile) Name() string { + sourcename := f.File.Name() + return strings.TrimPrefix(sourcename, filepath.Clean(f.path)) +} + +func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) { + if rdf, ok := f.File.(fs.ReadDirFile); ok { + return rdf.ReadDir(n) + } + return readDirFile{f.File}.ReadDir(n) +} + +func NewBasePathFs(source Fs, path string) Fs { + return &BasePathFs{source: source, path: path} +} + +// on a file outside the base path it returns the given file name and an error, +// else the given file with the base path prepended +func (b *BasePathFs) RealPath(name string) (path string, err error) { + if err := validateBasePathName(name); err != nil { + return name, err + } + + bpath := filepath.Clean(b.path) + path = filepath.Clean(filepath.Join(bpath, name)) + if !strings.HasPrefix(path, bpath) { + return name, os.ErrNotExist + } + + return path, nil +} + +func validateBasePathName(name string) error { + if runtime.GOOS != "windows" { + // Not much to do here; + // the virtual file paths all look absolute on *nix. + return nil + } + + // On Windows a common mistake would be to provide an absolute OS path + // We could strip out the base part, but that would not be very portable. 
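+	// Illustrative note (not in the upstream source): a caller passing an
+	// absolute Windows OS path such as `C:\data\file.txt` ends up here and
+	// is rejected by the check below, because an absolute OS path cannot
+	// meaningfully be joined onto the virtual base path.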
+ if filepath.IsAbs(name) { + return os.ErrNotExist + } + + return nil +} + +func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chtimes", Path: name, Err: err} + } + return b.source.Chtimes(name, atime, mtime) +} + +func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chmod", Path: name, Err: err} + } + return b.source.Chmod(name, mode) +} + +func (b *BasePathFs) Chown(name string, uid, gid int) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "chown", Path: name, Err: err} + } + return b.source.Chown(name, uid, gid) +} + +func (b *BasePathFs) Name() string { + return "BasePathFs" +} + +func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "stat", Path: name, Err: err} + } + return b.source.Stat(name) +} + +func (b *BasePathFs) Rename(oldname, newname string) (err error) { + if oldname, err = b.RealPath(oldname); err != nil { + return &os.PathError{Op: "rename", Path: oldname, Err: err} + } + if newname, err = b.RealPath(newname); err != nil { + return &os.PathError{Op: "rename", Path: newname, Err: err} + } + return b.source.Rename(oldname, newname) +} + +func (b *BasePathFs) RemoveAll(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove_all", Path: name, Err: err} + } + return b.source.RemoveAll(name) +} + +func (b *BasePathFs) Remove(name string) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + return b.source.Remove(name) +} + +func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "openfile", Path: name, Err: err} + } + sourcef, err := b.source.OpenFile(name, flag, mode) + if err != nil { + return nil, err + } + return &BasePathFile{sourcef, b.path}, nil +} + +func (b *BasePathFs) Open(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "open", Path: name, Err: err} + } + sourcef, err := b.source.Open(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.Mkdir(name, mode) +} + +func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) { + if name, err = b.RealPath(name); err != nil { + return &os.PathError{Op: "mkdir", Path: name, Err: err} + } + return b.source.MkdirAll(name, mode) +} + +func (b *BasePathFs) Create(name string) (f File, err error) { + if name, err = b.RealPath(name); err != nil { + return nil, &os.PathError{Op: "create", Path: name, Err: err} + } + sourcef, err := b.source.Create(name) + if err != nil { + return nil, err + } + return &BasePathFile{File: sourcef, path: b.path}, nil +} + +func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + name, err := b.RealPath(name) + if err != nil { + return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err} + } + if lstater, ok := b.source.(Lstater); ok { + return 
lstater.LstatIfPossible(name) + } + fi, err := b.source.Stat(name) + return fi, false, err +} + +func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error { + oldname, err := b.RealPath(oldname) + if err != nil { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} + } + newname, err = b.RealPath(newname) + if err != nil { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err} + } + if linker, ok := b.source.(Linker); ok { + return linker.SymlinkIfPossible(oldname, newname) + } + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} +} + +func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) { + name, err := b.RealPath(name) + if err != nil { + return "", &os.PathError{Op: "readlink", Path: name, Err: err} + } + if reader, ok := b.source.(LinkReader); ok { + return reader.ReadlinkIfPossible(name) + } + return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} +} diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go new file mode 100644 index 00000000..017d344f --- /dev/null +++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go @@ -0,0 +1,315 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +// If the cache duration is 0, cache time will be unlimited, i.e. once +// a file is in the layer, the base will never be read again for this file. +// +// For cache times greater than 0, the modification time of a file is +// checked. Note that a lot of file system implementations only allow a +// resolution of a second for timestamps... or as the godoc for os.Chtimes() +// states: "The underlying filesystem may truncate or round the values to a +// less precise time unit." +// +// This caching union will forward all write calls also to the base file +// system first. To prevent writing to the base Fs, wrap it in a read-only +// filter - Note: this will also make the overlay read-only, for writing files +// in the overlay, use the overlay Fs directly, not via the union Fs. 
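+//
+// A minimal construction sketch (illustrative; the base, layer and
+// duration are placeholders):
+//
+//	base := NewOsFs()
+//	layer := NewMemMapFs()
+//	ufs := NewCacheOnReadFs(base, layer, time.Minute)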
+type CacheOnReadFs struct { + base Fs + layer Fs + cacheTime time.Duration +} + +func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs { + return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime} +} + +type cacheState int + +const ( + // not present in the overlay, unknown if it exists in the base: + cacheMiss cacheState = iota + // present in the overlay and in base, base file is newer: + cacheStale + // present in the overlay - with cache time == 0 it may exist in the base, + // with cacheTime > 0 it exists in the base and is same age or newer in the + // overlay + cacheHit + // happens if someone writes directly to the overlay without + // going through this union + cacheLocal +) + +func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) { + var lfi, bfi os.FileInfo + lfi, err = u.layer.Stat(name) + if err == nil { + if u.cacheTime == 0 { + return cacheHit, lfi, nil + } + if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) { + bfi, err = u.base.Stat(name) + if err != nil { + return cacheLocal, lfi, nil + } + if bfi.ModTime().After(lfi.ModTime()) { + return cacheStale, bfi, nil + } + } + return cacheHit, lfi, nil + } + + if err == syscall.ENOENT || os.IsNotExist(err) { + return cacheMiss, nil, nil + } + + return cacheMiss, nil, err +} + +func (u *CacheOnReadFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CacheOnReadFs) copyFileToLayer(name string, flag int, perm os.FileMode) error { + return copyFileToLayer(u.base, u.layer, name, flag, perm) +} + +func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chtimes(name, atime, mtime) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chtimes(name, atime, mtime) + } + if err != nil { + return err + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chmod(name, mode) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chmod(name, mode) + } + if err != nil { + return err + } + return u.layer.Chmod(name, mode) +} + +func (u *CacheOnReadFs) Chown(name string, uid, gid int) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Chown(name, uid, gid) + case cacheStale, cacheMiss: + if err := u.copyToLayer(name); err != nil { + return err + } + err = u.base.Chown(name, uid, gid) + } + if err != nil { + return err + } + return u.layer.Chown(name, uid, gid) +} + +func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheMiss: + return u.base.Stat(name) + default: // cacheStale has base, cacheHit and cacheLocal the layer os.FileInfo + return fi, nil + } +} + +func (u *CacheOnReadFs) Rename(oldname, newname string) error { + st, _, err := u.cacheStatus(oldname) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit: + err = u.base.Rename(oldname, newname) + case cacheStale, cacheMiss: + if err := u.copyToLayer(oldname); err != nil { + return err + 
} + err = u.base.Rename(oldname, newname) + } + if err != nil { + return err + } + return u.layer.Rename(oldname, newname) +} + +func (u *CacheOnReadFs) Remove(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.Remove(name) + } + if err != nil { + return err + } + return u.layer.Remove(name) +} + +func (u *CacheOnReadFs) RemoveAll(name string) error { + st, _, err := u.cacheStatus(name) + if err != nil { + return err + } + switch st { + case cacheLocal: + case cacheHit, cacheStale, cacheMiss: + err = u.base.RemoveAll(name) + } + if err != nil { + return err + } + return u.layer.RemoveAll(name) +} + +func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + st, _, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + switch st { + case cacheLocal, cacheHit: + default: + if err := u.copyFileToLayer(name, flag, perm); err != nil { + return nil, err + } + } + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + bfi, err := u.base.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + lfi, err := u.layer.OpenFile(name, flag, perm) + if err != nil { + bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...? + return nil, err + } + return &UnionFile{Base: bfi, Layer: lfi}, nil + } + return u.layer.OpenFile(name, flag, perm) +} + +func (u *CacheOnReadFs) Open(name string) (File, error) { + st, fi, err := u.cacheStatus(name) + if err != nil { + return nil, err + } + + switch st { + case cacheLocal: + return u.layer.Open(name) + + case cacheMiss: + bfi, err := u.base.Stat(name) + if err != nil { + return nil, err + } + if bfi.IsDir() { + return u.base.Open(name) + } + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + + case cacheStale: + if !fi.IsDir() { + if err := u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.Open(name) + } + case cacheHit: + if !fi.IsDir() { + return u.layer.Open(name) + } + } + // the dirs from cacheHit, cacheStale fall down here: + bfile, _ := u.base.Open(name) + lfile, err := u.layer.Open(name) + if err != nil && bfile == nil { + return nil, err + } + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error { + err := u.base.Mkdir(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache +} + +func (u *CacheOnReadFs) Name() string { + return "CacheOnReadFs" +} + +func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error { + err := u.base.MkdirAll(name, perm) + if err != nil { + return err + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CacheOnReadFs) Create(name string) (File, error) { + bfh, err := u.base.Create(name) + if err != nil { + return nil, err + } + lfh, err := u.layer.Create(name) + if err != nil { + // oops, see comment about OS_TRUNC above, should we remove? then we have to + // remember if the file did not exist before + bfh.Close() + return nil, err + } + return &UnionFile{Base: bfh, Layer: lfh}, nil +} diff --git a/vendor/github.com/spf13/afero/const_bsds.go b/vendor/github.com/spf13/afero/const_bsds.go new file mode 100644 index 00000000..eed0f225 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_bsds.go @@ -0,0 +1,23 @@ +// Copyright © 2016 Steve Francia . 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly +// +build aix darwin openbsd freebsd netbsd dragonfly + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADF diff --git a/vendor/github.com/spf13/afero/const_win_unix.go b/vendor/github.com/spf13/afero/const_win_unix.go new file mode 100644 index 00000000..004d57e2 --- /dev/null +++ b/vendor/github.com/spf13/afero/const_win_unix.go @@ -0,0 +1,22 @@ +// Copyright © 2016 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix +// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix + +package afero + +import ( + "syscall" +) + +const BADFD = syscall.EBADFD diff --git a/vendor/github.com/spf13/afero/copyOnWriteFs.go b/vendor/github.com/spf13/afero/copyOnWriteFs.go new file mode 100644 index 00000000..184d6dd7 --- /dev/null +++ b/vendor/github.com/spf13/afero/copyOnWriteFs.go @@ -0,0 +1,327 @@ +package afero + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + "time" +) + +var _ Lstater = (*CopyOnWriteFs)(nil) + +// The CopyOnWriteFs is a union filesystem: a read only base file system with +// a possibly writeable layer on top. Changes to the file system will only +// be made in the overlay: Changing an existing file in the base layer which +// is not present in the overlay will copy the file to the overlay ("changing" +// includes also calls to e.g. Chtimes(), Chmod() and Chown()). +// +// Reading directories is currently only supported via Open(), not OpenFile(). 
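+//
+// A minimal construction sketch (illustrative; any pair of filesystems
+// works):
+//
+//	roBase := NewReadOnlyFs(NewOsFs())
+//	ufs := NewCopyOnWriteFs(roBase, NewMemMapFs())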
+type CopyOnWriteFs struct { + base Fs + layer Fs +} + +func NewCopyOnWriteFs(base Fs, layer Fs) Fs { + return &CopyOnWriteFs{base: base, layer: layer} +} + +// Returns true if the file is not in the overlay +func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) { + if _, err := u.layer.Stat(name); err == nil { + return false, nil + } + _, err := u.base.Stat(name) + if err != nil { + if oerr, ok := err.(*os.PathError); ok { + if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT || oerr.Err == syscall.ENOTDIR { + return false, nil + } + } + if err == syscall.ENOENT { + return false, nil + } + } + return true, err +} + +func (u *CopyOnWriteFs) copyToLayer(name string) error { + return copyToLayer(u.base, u.layer, name) +} + +func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chtimes(name, atime, mtime) +} + +func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chmod(name, mode) +} + +func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error { + b, err := u.isBaseFile(name) + if err != nil { + return err + } + if b { + if err := u.copyToLayer(name); err != nil { + return err + } + } + return u.layer.Chown(name, uid, gid) +} + +func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) { + fi, err := u.layer.Stat(name) + if err != nil { + isNotExist := u.isNotExist(err) + if isNotExist { + return u.base.Stat(name) + } + return nil, err + } + return fi, nil +} + +func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + llayer, ok1 := u.layer.(Lstater) + lbase, ok2 := u.base.(Lstater) + + if ok1 { + fi, b, err := llayer.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + + if !u.isNotExist(err) { + return nil, b, err + } + } + + if ok2 { + fi, b, err := lbase.LstatIfPossible(name) + if err == nil { + return fi, b, nil + } + if !u.isNotExist(err) { + return nil, b, err + } + } + + fi, err := u.Stat(name) + + return fi, false, err +} + +func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error { + if slayer, ok := u.layer.(Linker); ok { + return slayer.SymlinkIfPossible(oldname, newname) + } + + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} +} + +func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) { + if rlayer, ok := u.layer.(LinkReader); ok { + return rlayer.ReadlinkIfPossible(name) + } + + if rbase, ok := u.base.(LinkReader); ok { + return rbase.ReadlinkIfPossible(name) + } + + return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} +} + +func (u *CopyOnWriteFs) isNotExist(err error) bool { + if e, ok := err.(*os.PathError); ok { + err = e.Err + } + if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR { + return true + } + return false +} + +// Renaming files present only in the base layer is not permitted +func (u *CopyOnWriteFs) Rename(oldname, newname string) error { + b, err := u.isBaseFile(oldname) + if err != nil { + return err + } + if b { + return syscall.EPERM + } + return u.layer.Rename(oldname, newname) +} + +// Removing files present only in the base layer is not permitted. 
If +// a file is present in the base layer and the overlay, only the overlay +// will be removed. +func (u *CopyOnWriteFs) Remove(name string) error { + err := u.layer.Remove(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) RemoveAll(name string) error { + err := u.layer.RemoveAll(name) + switch err { + case syscall.ENOENT: + _, err = u.base.Stat(name) + if err == nil { + return syscall.EPERM + } + return syscall.ENOENT + default: + return err + } +} + +func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + if b { + if err = u.copyToLayer(name); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + dir := filepath.Dir(name) + isaDir, err := IsDir(u.base, dir) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + if isaDir { + if err = u.layer.MkdirAll(dir, 0o777); err != nil { + return nil, err + } + return u.layer.OpenFile(name, flag, perm) + } + + isaDir, err = IsDir(u.layer, dir) + if err != nil { + return nil, err + } + if isaDir { + return u.layer.OpenFile(name, flag, perm) + } + + return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOTDIR} // ...or os.ErrNotExist? + } + if b { + return u.base.OpenFile(name, flag, perm) + } + return u.layer.OpenFile(name, flag, perm) +} + +// This function handles the 9 different possibilities caused +// by the union which are the intersection of the following... +// +// layer: doesn't exist, exists as a file, and exists as a directory +// base: doesn't exist, exists as a file, and exists as a directory +func (u *CopyOnWriteFs) Open(name string) (File, error) { + // Since the overlay overrides the base we check that first + b, err := u.isBaseFile(name) + if err != nil { + return nil, err + } + + // If overlay doesn't exist, return the base (base state irrelevant) + if b { + return u.base.Open(name) + } + + // If overlay is a file, return it (base state irrelevant) + dir, err := IsDir(u.layer, name) + if err != nil { + return nil, err + } + if !dir { + return u.layer.Open(name) + } + + // Overlay is a directory, base state now matters. + // Base state has 3 states to check but 2 outcomes: + // A. It's a file or non-readable in the base (return just the overlay) + // B. It's an accessible directory in the base (return a UnionFile) + + // If base is file or nonreadable, return overlay + dir, err = IsDir(u.base, name) + if !dir || err != nil { + return u.layer.Open(name) + } + + // Both base & layer are directories + // Return union file (if opens are without error) + bfile, bErr := u.base.Open(name) + lfile, lErr := u.layer.Open(name) + + // If either have errors at this point something is very wrong. 
Return nil and the errors + if bErr != nil || lErr != nil { + return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr) + } + + return &UnionFile{Base: bfile, Layer: lfile}, nil +} + +func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + return ErrFileExists + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Name() string { + return "CopyOnWriteFs" +} + +func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error { + dir, err := IsDir(u.base, name) + if err != nil { + return u.layer.MkdirAll(name, perm) + } + if dir { + // This is in line with how os.MkdirAll behaves. + return nil + } + return u.layer.MkdirAll(name, perm) +} + +func (u *CopyOnWriteFs) Create(name string) (File, error) { + return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o666) +} diff --git a/vendor/github.com/spf13/afero/httpFs.go b/vendor/github.com/spf13/afero/httpFs.go new file mode 100644 index 00000000..ac0de6d5 --- /dev/null +++ b/vendor/github.com/spf13/afero/httpFs.go @@ -0,0 +1,114 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "errors" + "net/http" + "os" + "path" + "path/filepath" + "strings" + "time" +) + +type httpDir struct { + basePath string + fs HttpFs +} + +func (d httpDir) Open(name string) (http.File, error) { + if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) || + strings.Contains(name, "\x00") { + return nil, errors.New("http: invalid character in file path") + } + dir := string(d.basePath) + if dir == "" { + dir = "." 
+ } + + f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name)))) + if err != nil { + return nil, err + } + return f, nil +} + +type HttpFs struct { + source Fs +} + +func NewHttpFs(source Fs) *HttpFs { + return &HttpFs{source: source} +} + +func (h HttpFs) Dir(s string) *httpDir { + return &httpDir{basePath: s, fs: h} +} + +func (h HttpFs) Name() string { return "h HttpFs" } + +func (h HttpFs) Create(name string) (File, error) { + return h.source.Create(name) +} + +func (h HttpFs) Chmod(name string, mode os.FileMode) error { + return h.source.Chmod(name, mode) +} + +func (h HttpFs) Chown(name string, uid, gid int) error { + return h.source.Chown(name, uid, gid) +} + +func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return h.source.Chtimes(name, atime, mtime) +} + +func (h HttpFs) Mkdir(name string, perm os.FileMode) error { + return h.source.Mkdir(name, perm) +} + +func (h HttpFs) MkdirAll(path string, perm os.FileMode) error { + return h.source.MkdirAll(path, perm) +} + +func (h HttpFs) Open(name string) (http.File, error) { + f, err := h.source.Open(name) + if err == nil { + if httpfile, ok := f.(http.File); ok { + return httpfile, nil + } + } + return nil, err +} + +func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return h.source.OpenFile(name, flag, perm) +} + +func (h HttpFs) Remove(name string) error { + return h.source.Remove(name) +} + +func (h HttpFs) RemoveAll(path string) error { + return h.source.RemoveAll(path) +} + +func (h HttpFs) Rename(oldname, newname string) error { + return h.source.Rename(oldname, newname) +} + +func (h HttpFs) Stat(name string) (os.FileInfo, error) { + return h.source.Stat(name) +} diff --git a/vendor/github.com/spf13/afero/internal/common/adapters.go b/vendor/github.com/spf13/afero/internal/common/adapters.go new file mode 100644 index 00000000..60685caa --- /dev/null +++ b/vendor/github.com/spf13/afero/internal/common/adapters.go @@ -0,0 +1,27 @@ +// Copyright © 2022 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package common + +import "io/fs" + +// FileInfoDirEntry provides an adapter from os.FileInfo to fs.DirEntry +type FileInfoDirEntry struct { + fs.FileInfo +} + +var _ fs.DirEntry = FileInfoDirEntry{} + +func (d FileInfoDirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() } + +func (d FileInfoDirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil } diff --git a/vendor/github.com/spf13/afero/iofs.go b/vendor/github.com/spf13/afero/iofs.go new file mode 100644 index 00000000..938b9316 --- /dev/null +++ b/vendor/github.com/spf13/afero/iofs.go @@ -0,0 +1,298 @@ +//go:build go1.16 +// +build go1.16 + +package afero + +import ( + "io" + "io/fs" + "os" + "path" + "sort" + "time" + + "github.com/spf13/afero/internal/common" +) + +// IOFS adopts afero.Fs to stdlib io/fs.FS +type IOFS struct { + Fs +} + +func NewIOFS(fs Fs) IOFS { + return IOFS{Fs: fs} +} + +var ( + _ fs.FS = IOFS{} + _ fs.GlobFS = IOFS{} + _ fs.ReadDirFS = IOFS{} + _ fs.ReadFileFS = IOFS{} + _ fs.StatFS = IOFS{} + _ fs.SubFS = IOFS{} +) + +func (iofs IOFS) Open(name string) (fs.File, error) { + const op = "open" + + // by convention for fs.FS implementations we should perform this check + if !fs.ValidPath(name) { + return nil, iofs.wrapError(op, name, fs.ErrInvalid) + } + + file, err := iofs.Fs.Open(name) + if err != nil { + return nil, iofs.wrapError(op, name, err) + } + + // file should implement fs.ReadDirFile + if _, ok := file.(fs.ReadDirFile); !ok { + file = readDirFile{file} + } + + return file, nil +} + +func (iofs IOFS) Glob(pattern string) ([]string, error) { + const op = "glob" + + // afero.Glob does not perform this check but it's required for implementations + if _, err := path.Match(pattern, ""); err != nil { + return nil, iofs.wrapError(op, pattern, err) + } + + items, err := Glob(iofs.Fs, pattern) + if err != nil { + return nil, iofs.wrapError(op, pattern, err) + } + + return items, nil +} + +func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) { + f, err := iofs.Fs.Open(name) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + + defer f.Close() + + if rdf, ok := f.(fs.ReadDirFile); ok { + items, err := rdf.ReadDir(-1) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + sort.Slice(items, func(i, j int) bool { return items[i].Name() < items[j].Name() }) + return items, nil + } + + items, err := f.Readdir(-1) + if err != nil { + return nil, iofs.wrapError("readdir", name, err) + } + sort.Sort(byName(items)) + + ret := make([]fs.DirEntry, len(items)) + for i := range items { + ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} + } + + return ret, nil +} + +func (iofs IOFS) ReadFile(name string) ([]byte, error) { + const op = "readfile" + + if !fs.ValidPath(name) { + return nil, iofs.wrapError(op, name, fs.ErrInvalid) + } + + bytes, err := ReadFile(iofs.Fs, name) + if err != nil { + return nil, iofs.wrapError(op, name, err) + } + + return bytes, nil +} + +func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil } + +func (IOFS) wrapError(op, path string, err error) error { + if _, ok := err.(*fs.PathError); ok { + return err // don't need to wrap again + } + + return &fs.PathError{ + Op: op, + Path: path, + Err: err, + } +} + +// readDirFile provides adapter from afero.File to fs.ReadDirFile needed for correct Open +type readDirFile struct { + File +} + +var _ fs.ReadDirFile = readDirFile{} + +func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) { + items, err := r.File.Readdir(n) + if err != nil { + 
return nil, err + } + + ret := make([]fs.DirEntry, len(items)) + for i := range items { + ret[i] = common.FileInfoDirEntry{FileInfo: items[i]} + } + + return ret, nil +} + +// FromIOFS adopts io/fs.FS to use it as afero.Fs +// Note that io/fs.FS is read-only so all mutating methods will return fs.PathError with fs.ErrPermission +// To store modifications you may use afero.CopyOnWriteFs +type FromIOFS struct { + fs.FS +} + +var _ Fs = FromIOFS{} + +func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) } + +func (f FromIOFS) Mkdir(name string, perm os.FileMode) error { return notImplemented("mkdir", name) } + +func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error { + return notImplemented("mkdirall", path) +} + +func (f FromIOFS) Open(name string) (File, error) { + file, err := f.FS.Open(name) + if err != nil { + return nil, err + } + + return fromIOFSFile{File: file, name: name}, nil +} + +func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + return f.Open(name) +} + +func (f FromIOFS) Remove(name string) error { + return notImplemented("remove", name) +} + +func (f FromIOFS) RemoveAll(path string) error { + return notImplemented("removeall", path) +} + +func (f FromIOFS) Rename(oldname, newname string) error { + return notImplemented("rename", oldname) +} + +func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) } + +func (f FromIOFS) Name() string { return "fromiofs" } + +func (f FromIOFS) Chmod(name string, mode os.FileMode) error { + return notImplemented("chmod", name) +} + +func (f FromIOFS) Chown(name string, uid, gid int) error { + return notImplemented("chown", name) +} + +func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error { + return notImplemented("chtimes", name) +} + +type fromIOFSFile struct { + fs.File + name string +} + +func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) { + readerAt, ok := f.File.(io.ReaderAt) + if !ok { + return -1, notImplemented("readat", f.name) + } + + return readerAt.ReadAt(p, off) +} + +func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) { + seeker, ok := f.File.(io.Seeker) + if !ok { + return -1, notImplemented("seek", f.name) + } + + return seeker.Seek(offset, whence) +} + +func (f fromIOFSFile) Write(p []byte) (n int, err error) { + return -1, notImplemented("write", f.name) +} + +func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) { + return -1, notImplemented("writeat", f.name) +} + +func (f fromIOFSFile) Name() string { return f.name } + +func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) { + rdfile, ok := f.File.(fs.ReadDirFile) + if !ok { + return nil, notImplemented("readdir", f.name) + } + + entries, err := rdfile.ReadDir(count) + if err != nil { + return nil, err + } + + ret := make([]os.FileInfo, len(entries)) + for i := range entries { + ret[i], err = entries[i].Info() + + if err != nil { + return nil, err + } + } + + return ret, nil +} + +func (f fromIOFSFile) Readdirnames(n int) ([]string, error) { + rdfile, ok := f.File.(fs.ReadDirFile) + if !ok { + return nil, notImplemented("readdir", f.name) + } + + entries, err := rdfile.ReadDir(n) + if err != nil { + return nil, err + } + + ret := make([]string, len(entries)) + for i := range entries { + ret[i] = entries[i].Name() + } + + return ret, nil +} + +func (f fromIOFSFile) Sync() error { return nil } + +func (f fromIOFSFile) Truncate(size int64) error { + return 
notImplemented("truncate", f.name) +} + +func (f fromIOFSFile) WriteString(s string) (ret int, err error) { + return -1, notImplemented("writestring", f.name) +} + +func notImplemented(op, path string) error { + return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission} +} diff --git a/vendor/github.com/spf13/afero/ioutil.go b/vendor/github.com/spf13/afero/ioutil.go new file mode 100644 index 00000000..fa6abe1e --- /dev/null +++ b/vendor/github.com/spf13/afero/ioutil.go @@ -0,0 +1,243 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "bytes" + "io" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +// byName implements sort.Interface. +type byName []os.FileInfo + +func (f byName) Len() int { return len(f) } +func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() } +func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] } + +// ReadDir reads the directory named by dirname and returns +// a list of sorted directory entries. +func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) { + return ReadDir(a.Fs, dirname) +} + +func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + list, err := f.Readdir(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Sort(byName(list)) + return list, nil +} + +// ReadFile reads the file named by filename and returns the contents. +// A successful call returns err == nil, not err == EOF. Because ReadFile +// reads the whole file, it does not treat an EOF from Read as an error +// to be reported. +func (a Afero) ReadFile(filename string) ([]byte, error) { + return ReadFile(a.Fs, filename) +} + +func ReadFile(fs Fs, filename string) ([]byte, error) { + f, err := fs.Open(filename) + if err != nil { + return nil, err + } + defer f.Close() + // It's a good but not certain bet that FileInfo will tell us exactly how much to + // read, so let's try it but be prepared for the answer to be wrong. + var n int64 + + if fi, err := f.Stat(); err == nil { + // Don't preallocate a huge buffer, just in case. + if size := fi.Size(); size < 1e9 { + n = size + } + } + // As initial capacity for readAll, use n + a little extra in case Size is zero, + // and to avoid another allocation after Read has filled the buffer. The readAll + // call will read into its allocated internal buffer cheaply. If the size was + // wrong, we'll either waste some space off the end or reallocate as needed, but + // in the overwhelmingly common case we'll get it just right. + return readAll(f, n+bytes.MinRead) +} + +// readAll reads from r until an error or EOF and returns the data it read +// from the internal buffer allocated with a specified capacity. +func readAll(r io.Reader, capacity int64) (b []byte, err error) { + buf := bytes.NewBuffer(make([]byte, 0, capacity)) + // If the buffer overflows, we will get bytes.ErrTooLarge. 
+ // Return that as an error. Any other panic remains. + defer func() { + e := recover() + if e == nil { + return + } + if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge { + err = panicErr + } else { + panic(e) + } + }() + _, err = buf.ReadFrom(r) + return buf.Bytes(), err +} + +// ReadAll reads from r until an error or EOF and returns the data it read. +// A successful call returns err == nil, not err == EOF. Because ReadAll is +// defined to read from src until EOF, it does not treat an EOF from Read +// as an error to be reported. +func ReadAll(r io.Reader) ([]byte, error) { + return readAll(r, bytes.MinRead) +} + +// WriteFile writes data to a file named by filename. +// If the file does not exist, WriteFile creates it with permissions perm; +// otherwise WriteFile truncates it before writing. +func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error { + return WriteFile(a.Fs, filename, data, perm) +} + +func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error { + f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +// Random number state. +// We generate random temporary file names so that there's a good +// chance the file doesn't exist yet - keeps the number of tries in +// TempFile to a minimum. +var ( + randNum uint32 + randmu sync.Mutex +) + +func reseed() uint32 { + return uint32(time.Now().UnixNano() + int64(os.Getpid())) +} + +func nextRandom() string { + randmu.Lock() + r := randNum + if r == 0 { + r = reseed() + } + r = r*1664525 + 1013904223 // constants from Numerical Recipes + randNum = r + randmu.Unlock() + return strconv.Itoa(int(1e9 + r%1e9))[1:] +} + +// TempFile creates a new temporary file in the directory dir, +// opens the file for reading and writing, and returns the resulting *os.File. +// The filename is generated by taking pattern and adding a random +// string to the end. If pattern includes a "*", the random string +// replaces the last "*". +// If dir is the empty string, TempFile uses the default directory +// for temporary files (see os.TempDir). +// Multiple programs calling TempFile simultaneously +// will not choose the same file. The caller can use f.Name() +// to find the pathname of the file. It is the caller's responsibility +// to remove the file when no longer needed. +func (a Afero) TempFile(dir, pattern string) (f File, err error) { + return TempFile(a.Fs, dir, pattern) +} + +func TempFile(fs Fs, dir, pattern string) (f File, err error) { + if dir == "" { + dir = os.TempDir() + } + + var prefix, suffix string + if pos := strings.LastIndex(pattern, "*"); pos != -1 { + prefix, suffix = pattern[:pos], pattern[pos+1:] + } else { + prefix = pattern + } + + nconflict := 0 + for i := 0; i < 10000; i++ { + name := filepath.Join(dir, prefix+nextRandom()+suffix) + f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600) + if os.IsExist(err) { + if nconflict++; nconflict > 10 { + randmu.Lock() + randNum = reseed() + randmu.Unlock() + } + continue + } + break + } + return +} + +// TempDir creates a new temporary directory in the directory dir +// with a name beginning with prefix and returns the path of the +// new directory. If dir is the empty string, TempDir uses the +// default directory for temporary files (see os.TempDir). 
+// Multiple programs calling TempDir simultaneously
+// will not choose the same directory. It is the caller's responsibility
+// to remove the directory when no longer needed.
+func (a Afero) TempDir(dir, prefix string) (name string, err error) {
+	return TempDir(a.Fs, dir, prefix)
+}
+
+func TempDir(fs Fs, dir, prefix string) (name string, err error) {
+	if dir == "" {
+		dir = os.TempDir()
+	}
+
+	nconflict := 0
+	for i := 0; i < 10000; i++ {
+		try := filepath.Join(dir, prefix+nextRandom())
+		err = fs.Mkdir(try, 0o700)
+		if os.IsExist(err) {
+			if nconflict++; nconflict > 10 {
+				randmu.Lock()
+				randNum = reseed()
+				randmu.Unlock()
+			}
+			continue
+		}
+		if err == nil {
+			name = try
+		}
+		break
+	}
+	return
+}
diff --git a/vendor/github.com/spf13/afero/lstater.go b/vendor/github.com/spf13/afero/lstater.go
new file mode 100644
index 00000000..89c1bfc0
--- /dev/null
+++ b/vendor/github.com/spf13/afero/lstater.go
@@ -0,0 +1,27 @@
+// Copyright © 2018 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"os"
+)
+
+// Lstater is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
+// Else it will call Stat.
+// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
+type Lstater interface {
+	LstatIfPossible(name string) (os.FileInfo, bool, error)
+}
diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go
new file mode 100644
index 00000000..7db4b7de
--- /dev/null
+++ b/vendor/github.com/spf13/afero/match.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia .
+// Copyright 2009 The Go Authors. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"path/filepath"
+	"sort"
+	"strings"
+)
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
+// built-ins from that package.
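+//
+// A minimal usage sketch (the file names are illustrative only):
+//
+//	appFs := NewMemMapFs()
+//	_ = WriteFile(appFs, "/logs/app.txt", []byte("a"), 0o644)
+//	_ = WriteFile(appFs, "/logs/app.log", []byte("b"), 0o644)
+//	matches, err := Glob(appFs, "/logs/*.txt")
+//	// matches == []string{"/logs/app.txt"}, err == nil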
+func Glob(fs Fs, pattern string) (matches []string, err error) {
+	if !hasMeta(pattern) {
+		// Lstat not supported by all filesystems.
+		if _, err = lstatIfPossible(fs, pattern); err != nil {
+			return nil, nil
+		}
+		return []string{pattern}, nil
+	}
+
+	dir, file := filepath.Split(pattern)
+	switch dir {
+	case "":
+		dir = "."
+	case string(filepath.Separator):
+		// nothing
+	default:
+		dir = dir[0 : len(dir)-1] // chop off trailing separator
+	}
+
+	if !hasMeta(dir) {
+		return glob(fs, dir, file, nil)
+	}
+
+	var m []string
+	m, err = Glob(fs, dir)
+	if err != nil {
+		return
+	}
+	for _, d := range m {
+		matches, err = glob(fs, d, file, matches)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
+	m = matches
+	fi, err := fs.Stat(dir)
+	if err != nil {
+		return
+	}
+	if !fi.IsDir() {
+		return
+	}
+	d, err := fs.Open(dir)
+	if err != nil {
+		return
+	}
+	defer d.Close()
+
+	names, _ := d.Readdirnames(-1)
+	sort.Strings(names)
+
+	for _, n := range names {
+		matched, err := filepath.Match(pattern, n)
+		if err != nil {
+			return m, err
+		}
+		if matched {
+			m = append(m, filepath.Join(dir, n))
+		}
+	}
+	return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+	// TODO(niemeyer): Should other magic characters be added here?
+	return strings.ContainsAny(path, "*?[")
+}
diff --git a/vendor/github.com/spf13/afero/mem/dir.go b/vendor/github.com/spf13/afero/mem/dir.go
new file mode 100644
index 00000000..e104013f
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dir.go
@@ -0,0 +1,37 @@
+// Copyright © 2014 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package mem
+
+type Dir interface {
+	Len() int
+	Names() []string
+	Files() []*FileData
+	Add(*FileData)
+	Remove(*FileData)
+}
+
+func RemoveFromMemDir(dir *FileData, f *FileData) {
+	dir.memDir.Remove(f)
+}
+
+func AddToMemDir(dir *FileData, f *FileData) {
+	dir.memDir.Add(f)
+}
+
+func InitializeDir(d *FileData) {
+	if d.memDir == nil {
+		d.dir = true
+		d.memDir = &DirMap{}
+	}
+}
diff --git a/vendor/github.com/spf13/afero/mem/dirmap.go b/vendor/github.com/spf13/afero/mem/dirmap.go
new file mode 100644
index 00000000..03a57ee5
--- /dev/null
+++ b/vendor/github.com/spf13/afero/mem/dirmap.go
@@ -0,0 +1,43 @@
+// Copyright © 2015 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package mem + +import "sort" + +type DirMap map[string]*FileData + +func (m DirMap) Len() int { return len(m) } +func (m DirMap) Add(f *FileData) { m[f.name] = f } +func (m DirMap) Remove(f *FileData) { delete(m, f.name) } +func (m DirMap) Files() (files []*FileData) { + for _, f := range m { + files = append(files, f) + } + sort.Sort(filesSorter(files)) + return files +} + +// implement sort.Interface for []*FileData +type filesSorter []*FileData + +func (s filesSorter) Len() int { return len(s) } +func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name } + +func (m DirMap) Names() (names []string) { + for x := range m { + names = append(names, x) + } + return names +} diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go new file mode 100644 index 00000000..62fe4498 --- /dev/null +++ b/vendor/github.com/spf13/afero/mem/file.go @@ -0,0 +1,359 @@ +// Copyright © 2015 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package mem + +import ( + "bytes" + "errors" + "io" + "io/fs" + "os" + "path/filepath" + "sync" + "sync/atomic" + "time" + + "github.com/spf13/afero/internal/common" +) + +const FilePathSeparator = string(filepath.Separator) + +var _ fs.ReadDirFile = &File{} + +type File struct { + // atomic requires 64-bit alignment for struct field access + at int64 + readDirCount int64 + closed bool + readOnly bool + fileData *FileData +} + +func NewFileHandle(data *FileData) *File { + return &File{fileData: data} +} + +func NewReadOnlyFileHandle(data *FileData) *File { + return &File{fileData: data, readOnly: true} +} + +func (f File) Data() *FileData { + return f.fileData +} + +type FileData struct { + sync.Mutex + name string + data []byte + memDir Dir + dir bool + mode os.FileMode + modtime time.Time + uid int + gid int +} + +func (d *FileData) Name() string { + d.Lock() + defer d.Unlock() + return d.name +} + +func CreateFile(name string) *FileData { + return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()} +} + +func CreateDir(name string) *FileData { + return &FileData{name: name, memDir: &DirMap{}, dir: true, modtime: time.Now()} +} + +func ChangeFileName(f *FileData, newname string) { + f.Lock() + f.name = newname + f.Unlock() +} + +func SetMode(f *FileData, mode os.FileMode) { + f.Lock() + f.mode = mode + f.Unlock() +} + +func SetModTime(f *FileData, mtime time.Time) { + f.Lock() + setModTime(f, mtime) + f.Unlock() +} + +func setModTime(f *FileData, mtime time.Time) { + f.modtime = mtime +} + +func SetUID(f *FileData, uid int) { + f.Lock() + f.uid = uid + f.Unlock() +} + +func SetGID(f *FileData, gid int) { + f.Lock() + f.gid = gid + f.Unlock() +} + +func GetFileInfo(f *FileData) *FileInfo { + return &FileInfo{f} +} + +func (f *File) Open() error { + atomic.StoreInt64(&f.at, 0) + atomic.StoreInt64(&f.readDirCount, 0) + f.fileData.Lock() + f.closed = false + f.fileData.Unlock() + return nil +} + +func (f *File) Close() error { + f.fileData.Lock() + f.closed = true + if !f.readOnly { + setModTime(f.fileData, time.Now()) + } + f.fileData.Unlock() + return nil +} + +func (f *File) Name() string { + return f.fileData.Name() +} + +func (f *File) Stat() (os.FileInfo, error) { + return &FileInfo{f.fileData}, nil +} + +func (f *File) Sync() error { + return nil +} + +func (f *File) Readdir(count int) (res []os.FileInfo, err error) { + if !f.fileData.dir { + return nil, &os.PathError{Op: "readdir", Path: f.fileData.name, Err: errors.New("not a dir")} + } + var outLength int64 + + f.fileData.Lock() + files := f.fileData.memDir.Files()[f.readDirCount:] + if count > 0 { + if len(files) < count { + outLength = int64(len(files)) + } else { + outLength = int64(count) + } + if len(files) == 0 { + err = io.EOF + } + } else { + outLength = int64(len(files)) + } + f.readDirCount += outLength + f.fileData.Unlock() + + res = make([]os.FileInfo, outLength) + for i := range res { + res[i] = &FileInfo{files[i]} + } + + return res, err +} + +func (f *File) Readdirnames(n int) (names []string, err error) { + fi, err := f.Readdir(n) + names = make([]string, len(fi)) + for i, f := range fi { + _, names[i] = filepath.Split(f.Name()) + } + return names, err +} + +// Implements fs.ReadDirFile +func (f *File) ReadDir(n int) ([]fs.DirEntry, error) { + fi, err := f.Readdir(n) + if err != nil { + return nil, err + } + di := make([]fs.DirEntry, len(fi)) + for i, f := range fi { + di[i] = common.FileInfoDirEntry{FileInfo: f} + } + return di, nil +} + +func (f *File) Read(b []byte) (n int, err error) { + 
f.fileData.Lock() + defer f.fileData.Unlock() + if f.closed { + return 0, ErrFileClosed + } + if len(b) > 0 && int(f.at) == len(f.fileData.data) { + return 0, io.EOF + } + if int(f.at) > len(f.fileData.data) { + return 0, io.ErrUnexpectedEOF + } + if len(f.fileData.data)-int(f.at) >= len(b) { + n = len(b) + } else { + n = len(f.fileData.data) - int(f.at) + } + copy(b, f.fileData.data[f.at:f.at+int64(n)]) + atomic.AddInt64(&f.at, int64(n)) + return +} + +func (f *File) ReadAt(b []byte, off int64) (n int, err error) { + prev := atomic.LoadInt64(&f.at) + atomic.StoreInt64(&f.at, off) + n, err = f.Read(b) + atomic.StoreInt64(&f.at, prev) + return +} + +func (f *File) Truncate(size int64) error { + if f.closed { + return ErrFileClosed + } + if f.readOnly { + return &os.PathError{Op: "truncate", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + if size < 0 { + return ErrOutOfRange + } + f.fileData.Lock() + defer f.fileData.Unlock() + if size > int64(len(f.fileData.data)) { + diff := size - int64(len(f.fileData.data)) + f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{0o0}, int(diff))...) + } else { + f.fileData.data = f.fileData.data[0:size] + } + setModTime(f.fileData, time.Now()) + return nil +} + +func (f *File) Seek(offset int64, whence int) (int64, error) { + if f.closed { + return 0, ErrFileClosed + } + switch whence { + case io.SeekStart: + atomic.StoreInt64(&f.at, offset) + case io.SeekCurrent: + atomic.AddInt64(&f.at, offset) + case io.SeekEnd: + atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset) + } + return f.at, nil +} + +func (f *File) Write(b []byte) (n int, err error) { + if f.closed { + return 0, ErrFileClosed + } + if f.readOnly { + return 0, &os.PathError{Op: "write", Path: f.fileData.name, Err: errors.New("file handle is read only")} + } + n = len(b) + cur := atomic.LoadInt64(&f.at) + f.fileData.Lock() + defer f.fileData.Unlock() + diff := cur - int64(len(f.fileData.data)) + var tail []byte + if n+int(cur) < len(f.fileData.data) { + tail = f.fileData.data[n+int(cur):] + } + if diff > 0 { + f.fileData.data = append(f.fileData.data, append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...) + f.fileData.data = append(f.fileData.data, tail...) + } else { + f.fileData.data = append(f.fileData.data[:cur], b...) + f.fileData.data = append(f.fileData.data, tail...) 
+ } + setModTime(f.fileData, time.Now()) + + atomic.AddInt64(&f.at, int64(n)) + return +} + +func (f *File) WriteAt(b []byte, off int64) (n int, err error) { + atomic.StoreInt64(&f.at, off) + return f.Write(b) +} + +func (f *File) WriteString(s string) (ret int, err error) { + return f.Write([]byte(s)) +} + +func (f *File) Info() *FileInfo { + return &FileInfo{f.fileData} +} + +type FileInfo struct { + *FileData +} + +// Implements os.FileInfo +func (s *FileInfo) Name() string { + s.Lock() + _, name := filepath.Split(s.name) + s.Unlock() + return name +} + +func (s *FileInfo) Mode() os.FileMode { + s.Lock() + defer s.Unlock() + return s.mode +} + +func (s *FileInfo) ModTime() time.Time { + s.Lock() + defer s.Unlock() + return s.modtime +} + +func (s *FileInfo) IsDir() bool { + s.Lock() + defer s.Unlock() + return s.dir +} +func (s *FileInfo) Sys() interface{} { return nil } +func (s *FileInfo) Size() int64 { + if s.IsDir() { + return int64(42) + } + s.Lock() + defer s.Unlock() + return int64(len(s.data)) +} + +var ( + ErrFileClosed = errors.New("File is closed") + ErrOutOfRange = errors.New("out of range") + ErrTooLarge = errors.New("too large") + ErrFileNotFound = os.ErrNotExist + ErrFileExists = os.ErrExist + ErrDestinationExists = os.ErrExist +) diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go new file mode 100644 index 00000000..3f4ef42d --- /dev/null +++ b/vendor/github.com/spf13/afero/memmap.go @@ -0,0 +1,421 @@ +// Copyright © 2014 Steve Francia . +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "fmt" + "log" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/spf13/afero/mem" +) + +const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod() + +type MemMapFs struct { + mu sync.RWMutex + data map[string]*mem.FileData + init sync.Once +} + +func NewMemMapFs() Fs { + return &MemMapFs{} +} + +func (m *MemMapFs) getData() map[string]*mem.FileData { + m.init.Do(func() { + m.data = make(map[string]*mem.FileData) + // Root should always exist, right? + // TODO: what about windows? 
+		root := mem.CreateDir(FilePathSeparator)
+		mem.SetMode(root, os.ModeDir|0o755)
+		m.data[FilePathSeparator] = root
+	})
+	return m.data
+}
+
+func (*MemMapFs) Name() string { return "MemMapFS" }
+
+func (m *MemMapFs) Create(name string) (File, error) {
+	name = normalizePath(name)
+	m.mu.Lock()
+	file := mem.CreateFile(name)
+	m.getData()[name] = file
+	m.registerWithParent(file, 0)
+	m.mu.Unlock()
+	return mem.NewFileHandle(file), nil
+}
+
+func (m *MemMapFs) unRegisterWithParent(fileName string) error {
+	f, err := m.lockfreeOpen(fileName)
+	if err != nil {
+		return err
+	}
+	parent := m.findParent(f)
+	if parent == nil {
+		log.Panic("parent of ", f.Name(), " is nil")
+	}
+
+	parent.Lock()
+	mem.RemoveFromMemDir(parent, f)
+	parent.Unlock()
+	return nil
+}
+
+func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
+	pdir, _ := filepath.Split(f.Name())
+	pdir = filepath.Clean(pdir)
+	pfile, err := m.lockfreeOpen(pdir)
+	if err != nil {
+		return nil
+	}
+	return pfile
+}
+
+func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) {
+	if f == nil {
+		return
+	}
+	parent := m.findParent(f)
+	if parent == nil {
+		pdir := filepath.Dir(filepath.Clean(f.Name()))
+		err := m.lockfreeMkdir(pdir, perm)
+		if err != nil {
+			// log.Println("Mkdir error:", err)
+			return
+		}
+		parent, err = m.lockfreeOpen(pdir)
+		if err != nil {
+			// log.Println("Open after Mkdir error:", err)
+			return
+		}
+	}
+
+	parent.Lock()
+	mem.InitializeDir(parent)
+	mem.AddToMemDir(parent, f)
+	parent.Unlock()
+}
+
+func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
+	name = normalizePath(name)
+	x, ok := m.getData()[name]
+	if ok {
+		// Only return ErrFileExists if it's a file, not a directory.
+		i := mem.FileInfo{FileData: x}
+		if !i.IsDir() {
+			return ErrFileExists
+		}
+	} else {
+		item := mem.CreateDir(name)
+		mem.SetMode(item, os.ModeDir|perm)
+		m.getData()[name] = item
+		m.registerWithParent(item, perm)
+	}
+	return nil
+}
+
+func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
+	perm &= chmodBits
+	name = normalizePath(name)
+
+	m.mu.RLock()
+	_, ok := m.getData()[name]
+	m.mu.RUnlock()
+	if ok {
+		return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
+	}
+
+	m.mu.Lock()
+	// Double check that it doesn't exist.
+ if _, ok := m.getData()[name]; ok { + m.mu.Unlock() + return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists} + } + item := mem.CreateDir(name) + mem.SetMode(item, os.ModeDir|perm) + m.getData()[name] = item + m.registerWithParent(item, perm) + m.mu.Unlock() + + return m.setFileMode(name, perm|os.ModeDir) +} + +func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error { + err := m.Mkdir(path, perm) + if err != nil { + if err.(*os.PathError).Err == ErrFileExists { + return nil + } + return err + } + return nil +} + +// Handle some relative paths +func normalizePath(path string) string { + path = filepath.Clean(path) + + switch path { + case ".": + return FilePathSeparator + case "..": + return FilePathSeparator + default: + return path + } +} + +func (m *MemMapFs) Open(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewReadOnlyFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) openWrite(name string) (File, error) { + f, err := m.open(name) + if f != nil { + return mem.NewFileHandle(f), err + } + return nil, err +} + +func (m *MemMapFs) open(name string) (*mem.FileData, error) { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound} + } + return f, nil +} + +func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) { + name = normalizePath(name) + f, ok := m.getData()[name] + if ok { + return f, nil + } else { + return nil, ErrFileNotFound + } +} + +func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + perm &= chmodBits + chmod := false + file, err := m.openWrite(name) + if err == nil && (flag&os.O_EXCL > 0) { + return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists} + } + if os.IsNotExist(err) && (flag&os.O_CREATE > 0) { + file, err = m.Create(name) + chmod = true + } + if err != nil { + return nil, err + } + if flag == os.O_RDONLY { + file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data()) + } + if flag&os.O_APPEND > 0 { + _, err = file.Seek(0, os.SEEK_END) + if err != nil { + file.Close() + return nil, err + } + } + if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 { + err = file.Truncate(0) + if err != nil { + file.Close() + return nil, err + } + } + if chmod { + return file, m.setFileMode(name, perm) + } + return file, nil +} + +func (m *MemMapFs) Remove(name string) error { + name = normalizePath(name) + + m.mu.Lock() + defer m.mu.Unlock() + + if _, ok := m.getData()[name]; ok { + err := m.unRegisterWithParent(name) + if err != nil { + return &os.PathError{Op: "remove", Path: name, Err: err} + } + delete(m.getData(), name) + } else { + return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist} + } + return nil +} + +func (m *MemMapFs) RemoveAll(path string) error { + path = normalizePath(path) + m.mu.Lock() + m.unRegisterWithParent(path) + m.mu.Unlock() + + m.mu.RLock() + defer m.mu.RUnlock() + + for p := range m.getData() { + if p == path || strings.HasPrefix(p, path+FilePathSeparator) { + m.mu.RUnlock() + m.mu.Lock() + delete(m.getData(), p) + m.mu.Unlock() + m.mu.RLock() + } + } + return nil +} + +func (m *MemMapFs) Rename(oldname, newname string) error { + oldname = normalizePath(oldname) + newname = normalizePath(newname) + + if oldname == newname { + return nil + } + + m.mu.RLock() + defer m.mu.RUnlock() + if _, ok := m.getData()[oldname]; ok { + m.mu.RUnlock() + m.mu.Lock() + m.unRegisterWithParent(oldname) + 
fileData := m.getData()[oldname] + delete(m.getData(), oldname) + mem.ChangeFileName(fileData, newname) + m.getData()[newname] = fileData + m.registerWithParent(fileData, 0) + m.mu.Unlock() + m.mu.RLock() + } else { + return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound} + } + + for p, fileData := range m.getData() { + if strings.HasPrefix(p, oldname+FilePathSeparator) { + m.mu.RUnlock() + m.mu.Lock() + delete(m.getData(), p) + p := strings.Replace(p, oldname, newname, 1) + m.getData()[p] = fileData + m.mu.Unlock() + m.mu.RLock() + } + } + return nil +} + +func (m *MemMapFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + fileInfo, err := m.Stat(name) + return fileInfo, false, err +} + +func (m *MemMapFs) Stat(name string) (os.FileInfo, error) { + f, err := m.Open(name) + if err != nil { + return nil, err + } + fi := mem.GetFileInfo(f.(*mem.File).Data()) + return fi, nil +} + +func (m *MemMapFs) Chmod(name string, mode os.FileMode) error { + mode &= chmodBits + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits + + mode = prevOtherBits | mode + return m.setFileMode(name, mode) +} + +func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetMode(f, mode) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) Chown(name string, uid, gid int) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound} + } + + mem.SetUID(f, uid) + mem.SetGID(f, gid) + + return nil +} + +func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + name = normalizePath(name) + + m.mu.RLock() + f, ok := m.getData()[name] + m.mu.RUnlock() + if !ok { + return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound} + } + + m.mu.Lock() + mem.SetModTime(f, mtime) + m.mu.Unlock() + + return nil +} + +func (m *MemMapFs) List() { + for _, x := range m.data { + y := mem.FileInfo{FileData: x} + fmt.Println(x.Name(), y.Size()) + } +} diff --git a/vendor/github.com/spf13/afero/os.go b/vendor/github.com/spf13/afero/os.go new file mode 100644 index 00000000..f1366321 --- /dev/null +++ b/vendor/github.com/spf13/afero/os.go @@ -0,0 +1,113 @@ +// Copyright © 2014 Steve Francia . +// Copyright 2013 tsuru authors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "time" +) + +var _ Lstater = (*OsFs)(nil) + +// OsFs is a Fs implementation that uses functions provided by the os package. +// +// For details in any method, check the documentation of the os package +// (http://golang.org/pkg/os/). 
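+//
+// A short sketch of typical use through the Fs interface (the path and
+// contents are illustrative assumptions):
+//
+//	var appFs Fs = NewOsFs()
+//	f, err := appFs.OpenFile("/tmp/example.txt", os.O_RDWR|os.O_CREATE, 0o644)
+//	if err == nil {
+//		_, _ = f.WriteString("hello")
+//		_ = f.Close()
+//	}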
+type OsFs struct{} + +func NewOsFs() Fs { + return &OsFs{} +} + +func (OsFs) Name() string { return "OsFs" } + +func (OsFs) Create(name string) (File, error) { + f, e := os.Create(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Mkdir(name string, perm os.FileMode) error { + return os.Mkdir(name, perm) +} + +func (OsFs) MkdirAll(path string, perm os.FileMode) error { + return os.MkdirAll(path, perm) +} + +func (OsFs) Open(name string) (File, error) { + f, e := os.Open(name) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + f, e := os.OpenFile(name, flag, perm) + if f == nil { + // while this looks strange, we need to return a bare nil (of type nil) not + // a nil value of type *os.File or nil won't be nil + return nil, e + } + return f, e +} + +func (OsFs) Remove(name string) error { + return os.Remove(name) +} + +func (OsFs) RemoveAll(path string) error { + return os.RemoveAll(path) +} + +func (OsFs) Rename(oldname, newname string) error { + return os.Rename(oldname, newname) +} + +func (OsFs) Stat(name string) (os.FileInfo, error) { + return os.Stat(name) +} + +func (OsFs) Chmod(name string, mode os.FileMode) error { + return os.Chmod(name, mode) +} + +func (OsFs) Chown(name string, uid, gid int) error { + return os.Chown(name, uid, gid) +} + +func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error { + return os.Chtimes(name, atime, mtime) +} + +func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + fi, err := os.Lstat(name) + return fi, true, err +} + +func (OsFs) SymlinkIfPossible(oldname, newname string) error { + return os.Symlink(oldname, newname) +} + +func (OsFs) ReadlinkIfPossible(name string) (string, error) { + return os.Readlink(name) +} diff --git a/vendor/github.com/spf13/afero/path.go b/vendor/github.com/spf13/afero/path.go new file mode 100644 index 00000000..18f60a0f --- /dev/null +++ b/vendor/github.com/spf13/afero/path.go @@ -0,0 +1,106 @@ +// Copyright ©2015 The Go Authors +// Copyright ©2015 Steve Francia +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package afero + +import ( + "os" + "path/filepath" + "sort" +) + +// readDirNames reads the directory named by dirname and returns +// a sorted list of directory entries. 
+// adapted from https://golang.org/src/path/filepath/path.go +func readDirNames(fs Fs, dirname string) ([]string, error) { + f, err := fs.Open(dirname) + if err != nil { + return nil, err + } + names, err := f.Readdirnames(-1) + f.Close() + if err != nil { + return nil, err + } + sort.Strings(names) + return names, nil +} + +// walk recursively descends path, calling walkFn +// adapted from https://golang.org/src/path/filepath/path.go +func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error { + err := walkFn(path, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + + if !info.IsDir() { + return nil + } + + names, err := readDirNames(fs, path) + if err != nil { + return walkFn(path, info, err) + } + + for _, name := range names { + filename := filepath.Join(path, name) + fileInfo, err := lstatIfPossible(fs, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walk(fs, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} + +// if the filesystem supports it, use Lstat, else use fs.Stat +func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) { + if lfs, ok := fs.(Lstater); ok { + fi, _, err := lfs.LstatIfPossible(path) + return fi, err + } + return fs.Stat(path) +} + +// Walk walks the file tree rooted at root, calling walkFn for each file or +// directory in the tree, including root. All errors that arise visiting files +// and directories are filtered by walkFn. The files are walked in lexical +// order, which makes the output deterministic but means that for very +// large directories Walk can be inefficient. +// Walk does not follow symbolic links. 
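+//
+// For instance, a sketch that prints every path under an assumed root
+// (appFs is any Fs; the caller's code must import fmt):
+//
+//	err := Walk(appFs, "/data", func(path string, info os.FileInfo, err error) error {
+//		if err != nil {
+//			return err
+//		}
+//		fmt.Println(path, info.IsDir())
+//		return nil
+//	})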
+ +func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error { + return Walk(a.Fs, root, walkFn) +} + +func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error { + info, err := lstatIfPossible(fs, root) + if err != nil { + return walkFn(root, nil, err) + } + return walk(fs, root, info, walkFn) +} diff --git a/vendor/github.com/spf13/afero/readonlyfs.go b/vendor/github.com/spf13/afero/readonlyfs.go new file mode 100644 index 00000000..bd8f9264 --- /dev/null +++ b/vendor/github.com/spf13/afero/readonlyfs.go @@ -0,0 +1,96 @@ +package afero + +import ( + "os" + "syscall" + "time" +) + +var _ Lstater = (*ReadOnlyFs)(nil) + +type ReadOnlyFs struct { + source Fs +} + +func NewReadOnlyFs(source Fs) Fs { + return &ReadOnlyFs{source: source} +} + +func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) { + return ReadDir(r.source, name) +} + +func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Chown(n string, uid, gid int) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Name() string { + return "ReadOnlyFilter" +} + +func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) { + return r.source.Stat(name) +} + +func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) { + if lsf, ok := r.source.(Lstater); ok { + return lsf.LstatIfPossible(name) + } + fi, err := r.Stat(name) + return fi, false, err +} + +func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error { + return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink} +} + +func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) { + if srdr, ok := r.source.(LinkReader); ok { + return srdr.ReadlinkIfPossible(name) + } + + return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink} +} + +func (r *ReadOnlyFs) Rename(o, n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) RemoveAll(p string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Remove(n string) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 { + return nil, syscall.EPERM + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *ReadOnlyFs) Open(n string) (File, error) { + return r.source.Open(n) +} + +func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error { + return syscall.EPERM +} + +func (r *ReadOnlyFs) Create(n string) (File, error) { + return nil, syscall.EPERM +} diff --git a/vendor/github.com/spf13/afero/regexpfs.go b/vendor/github.com/spf13/afero/regexpfs.go new file mode 100644 index 00000000..218f3b23 --- /dev/null +++ b/vendor/github.com/spf13/afero/regexpfs.go @@ -0,0 +1,223 @@ +package afero + +import ( + "os" + "regexp" + "syscall" + "time" +) + +// The RegexpFs filters files (not directories) by regular expression. Only +// files matching the given regexp will be allowed, all others get a ENOENT error ( +// "No such file or directory"). 
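+//
+// For example (a sketch; the pattern is an arbitrary choice):
+//
+//	jsonFs := NewRegexpFs(NewMemMapFs(), regexp.MustCompile(`\.json$`))
+//	_, err := jsonFs.Create("/cfg/app.json") // allowed, matches the pattern
+//	_, err = jsonFs.Create("/cfg/app.yaml")  // rejected with syscall.ENOENT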
+type RegexpFs struct { + re *regexp.Regexp + source Fs +} + +func NewRegexpFs(source Fs, re *regexp.Regexp) Fs { + return &RegexpFs{source: source, re: re} +} + +type RegexpFile struct { + f File + re *regexp.Regexp +} + +func (r *RegexpFs) matchesName(name string) error { + if r.re == nil { + return nil + } + if r.re.MatchString(name) { + return nil + } + return syscall.ENOENT +} + +func (r *RegexpFs) dirOrMatches(name string) error { + dir, err := IsDir(r.source, name) + if err != nil { + return err + } + if dir { + return nil + } + return r.matchesName(name) +} + +func (r *RegexpFs) Chtimes(name string, a, m time.Time) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chtimes(name, a, m) +} + +func (r *RegexpFs) Chmod(name string, mode os.FileMode) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chmod(name, mode) +} + +func (r *RegexpFs) Chown(name string, uid, gid int) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Chown(name, uid, gid) +} + +func (r *RegexpFs) Name() string { + return "RegexpFs" +} + +func (r *RegexpFs) Stat(name string) (os.FileInfo, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.Stat(name) +} + +func (r *RegexpFs) Rename(oldname, newname string) error { + dir, err := IsDir(r.source, oldname) + if err != nil { + return err + } + if dir { + return nil + } + if err := r.matchesName(oldname); err != nil { + return err + } + if err := r.matchesName(newname); err != nil { + return err + } + return r.source.Rename(oldname, newname) +} + +func (r *RegexpFs) RemoveAll(p string) error { + dir, err := IsDir(r.source, p) + if err != nil { + return err + } + if !dir { + if err := r.matchesName(p); err != nil { + return err + } + } + return r.source.RemoveAll(p) +} + +func (r *RegexpFs) Remove(name string) error { + if err := r.dirOrMatches(name); err != nil { + return err + } + return r.source.Remove(name) +} + +func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { + if err := r.dirOrMatches(name); err != nil { + return nil, err + } + return r.source.OpenFile(name, flag, perm) +} + +func (r *RegexpFs) Open(name string) (File, error) { + dir, err := IsDir(r.source, name) + if err != nil { + return nil, err + } + if !dir { + if err := r.matchesName(name); err != nil { + return nil, err + } + } + f, err := r.source.Open(name) + if err != nil { + return nil, err + } + return &RegexpFile{f: f, re: r.re}, nil +} + +func (r *RegexpFs) Mkdir(n string, p os.FileMode) error { + return r.source.Mkdir(n, p) +} + +func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error { + return r.source.MkdirAll(n, p) +} + +func (r *RegexpFs) Create(name string) (File, error) { + if err := r.matchesName(name); err != nil { + return nil, err + } + return r.source.Create(name) +} + +func (f *RegexpFile) Close() error { + return f.f.Close() +} + +func (f *RegexpFile) Read(s []byte) (int, error) { + return f.f.Read(s) +} + +func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) { + return f.f.ReadAt(s, o) +} + +func (f *RegexpFile) Seek(o int64, w int) (int64, error) { + return f.f.Seek(o, w) +} + +func (f *RegexpFile) Write(s []byte) (int, error) { + return f.f.Write(s) +} + +func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) { + return f.f.WriteAt(s, o) +} + +func (f *RegexpFile) Name() string { + return f.f.Name() +} + +func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) { 
+	var rfi []os.FileInfo
+	rfi, err = f.f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	for _, i := range rfi {
+		if i.IsDir() || f.re.MatchString(i.Name()) {
+			fi = append(fi, i)
+		}
+	}
+	return fi, nil
+}
+
+func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
+	fi, err := f.Readdir(c)
+	if err != nil {
+		return nil, err
+	}
+	for _, s := range fi {
+		n = append(n, s.Name())
+	}
+	return n, nil
+}
+
+func (f *RegexpFile) Stat() (os.FileInfo, error) {
+	return f.f.Stat()
+}
+
+func (f *RegexpFile) Sync() error {
+	return f.f.Sync()
+}
+
+func (f *RegexpFile) Truncate(s int64) error {
+	return f.f.Truncate(s)
+}
+
+func (f *RegexpFile) WriteString(s string) (int, error) {
+	return f.f.WriteString(s)
+}
diff --git a/vendor/github.com/spf13/afero/symlink.go b/vendor/github.com/spf13/afero/symlink.go
new file mode 100644
index 00000000..aa6ae125
--- /dev/null
+++ b/vendor/github.com/spf13/afero/symlink.go
@@ -0,0 +1,55 @@
+// Copyright © 2018 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"errors"
+)
+
+// Symlinker is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It indicates support for 3 symlink related interfaces that implement the
+// behaviors of the os methods:
+// - Lstat
+// - Symlink, and
+// - Readlink
+type Symlinker interface {
+	Lstater
+	Linker
+	LinkReader
+}
+
+// Linker is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+// It will call Symlink if the filesystem itself is, or it delegates to, the os filesystem,
+// or the filesystem otherwise supports symlinks.
+type Linker interface {
+	SymlinkIfPossible(oldname, newname string) error
+}
+
+// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system
+// does not support symlinks either directly or through its delegated filesystem.
+// As expressed by support for the Linker interface.
+var ErrNoSymlink = errors.New("symlink not supported")
+
+// LinkReader is an optional interface in Afero. It is only implemented by the
+// filesystems saying so.
+type LinkReader interface {
+	ReadlinkIfPossible(name string) (string, error)
+}
+
+// ErrNoReadlink is the error that will be wrapped in an os.PathError if a file system
+// does not support the readlink operation either directly or through its delegated filesystem.
+// As expressed by support for the LinkReader interface.
+var ErrNoReadlink = errors.New("readlink not supported")
diff --git a/vendor/github.com/spf13/afero/unionFile.go b/vendor/github.com/spf13/afero/unionFile.go
new file mode 100644
index 00000000..f02e7553
--- /dev/null
+++ b/vendor/github.com/spf13/afero/unionFile.go
@@ -0,0 +1,330 @@
+package afero
+
+import (
+	"io"
+	"os"
+	"path/filepath"
+	"syscall"
+)
+
+// The UnionFile implements the afero.File interface and will be returned
+// when reading a directory present at least in the overlay or opening a file
+// for writing.
+// +// The calls to +// Readdir() and Readdirnames() merge the file os.FileInfo / names from the +// base and the overlay - for files present in both layers, only those +// from the overlay will be used. +// +// When opening files for writing (Create() / OpenFile() with the right flags) +// the operations will be done in both layers, starting with the overlay. A +// successful read in the overlay will move the cursor position in the base layer +// by the number of bytes read. +type UnionFile struct { + Base File + Layer File + Merger DirsMerger + off int + files []os.FileInfo +} + +func (f *UnionFile) Close() error { + // first close base, so we have a newer timestamp in the overlay. If we'd close + // the overlay first, we'd get a cacheStale the next time we access this file + // -> cache would be useless ;-) + if f.Base != nil { + f.Base.Close() + } + if f.Layer != nil { + return f.Layer.Close() + } + return BADFD +} + +func (f *UnionFile) Read(s []byte) (int, error) { + if f.Layer != nil { + n, err := f.Layer.Read(s) + if (err == nil || err == io.EOF) && f.Base != nil { + // advance the file position also in the base file, the next + // call may be a write at this position (or a seek with SEEK_CUR) + if _, seekErr := f.Base.Seek(int64(n), os.SEEK_CUR); seekErr != nil { + // only overwrite err in case the seek fails: we need to + // report an eventual io.EOF to the caller + err = seekErr + } + } + return n, err + } + if f.Base != nil { + return f.Base.Read(s) + } + return 0, BADFD +} + +func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) { + if f.Layer != nil { + n, err := f.Layer.ReadAt(s, o) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o+int64(n), io.SeekStart) + } + return n, err + } + if f.Base != nil { + return f.Base.ReadAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) { + if f.Layer != nil { + pos, err = f.Layer.Seek(o, w) + if (err == nil || err == io.EOF) && f.Base != nil { + _, err = f.Base.Seek(o, w) + } + return pos, err + } + if f.Base != nil { + return f.Base.Seek(o, w) + } + return 0, BADFD +} + +func (f *UnionFile) Write(s []byte) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.Write(s) + if err == nil && f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark? + _, err = f.Base.Write(s) + } + return n, err + } + if f.Base != nil { + return f.Base.Write(s) + } + return 0, BADFD +} + +func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteAt(s, o) + if err == nil && f.Base != nil { + _, err = f.Base.WriteAt(s, o) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteAt(s, o) + } + return 0, BADFD +} + +func (f *UnionFile) Name() string { + if f.Layer != nil { + return f.Layer.Name() + } + return f.Base.Name() +} + +// DirsMerger is how UnionFile weaves two directories together. +// It takes the FileInfo slices from the layer and the base and returns a +// single view. 
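+//
+// A custom merger can be assigned to UnionFile.Merger; for example, a sketch
+// that keeps only the overlay's entries and ignores the base:
+//
+//	var layerOnly DirsMerger = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
+//		return lofi, nil
+//	}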
+type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) + +var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) { + files := make(map[string]os.FileInfo) + + for _, fi := range lofi { + files[fi.Name()] = fi + } + + for _, fi := range bofi { + if _, exists := files[fi.Name()]; !exists { + files[fi.Name()] = fi + } + } + + rfi := make([]os.FileInfo, len(files)) + + i := 0 + for _, fi := range files { + rfi[i] = fi + i++ + } + + return rfi, nil +} + +// Readdir will weave the two directories together and +// return a single view of the overlayed directories. +// At the end of the directory view, the error is io.EOF if c > 0. +func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) { + var merge DirsMerger = f.Merger + if merge == nil { + merge = defaultUnionMergeDirsFn + } + + if f.off == 0 { + var lfi []os.FileInfo + if f.Layer != nil { + lfi, err = f.Layer.Readdir(-1) + if err != nil { + return nil, err + } + } + + var bfi []os.FileInfo + if f.Base != nil { + bfi, err = f.Base.Readdir(-1) + if err != nil { + return nil, err + } + + } + merged, err := merge(lfi, bfi) + if err != nil { + return nil, err + } + f.files = append(f.files, merged...) + } + files := f.files[f.off:] + + if c <= 0 { + return files, nil + } + + if len(files) == 0 { + return nil, io.EOF + } + + if c > len(files) { + c = len(files) + } + + defer func() { f.off += c }() + return files[:c], nil +} + +func (f *UnionFile) Readdirnames(c int) ([]string, error) { + rfi, err := f.Readdir(c) + if err != nil { + return nil, err + } + var names []string + for _, fi := range rfi { + names = append(names, fi.Name()) + } + return names, nil +} + +func (f *UnionFile) Stat() (os.FileInfo, error) { + if f.Layer != nil { + return f.Layer.Stat() + } + if f.Base != nil { + return f.Base.Stat() + } + return nil, BADFD +} + +func (f *UnionFile) Sync() (err error) { + if f.Layer != nil { + err = f.Layer.Sync() + if err == nil && f.Base != nil { + err = f.Base.Sync() + } + return err + } + if f.Base != nil { + return f.Base.Sync() + } + return BADFD +} + +func (f *UnionFile) Truncate(s int64) (err error) { + if f.Layer != nil { + err = f.Layer.Truncate(s) + if err == nil && f.Base != nil { + err = f.Base.Truncate(s) + } + return err + } + if f.Base != nil { + return f.Base.Truncate(s) + } + return BADFD +} + +func (f *UnionFile) WriteString(s string) (n int, err error) { + if f.Layer != nil { + n, err = f.Layer.WriteString(s) + if err == nil && f.Base != nil { + _, err = f.Base.WriteString(s) + } + return n, err + } + if f.Base != nil { + return f.Base.WriteString(s) + } + return 0, BADFD +} + +func copyFile(base Fs, layer Fs, name string, bfh File) error { + // First make sure the directory exists + exists, err := Exists(layer, filepath.Dir(name)) + if err != nil { + return err + } + if !exists { + err = layer.MkdirAll(filepath.Dir(name), 0o777) // FIXME? 
+		if err != nil {
+			return err
+		}
+	}
+
+	// Create the file on the overlay
+	lfh, err := layer.Create(name)
+	if err != nil {
+		return err
+	}
+	n, err := io.Copy(lfh, bfh)
+	if err != nil {
+		// If anything fails, clean up the file
+		layer.Remove(name)
+		lfh.Close()
+		return err
+	}
+
+	bfi, err := bfh.Stat()
+	if err != nil || bfi.Size() != n {
+		layer.Remove(name)
+		lfh.Close()
+		return syscall.EIO
+	}
+
+	err = lfh.Close()
+	if err != nil {
+		layer.Remove(name)
+		lfh.Close()
+		return err
+	}
+	return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
+}
+
+func copyToLayer(base Fs, layer Fs, name string) error {
+	bfh, err := base.Open(name)
+	if err != nil {
+		return err
+	}
+	defer bfh.Close()
+
+	return copyFile(base, layer, name, bfh)
+}
+
+func copyFileToLayer(base Fs, layer Fs, name string, flag int, perm os.FileMode) error {
+	bfh, err := base.OpenFile(name, flag, perm)
+	if err != nil {
+		return err
+	}
+	defer bfh.Close()
+
+	return copyFile(base, layer, name, bfh)
+}
diff --git a/vendor/github.com/spf13/afero/util.go b/vendor/github.com/spf13/afero/util.go
new file mode 100644
index 00000000..9e4cba27
--- /dev/null
+++ b/vendor/github.com/spf13/afero/util.go
@@ -0,0 +1,329 @@
+// Copyright ©2015 Steve Francia
+// Portions Copyright ©2015 The Hugo Authors
+// Portions Copyright 2016-present Bjørn Erik Pedersen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"unicode"
+
+	"golang.org/x/text/runes"
+	"golang.org/x/text/transform"
+	"golang.org/x/text/unicode/norm"
+)
+
+// FilePathSeparator is the file path separator, as defined by filepath.Separator.
+const FilePathSeparator = string(filepath.Separator)
+
+// WriteReader takes a reader and a path and writes the content.
+func (a Afero) WriteReader(path string, r io.Reader) (err error) {
+	return WriteReader(a.Fs, path, r)
+}
+
+func WriteReader(fs Fs, path string, r io.Reader) (err error) {
+	dir, _ := filepath.Split(path)
+	ospath := filepath.FromSlash(dir)
+
+	if ospath != "" {
+		err = fs.MkdirAll(ospath, 0o777) // rwx for all
+		if err != nil {
+			if err != os.ErrExist {
+				return err
+			}
+		}
+	}
+
+	file, err := fs.Create(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return
+}
+
+// Same as WriteReader but checks to see if file/directory already exists.
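+// For example (a sketch; the path and payload are illustrative):
+//
+//	err := SafeWriteReader(appFs, "/data/out.txt", strings.NewReader("payload"))
+//	// a second call with the same path fails instead of overwriting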
+func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
+	return SafeWriteReader(a.Fs, path, r)
+}
+
+func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
+	dir, _ := filepath.Split(path)
+	ospath := filepath.FromSlash(dir)
+
+	if ospath != "" {
+		err = fs.MkdirAll(ospath, 0o777) // rwx for all
+		if err != nil {
+			return
+		}
+	}
+
+	exists, err := Exists(fs, path)
+	if err != nil {
+		return
+	}
+	if exists {
+		return fmt.Errorf("%v already exists", path)
+	}
+
+	file, err := fs.Create(path)
+	if err != nil {
+		return
+	}
+	defer file.Close()
+
+	_, err = io.Copy(file, r)
+	return
+}
+
+func (a Afero) GetTempDir(subPath string) string {
+	return GetTempDir(a.Fs, subPath)
+}
+
+// GetTempDir returns the default temp directory with a trailing slash;
+// if subPath is not empty, it will be created recursively with mode 777 (rwx rwx rwx).
+func GetTempDir(fs Fs, subPath string) string {
+	addSlash := func(p string) string {
+		if FilePathSeparator != p[len(p)-1:] {
+			p = p + FilePathSeparator
+		}
+		return p
+	}
+	dir := addSlash(os.TempDir())
+
+	if subPath != "" {
+		// preserve windows backslash :-(
+		if FilePathSeparator == "\\" {
+			subPath = strings.Replace(subPath, "\\", "____", -1)
+		}
+		dir = dir + UnicodeSanitize(subPath)
+		if FilePathSeparator == "\\" {
+			dir = strings.Replace(dir, "____", "\\", -1)
+		}
+
+		if exists, _ := Exists(fs, dir); exists {
+			return addSlash(dir)
+		}
+
+		err := fs.MkdirAll(dir, 0o777)
+		if err != nil {
+			panic(err)
+		}
+		dir = addSlash(dir)
+	}
+	return dir
+}
+
+// UnicodeSanitize rewrites a string to remove non-standard path characters.
+func UnicodeSanitize(s string) string {
+	source := []rune(s)
+	target := make([]rune, 0, len(source))
+
+	for _, r := range source {
+		if unicode.IsLetter(r) ||
+			unicode.IsDigit(r) ||
+			unicode.IsMark(r) ||
+			r == '.' ||
+			r == '/' ||
+			r == '\\' ||
+			r == '_' ||
+			r == '-' ||
+			r == '%' ||
+			r == ' ' ||
+			r == '#' {
+			target = append(target, r)
+		}
+	}
+
+	return string(target)
+}
+
+// NeuterAccents transforms characters with accents into plain forms.
+func NeuterAccents(s string) string {
+	t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)
+	result, _, _ := transform.String(t, string(s))
+
+	return result
+}
+
+func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
+	return FileContainsBytes(a.Fs, filename, subslice)
+}
+
+// FileContainsBytes checks whether a file contains a specified byte slice.
+func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslice), nil
+}
+
+func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
+	return FileContainsAnyBytes(a.Fs, filename, subslices)
+}
+
+// FileContainsAnyBytes checks whether a file contains any of the specified byte slices.
+func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
+	f, err := fs.Open(filename)
+	if err != nil {
+		return false, err
+	}
+	defer f.Close()
+
+	return readerContainsAny(f, subslices...), nil
+}
+
+// readerContainsAny reports whether any of the subslices is within r.
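+// It backs FileContainsBytes and FileContainsAnyBytes; for example
+// (a sketch, with an illustrative path):
+//
+//	found, err := FileContainsAnyBytes(appFs, "/var/log/app.log",
+//		[][]byte{[]byte("ERROR"), []byte("FATAL")})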
+func readerContainsAny(r io.Reader, subslices ...[]byte) bool { + if r == nil || len(subslices) == 0 { + return false + } + + largestSlice := 0 + + for _, sl := range subslices { + if len(sl) > largestSlice { + largestSlice = len(sl) + } + } + + if largestSlice == 0 { + return false + } + + bufflen := largestSlice * 4 + halflen := bufflen / 2 + buff := make([]byte, bufflen) + var err error + var n, i int + + for { + i++ + if i == 1 { + n, err = io.ReadAtLeast(r, buff[:halflen], halflen) + } else { + if i != 2 { + // shift left to catch overlapping matches + copy(buff[:], buff[halflen:]) + } + n, err = io.ReadAtLeast(r, buff[halflen:], halflen) + } + + if n > 0 { + for _, sl := range subslices { + if bytes.Contains(buff, sl) { + return true + } + } + } + + if err != nil { + break + } + } + return false +} + +func (a Afero) DirExists(path string) (bool, error) { + return DirExists(a.Fs, path) +} + +// DirExists checks if a path exists and is a directory. +func DirExists(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err == nil && fi.IsDir() { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func (a Afero) IsDir(path string) (bool, error) { + return IsDir(a.Fs, path) +} + +// IsDir checks if a given path is a directory. +func IsDir(fs Fs, path string) (bool, error) { + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + return fi.IsDir(), nil +} + +func (a Afero) IsEmpty(path string) (bool, error) { + return IsEmpty(a.Fs, path) +} + +// IsEmpty checks if a given file or directory is empty. +func IsEmpty(fs Fs, path string) (bool, error) { + if b, _ := Exists(fs, path); !b { + return false, fmt.Errorf("%q path does not exist", path) + } + fi, err := fs.Stat(path) + if err != nil { + return false, err + } + if fi.IsDir() { + f, err := fs.Open(path) + if err != nil { + return false, err + } + defer f.Close() + list, err := f.Readdir(-1) + if err != nil { + return false, err + } + return len(list) == 0, nil + } + return fi.Size() == 0, nil +} + +func (a Afero) Exists(path string) (bool, error) { + return Exists(a.Fs, path) +} + +// Check if a file or directory exists. +func Exists(fs Fs, path string) (bool, error) { + _, err := fs.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string { + combinedPath := filepath.Join(basePathFs.path, relativePath) + if parent, ok := basePathFs.source.(*BasePathFs); ok { + return FullBaseFsPath(parent, combinedPath) + } + + return combinedPath +} diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go new file mode 100644 index 00000000..a7828345 --- /dev/null +++ b/vendor/golang.org/x/crypto/ed25519/ed25519.go @@ -0,0 +1,71 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ed25519 implements the Ed25519 signature algorithm. See +// https://ed25519.cr.yp.to/. +// +// These functions are also compatible with the “Ed25519” function defined in +// RFC 8032. However, unlike RFC 8032's formulation, this package's private key +// representation includes a public key suffix to make multiple signing +// operations with the same key more efficient. This package refers to the RFC +// 8032 private key as the “seed”. +// +// Beginning with Go 1.13, the functionality of this package was moved to the +// standard library as crypto/ed25519. This package only acts as a compatibility +// wrapper. 
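Since the package is now only a thin wrapper over crypto/ed25519, it is used exactly like the standard library. An illustrative sign/verify round trip with the three functions declared below:

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// Passing nil makes GenerateKey fall back to crypto/rand.Reader.
	pub, priv, err := ed25519.GenerateKey(nil)
	if err != nil {
		panic(err)
	}

	msg := []byte("signed payload")
	sig := ed25519.Sign(priv, msg)

	// Verify reports whether sig is a valid signature of msg by pub.
	fmt.Println(ed25519.Verify(pub, msg, sig)) // true
}
```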
+package ed25519 + +import ( + "crypto/ed25519" + "io" +) + +const ( + // PublicKeySize is the size, in bytes, of public keys as used in this package. + PublicKeySize = 32 + // PrivateKeySize is the size, in bytes, of private keys as used in this package. + PrivateKeySize = 64 + // SignatureSize is the size, in bytes, of signatures generated and verified by this package. + SignatureSize = 64 + // SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032. + SeedSize = 32 +) + +// PublicKey is the type of Ed25519 public keys. +// +// This type is an alias for crypto/ed25519's PublicKey type. +// See the crypto/ed25519 package for the methods on this type. +type PublicKey = ed25519.PublicKey + +// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer. +// +// This type is an alias for crypto/ed25519's PrivateKey type. +// See the crypto/ed25519 package for the methods on this type. +type PrivateKey = ed25519.PrivateKey + +// GenerateKey generates a public/private key pair using entropy from rand. +// If rand is nil, crypto/rand.Reader will be used. +func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) { + return ed25519.GenerateKey(rand) +} + +// NewKeyFromSeed calculates a private key from a seed. It will panic if +// len(seed) is not SeedSize. This function is provided for interoperability +// with RFC 8032. RFC 8032's private keys correspond to seeds in this +// package. +func NewKeyFromSeed(seed []byte) PrivateKey { + return ed25519.NewKeyFromSeed(seed) +} + +// Sign signs the message with privateKey and returns a signature. It will +// panic if len(privateKey) is not PrivateKeySize. +func Sign(privateKey PrivateKey, message []byte) []byte { + return ed25519.Sign(privateKey, message) +} + +// Verify reports whether sig is a valid signature of message by publicKey. It +// will panic if len(publicKey) is not PublicKeySize. +func Verify(publicKey PublicKey, message, sig []byte) bool { + return ed25519.Verify(publicKey, message, sig) +} diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/golang.org/x/exp/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/exp/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 00000000..2c033dff --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. +// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. 
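These interfaces exist purely to be used as type-parameter constraints. A hypothetical generic helper showing why the `~` (underlying type) elements matter — named types such as time.Duration satisfy Integer because `~int64` matches any type whose underlying type is int64:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/constraints"
)

// Sum accepts any integer type, including user-defined named types,
// because the constraint elements use the ~ form.
func Sum[T constraints.Integer](xs []T) T {
	var total T
	for _, x := range xs {
		total += x
	}
	return total
}

func main() {
	fmt.Println(Sum([]int{1, 2, 3}))      // 6
	fmt.Println(Sum([]uint8{10, 20, 30})) // 60
}
```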
+type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 00000000..cff0cd49 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,258 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +// Unless otherwise specified, these functions all apply to the elements +// of a slice at index 0 <= i < len(s). +// +// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a +// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), +// or the sorting may fail to sort correctly. A common case is when sorting slices of +// floating-point numbers containing NaN values. +package slices + +import "golang.org/x/exp/constraints" + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[E comparable](s1, s2 []E) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using a comparison +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2. +// The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +// Comparisons involving floating point NaNs are ignored. +func Compare[E constraints.Ordered](s1, s2 []E) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// CompareFunc is like Compare but uses a comparison function +// on each pair of elements. The elements are compared in increasing +// index order, and the comparisons stop after the first time cmp +// returns non-zero. 
+// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). +func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[E comparable](s []E, v E) int { + for i, vs := range s { + if v == vs { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[E any](s []E, f func(E) bool) int { + for i, v := range s { + if f(v) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[E comparable](s []E, v E) bool { + return Index(s, v) >= 0 +} + +// ContainsFunc reports whether at least one +// element e of s satisfies f(e). +func ContainsFunc[E any](s []E, f func(E) bool) bool { + return IndexFunc(s, f) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// In the returned slice r, r[i] == v[0]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). +func Insert[S ~[]E, E any](s S, i int, v ...E) S { + tot := len(s) + len(v) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[i:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[i:]) + return s2 +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if s[i:j] is not a valid slice of s. +// Delete modifies the contents of the slice s; it does not create a new slice. +// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those +// elements contain pointers you might consider zeroing those elements so that +// objects they reference can be garbage collected. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j] // bounds check + + return append(s[:i], s[j:]...) +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[j:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s; it does not create a new slice. +// When Compact discards m elements in total, it might not modify the elements +// s[len(s)-m:len(s)]. If those elements contain pointers you might consider +// zeroing those elements so that objects they reference can be garbage collected. 
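A short usage sketch of the helpers above (Insert, Delete, Index, Contains, Compare), illustrating the documented semantics — for instance that `r[i] == v[0]` after Insert, and that Compare treats the shorter slice as less when a common prefix matches:

```go
package main

import (
	"fmt"

	"golang.org/x/exp/slices"
)

func main() {
	s := []int{1, 2, 5}

	s = slices.Insert(s, 2, 3, 4) // [1 2 3 4 5]; s[2] == 3
	s = slices.Delete(s, 0, 1)    // [2 3 4 5]; removes s[0:1] in place

	fmt.Println(slices.Contains(s, 4))          // true
	fmt.Println(slices.Index(s, 5))             // 3
	fmt.Println(slices.Compare(s, []int{2, 3})) // 1: equal prefix, s is longer
}
```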
+func Compact[S ~[]E, E comparable](s S) S { + if len(s) < 2 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if v != last { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// CompactFunc is like Compact but uses a comparison function. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) < 2 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if !eq(v, last) { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 00000000..f14f40da --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,126 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// Sort may fail to sort correctly when sorting slices of floating-point +// numbers containing Not-a-number (NaN) values. +// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) +// instead if the input may contain NaNs. +func Sort[E constraints.Ordered](x []E) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the less function. +// This sort is not guaranteed to be stable. +// +// SortFunc requires that less is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +func SortFunc[E any](x []E, less func(a, b E) bool) { + n := len(x) + pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) +} + +// SortStableFunc sorts the slice x while keeping the original order of equal +// elements, using less to compare elements. +func SortStableFunc[E any](x []E, less func(a, b E) bool) { + stableLessFunc(x, len(x), less) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[E constraints.Ordered](x []E) bool { + for i := len(x) - 1; i > 0; i-- { + if x[i] < x[i-1] { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with less as the +// comparison function. 
+func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { + for i := len(x) - 1; i > 0; i-- { + if less(x[i], x[i-1]) { + return false + } + } + return true +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { + // Inlining is faster than calling BinarySearchFunc with a lambda. + n := len(x) + // Define x[-1] < target and x[n] >= target. + // Invariant: x[i-1] < target, x[j] >= target. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if x[h] < target { + i = h + 1 // preserves x[i-1] < target + } else { + j = h // preserves x[j] >= target + } + } + // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. + return i, i < n && x[i] == target +} + +// BinarySearchFunc works like BinarySearch, but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" is +// defined by cmp. cmp(a, b) is expected to return an integer comparing the two +// parameters: 0 if a == b, a negative number if a < b and a positive number if +// a > b. +func BinarySearchFunc[E, T any](x []E, target T, cmp func(E, T) int) (int, bool) { + n := len(x) + // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . + // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if cmp(x[h], target) < 0 { + i = h + 1 // preserves cmp(x[i - 1], target) < 0 + } else { + j = h // preserves cmp(x[j], target) >= 0 + } + } + // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. + return i, i < n && cmp(x[i], target) == 0 +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go new file mode 100644 index 00000000..2a632476 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortLessFunc sorts data[a:b] using insertion sort. +func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + for i := a + 1; i < b; i++ { + for j := i; j > a && less(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownLessFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
+func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && less(data[first+child], data[first+child+1]) { + child++ + } + if !less(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownLessFunc(data, i, hi, first, less) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownLessFunc(data, lo, i, first, less) + } +} + +// pdqsortLessFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortLessFunc(data, a, b, less) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortLessFunc(data, a, b, less) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsLessFunc(data, a, b, less) + limit-- + } + + pivot, hint := choosePivotLessFunc(data, a, b, less) + if hint == decreasingHint { + reverseRangeLessFunc(data, a, b, less) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortLessFunc(data, a, b, less) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !less(data[a-1], data[pivot]) { + mid := partitionEqualLessFunc(data, a, b, pivot, less) + a = mid + continue + } + + mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortLessFunc(data, a, mid, limit, less) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortLessFunc(data, mid+1, b, limit, less) + b = mid + } + } +} + +// partitionLessFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for i <= j && less(data[i], data[a]) {
+		i++
+	}
+	for i <= j && !less(data[j], data[a]) {
+		j--
+	}
+	if i > j {
+		data[j], data[a] = data[a], data[j]
+		return j, true
+	}
+	data[i], data[j] = data[j], data[i]
+	i++
+	j--
+
+	for {
+		for i <= j && less(data[i], data[a]) {
+			i++
+		}
+		for i <= j && !less(data[j], data[a]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	data[j], data[a] = data[a], data[j]
+	return j, false
+}
+
+// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It is assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for {
+		for i <= j && !less(data[a], data[i]) {
+			i++
+		}
+		for i <= j && less(data[a], data[j]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	return i
+}
+
+// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool {
+	const (
+		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
+		shortestShifting = 50 // don't shift any elements on short arrays
+	)
+	i := a + 1
+	for j := 0; j < maxSteps; j++ {
+		for i < b && !less(data[i], data[i-1]) {
+			i++
+		}
+
+		if i == b {
+			return true
+		}
+
+		if b-a < shortestShifting {
+			return false
+		}
+
+		data[i], data[i-1] = data[i-1], data[i]
+
+		// Shift the smaller one to the left.
+		if i-a >= 2 {
+			for j := i - 1; j >= 1; j-- {
+				if !less(data[j], data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+		// Shift the greater one to the right.
+		if b-i >= 2 {
+			for j := i + 1; j < b; j++ {
+				if !less(data[j], data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+	}
+	return false
+}
+
+// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) {
+	length := b - a
+	if length >= 8 {
+		random := xorshift(length)
+		modulus := nextPowerOfTwo(length)
+
+		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+			other := int(uint(random.Next()) & (modulus - 1))
+			if other >= length {
+				other -= length
+			}
+			data[idx], data[a+other] = data[a+other], data[idx]
+		}
+	}
+}
+
+// choosePivotLessFunc chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentLessFunc(data, i, &swaps, less) + j = medianAdjacentLessFunc(data, j, &swaps, less) + k = medianAdjacentLessFunc(data, k, &swaps, less) + } + // Find the median among i, j, k and stores it into j. + j = medianLessFunc(data, i, j, k, &swaps, less) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { + if less(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { + a, b = order2LessFunc(data, a, b, swaps, less) + b, c = order2LessFunc(data, b, c, swaps, less) + a, b = order2LessFunc(data, a, b, swaps, less) + return b +} + +// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { + return medianLessFunc(data, a-1, a, a+1, swaps, less) +} + +func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortLessFunc(data, a, b, less) + a = b + b += blockSize + } + insertionSortLessFunc(data, a, n, less) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeLessFunc(data, a, a+blockSize, b, less) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeLessFunc(data, a, m, n, less) + } + blockSize *= 2 + } +} + +// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. 
+// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if less(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !less(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !less(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateLessFunc(data, start, m, end, less) + } + if a < start && start < mid { + symMergeLessFunc(data, a, start, mid, less) + } + if mid < end && end < b { + symMergeLessFunc(data, mid, end, b, less) + } +} + +// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeLessFunc(data, m-i, m, j, less) + i -= j + } else { + swapRangeLessFunc(data, m-i, m+j-i, i, less) + j -= i + } + } + // i == j + swapRangeLessFunc(data, m-i, m, i, less) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 00000000..efaa1c8b --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j] < data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child] < data[first+child+1]) { + child++ + } + if !(data[first+root] < data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortOrdered(data, a, b) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortOrdered(data, a, b) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsOrdered(data, a, b) + limit-- + } + + pivot, hint := choosePivotOrdered(data, a, b) + if hint == decreasingHint { + reverseRangeOrdered(data, a, b) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortOrdered(data, a, b) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(data[a-1] < data[pivot]) { + mid := partitionEqualOrdered(data, a, b, pivot) + a = mid + continue + } + + mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortOrdered(data, a, mid, limit) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortOrdered(data, mid+1, b, limit) + b = mid + } + } +} + +// partitionOrdered does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for i <= j && (data[i] < data[a]) {
+		i++
+	}
+	for i <= j && !(data[j] < data[a]) {
+		j--
+	}
+	if i > j {
+		data[j], data[a] = data[a], data[j]
+		return j, true
+	}
+	data[i], data[j] = data[j], data[i]
+	i++
+	j--
+
+	for {
+		for i <= j && (data[i] < data[a]) {
+			i++
+		}
+		for i <= j && !(data[j] < data[a]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	data[j], data[a] = data[a], data[j]
+	return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It is assumed that data[a:b] does not contain elements smaller than the data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for {
+		for i <= j && !(data[a] < data[i]) {
+			i++
+		}
+		for i <= j && (data[a] < data[j]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+	const (
+		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
+		shortestShifting = 50 // don't shift any elements on short arrays
+	)
+	i := a + 1
+	for j := 0; j < maxSteps; j++ {
+		for i < b && !(data[i] < data[i-1]) {
+			i++
+		}
+
+		if i == b {
+			return true
+		}
+
+		if b-a < shortestShifting {
+			return false
+		}
+
+		data[i], data[i-1] = data[i-1], data[i]
+
+		// Shift the smaller one to the left.
+		if i-a >= 2 {
+			for j := i - 1; j >= 1; j-- {
+				if !(data[j] < data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+		// Shift the greater one to the right.
+		if b-i >= 2 {
+			for j := i + 1; j < b; j++ {
+				if !(data[j] < data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+	}
+	return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+	length := b - a
+	if length >= 8 {
+		random := xorshift(length)
+		modulus := nextPowerOfTwo(length)
+
+		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+			other := int(uint(random.Next()) & (modulus - 1))
+			if other >= length {
+				other -= length
+			}
+			data[idx], data[a+other] = data[a+other], data[idx]
+		}
+	}
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+	const (
+		shortestNinther = 50
+		maxSwaps        = 4 * 3
+	)
+
+	l := b - a
+
+	var (
+		swaps int
+		i     = a + l/4*1
+		j     = a + l/4*2
+		k     = a + l/4*3
+	)
+
+	if l >= 8 {
+		if l >= shortestNinther {
+			// Tukey ninther method, the idea came from Rust's implementation.
+ i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if data[b] < data[a] { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. 
+ i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if data[h] < data[a] { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(data[m] < data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(data[p-c] < data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go deleted file mode 100644 index 37dc0cfd..00000000 --- a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ctxhttp provides helper functions for performing context-aware HTTP requests. -package ctxhttp // import "golang.org/x/net/context/ctxhttp" - -import ( - "context" - "io" - "net/http" - "net/url" - "strings" -) - -// Do sends an HTTP request with the provided http.Client and returns -// an HTTP response. -// -// If the client is nil, http.DefaultClient is used. -// -// The provided ctx must be non-nil. If it is canceled or times out, -// ctx.Err() will be returned. -func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { - if client == nil { - client = http.DefaultClient - } - resp, err := client.Do(req.WithContext(ctx)) - // If we got an error, and the context has been canceled, - // the context's error is probably more useful. - if err != nil { - select { - case <-ctx.Done(): - err = ctx.Err() - default: - } - } - return resp, err -} - -// Get issues a GET request via the Do function. 
-func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Head issues a HEAD request via the Do function. -func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { - req, err := http.NewRequest("HEAD", url, nil) - if err != nil { - return nil, err - } - return Do(ctx, client, req) -} - -// Post issues a POST request via the Do function. -func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { - req, err := http.NewRequest("POST", url, body) - if err != nil { - return nil, err - } - req.Header.Set("Content-Type", bodyType) - return Do(ctx, client, req) -} - -// PostForm issues a POST request via the Do function. -func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { - return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) -} diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS deleted file mode 100644 index 15167cd7..00000000 --- a/vendor/golang.org/x/oauth2/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS deleted file mode 100644 index 1c4577e9..00000000 --- a/vendor/golang.org/x/oauth2/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index 355c3869..b4723fca 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -19,8 +19,6 @@ import ( "strings" "sync" "time" - - "golang.org/x/net/context/ctxhttp" ) // Token represents the credentials used to authorize @@ -229,7 +227,7 @@ func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, } func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { - r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) + r, err := ContextClient(ctx).Do(req.WithContext(ctx)) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/text/runes/cond.go b/vendor/golang.org/x/text/runes/cond.go new file mode 100644 index 00000000..df7aa02d --- /dev/null +++ b/vendor/golang.org/x/text/runes/cond.go @@ -0,0 +1,187 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runes + +import ( + "unicode/utf8" + + "golang.org/x/text/transform" +) + +// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is. +// This is done for various reasons: +// - To retain the semantics of the Nop transformer: if input is passed to a Nop +// one would expect it to be unchanged. 
+// - It would be very expensive to pass a converted RuneError to a transformer:
+//   a transformer might need more source bytes after RuneError, meaning that
+//   the only way to pass it safely is to create a new buffer and manage the
+//   intermingling of RuneErrors and normal input.
+// - Many transformers leave ill-formed UTF-8 as is, so this is not
+//   inconsistent. Generally ill-formed UTF-8 is only replaced if it is a
+//   logical consequence of the operation (as for Map) or if it otherwise would
+//   pose security concerns (as for Remove).
+// - An alternative would be to return an error on ill-formed UTF-8, but this
+//   would be inconsistent with other operations.
+
+// If returns a transformer that applies tIn to consecutive runes for which
+// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset
+// is called on tIn and tNotIn at the start of each run. A Nop transformer will
+// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated
+// to RuneError to determine which transformer to apply, but is passed as is to
+// the respective transformer.
+func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
+	if tIn == nil && tNotIn == nil {
+		return Transformer{transform.Nop}
+	}
+	if tIn == nil {
+		tIn = transform.Nop
+	}
+	if tNotIn == nil {
+		tNotIn = transform.Nop
+	}
+	sIn, ok := tIn.(transform.SpanningTransformer)
+	if !ok {
+		sIn = dummySpan{tIn}
+	}
+	sNotIn, ok := tNotIn.(transform.SpanningTransformer)
+	if !ok {
+		sNotIn = dummySpan{tNotIn}
+	}
+
+	a := &cond{
+		tIn:    sIn,
+		tNotIn: sNotIn,
+		f:      s.Contains,
+	}
+	a.Reset()
+	return Transformer{a}
+}
+
+type dummySpan struct{ transform.Transformer }
+
+func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
+	return 0, transform.ErrEndOfSpan
+}
+
+type cond struct {
+	tIn, tNotIn transform.SpanningTransformer
+	f           func(rune) bool
+	check       func(rune) bool               // current check to perform
+	t           transform.SpanningTransformer // current transformer to use
+}
+
+// Reset implements transform.Transformer.
+func (t *cond) Reset() {
+	t.check = t.is
+	t.t = t.tIn
+	t.t.Reset() // notIn will be reset on first usage.
+}
+
+func (t *cond) is(r rune) bool {
+	if t.f(r) {
+		return true
+	}
+	t.check = t.isNot
+	t.t = t.tNotIn
+	t.tNotIn.Reset()
+	return false
+}
+
+func (t *cond) isNot(r rune) bool {
+	if !t.f(r) {
+		return true
+	}
+	t.check = t.is
+	t.t = t.tIn
+	t.tIn.Reset()
+	return false
+}
+
+// This implementation of Span doesn't help all that much, but it needs to be
+// there to satisfy this package's Transformer interface.
+// TODO: there is certainly room for improvement here, though. For example, if
+// t.t == transform.Nop (which will be a common occurrence) it will save a bundle
+// to special-case that loop.
+func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
+	p := 0
+	for n < len(src) && err == nil {
+		// Don't process too much at a time as the Spanner that will be
+		// called on this block may terminate early.
+		const maxChunk = 4096
+		max := len(src)
+		if v := n + maxChunk; v < max {
+			max = v
+		}
+		atEnd := false
+		size := 0
+		current := t.t
+		for ; p < max; p += size {
+			r := rune(src[p])
+			if r < utf8.RuneSelf {
+				size = 1
+			} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
+				if !atEOF && !utf8.FullRune(src[p:]) {
+					err = transform.ErrShortSrc
+					break
+				}
+			}
+			if !t.check(r) {
+				// The next rune will be the start of a new run.
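+				// Note that t.check has already switched t.t to the
+				// transformer for the new run; current still points at the
+				// old one, which spans src[n:p] below before the switch
+				// takes effect.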
+				atEnd = true
+				break
+			}
+		}
+		n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
+		n += n2
+		if err2 != nil {
+			return n, err2
+		}
+		// At this point either err != nil or t.check will pass for the rune at p.
+		p = n + size
+	}
+	return n, err
+}
+
+func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	p := 0
+	for nSrc < len(src) && err == nil {
+		// Don't process too much at a time, as the work might be wasted if the
+		// destination buffer isn't large enough to hold the result or a
+		// transform returns an error early.
+		const maxChunk = 4096
+		max := len(src)
+		if n := nSrc + maxChunk; n < len(src) {
+			max = n
+		}
+		atEnd := false
+		size := 0
+		current := t.t
+		for ; p < max; p += size {
+			r := rune(src[p])
+			if r < utf8.RuneSelf {
+				size = 1
+			} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
+				if !atEOF && !utf8.FullRune(src[p:]) {
+					err = transform.ErrShortSrc
+					break
+				}
+			}
+			if !t.check(r) {
+				// The next rune will be the start of a new run.
+				atEnd = true
+				break
+			}
+		}
+		nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src)))
+		nDst += nDst2
+		nSrc += nSrc2
+		if err2 != nil {
+			return nDst, nSrc, err2
+		}
+		// At this point either err != nil or t.check will pass for the rune at p.
+		p = nSrc + size
+	}
+	return nDst, nSrc, err
+}
diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go
new file mode 100644
index 00000000..930e87fe
--- /dev/null
+++ b/vendor/golang.org/x/text/runes/runes.go
@@ -0,0 +1,355 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package runes provides transforms for UTF-8 encoded text.
+package runes // import "golang.org/x/text/runes"
+
+import (
+	"unicode"
+	"unicode/utf8"
+
+	"golang.org/x/text/transform"
+)
+
+// A Set is a collection of runes.
+type Set interface {
+	// Contains returns true if r is contained in the set.
+	Contains(r rune) bool
+}
+
+type setFunc func(rune) bool
+
+func (s setFunc) Contains(r rune) bool {
+	return s(r)
+}
+
+// Note: using funcs here instead of wrapping types results in cleaner
+// documentation and a smaller API.
+
+// In creates a Set with a Contains method that returns true for all runes in
+// the given RangeTable.
+func In(rt *unicode.RangeTable) Set {
+	return setFunc(func(r rune) bool { return unicode.Is(rt, r) })
+}
+
+// NotIn creates a Set with a Contains method that returns true for all runes not
+// in the given RangeTable.
+func NotIn(rt *unicode.RangeTable) Set {
+	return setFunc(func(r rune) bool { return !unicode.Is(rt, r) })
+}
+
+// Predicate creates a Set with a Contains method that returns f(r).
+func Predicate(f func(rune) bool) Set {
+	return setFunc(f)
+}
+
+// Transformer implements the transform.Transformer interface.
+type Transformer struct {
+	t transform.SpanningTransformer
+}
+
+func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	return t.t.Transform(dst, src, atEOF)
+}
+
+func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
+	return t.t.Span(b, atEOF)
+}
+
+func (t Transformer) Reset() { t.t.Reset() }
+
+// Bytes returns a new byte slice with the result of converting b using t. It
+// calls Reset on t. It returns nil if any error was found. This can only happen
+// if an error-producing Transformer is passed to If.
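+//
+// For example (an illustrative sketch, not part of the upstream docs):
+//
+//	t := Remove(In(unicode.White_Space))
+//	t.Bytes([]byte("a b\tc")) // -> []byte("abc")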
+func (t Transformer) Bytes(b []byte) []byte {
+	b, _, err := transform.Bytes(t, b)
+	if err != nil {
+		return nil
+	}
+	return b
+}
+
+// String returns a string with the result of converting s using t. It calls
+// Reset on t. It returns the empty string if any error was found. This can only
+// happen if an error-producing Transformer is passed to If.
+func (t Transformer) String(s string) string {
+	s, _, err := transform.String(t, s)
+	if err != nil {
+		return ""
+	}
+	return s
+}
+
+// TODO:
+// - Copy: copying strings and bytes in whole-rune units.
+// - Validation (maybe)
+// - Well-formed-ness (maybe)
+
+const runeErrorString = string(utf8.RuneError)
+
+// Remove returns a Transformer that removes runes r for which s.Contains(r).
+// Illegal input bytes are replaced by RuneError before being passed to s.Contains.
+func Remove(s Set) Transformer {
+	if f, ok := s.(setFunc); ok {
+		// This little trick cuts the running time of BenchmarkRemove for sets
+		// created by Predicate roughly in half.
+		// TODO: special-case RangeTables as well.
+		return Transformer{remove(f)}
+	}
+	return Transformer{remove(s.Contains)}
+}
+
+// TODO: remove transform.RemoveFunc.
+
+type remove func(r rune) bool
+
+func (remove) Reset() {}
+
+// Span implements transform.Spanner.
+func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
+	for r, size := rune(0), 0; n < len(src); {
+		if r = rune(src[n]); r < utf8.RuneSelf {
+			size = 1
+		} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
+			// Invalid rune.
+			if !atEOF && !utf8.FullRune(src[n:]) {
+				err = transform.ErrShortSrc
+			} else {
+				err = transform.ErrEndOfSpan
+			}
+			break
+		}
+		if t(r) {
+			err = transform.ErrEndOfSpan
+			break
+		}
+		n += size
+	}
+	return
+}
+
+// Transform implements transform.Transformer.
+func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+	for r, size := rune(0), 0; nSrc < len(src); {
+		if r = rune(src[nSrc]); r < utf8.RuneSelf {
+			size = 1
+		} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
+			// Invalid rune.
+			if !atEOF && !utf8.FullRune(src[nSrc:]) {
+				err = transform.ErrShortSrc
+				break
+			}
+			// We replace illegal bytes with RuneError. Not doing so might
+			// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
+			// The resulting byte sequence may subsequently contain runes
+			// for which t(r) is true that were passed unnoticed.
+			if !t(utf8.RuneError) {
+				if nDst+3 > len(dst) {
+					err = transform.ErrShortDst
+					break
+				}
+				dst[nDst+0] = runeErrorString[0]
+				dst[nDst+1] = runeErrorString[1]
+				dst[nDst+2] = runeErrorString[2]
+				nDst += 3
+			}
+			nSrc++
+			continue
+		}
+		if t(r) {
+			nSrc += size
+			continue
+		}
+		if nDst+size > len(dst) {
+			err = transform.ErrShortDst
+			break
+		}
+		for i := 0; i < size; i++ {
+			dst[nDst] = src[nSrc]
+			nDst++
+			nSrc++
+		}
+	}
+	return
+}
+
+// Map returns a Transformer that maps the runes in the input using the given
+// mapping. Illegal bytes in the input are converted to utf8.RuneError before
+// being passed to the mapping func.
+func Map(mapping func(rune) rune) Transformer {
+	return Transformer{mapper(mapping)}
+}
+
+type mapper func(rune) rune
+
+func (mapper) Reset() {}
+
+// Span implements transform.Spanner.
+func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
+	for r, size := rune(0), 0; n < len(src); n += size {
+		if r = rune(src[n]); r < utf8.RuneSelf {
+			size = 1
+		} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
+			// Invalid rune.
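+			// (utf8.DecodeRune reports size == 1 together with RuneError
+			// only for ill-formed input, which is what this branch detects.)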
+ if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + } else { + err = transform.ErrEndOfSpan + } + break + } + if t(r) != r { + err = transform.ErrEndOfSpan + break + } + } + return n, err +} + +// Transform implements transform.Transformer. +func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + var replacement rune + var b [utf8.UTFMax]byte + + for r, size := rune(0), 0; nSrc < len(src); { + if r = rune(src[nSrc]); r < utf8.RuneSelf { + if replacement = t(r); replacement < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = byte(replacement) + nDst++ + nSrc++ + continue + } + size = 1 + } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 { + // Invalid rune. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + if replacement = t(utf8.RuneError); replacement == utf8.RuneError { + if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + continue + } + } else if replacement = t(r); replacement == r { + if nDst+size > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < size; i++ { + dst[nDst] = src[nSrc] + nDst++ + nSrc++ + } + continue + } + + n := utf8.EncodeRune(b[:], replacement) + + if nDst+n > len(dst) { + err = transform.ErrShortDst + break + } + for i := 0; i < n; i++ { + dst[nDst] = b[i] + nDst++ + } + nSrc += size + } + return +} + +// ReplaceIllFormed returns a transformer that replaces all input bytes that are +// not part of a well-formed UTF-8 code sequence with utf8.RuneError. +func ReplaceIllFormed() Transformer { + return Transformer{&replaceIllFormed{}} +} + +type replaceIllFormed struct{ transform.NopResetter } + +func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) { + for n < len(src) { + // ASCII fast path. + if src[n] < utf8.RuneSelf { + n++ + continue + } + + r, size := utf8.DecodeRune(src[n:]) + + // Look for a valid non-ASCII rune. + if r != utf8.RuneError || size != 1 { + n += size + continue + } + + // Look for short source data. + if !atEOF && !utf8.FullRune(src[n:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. + err = transform.ErrEndOfSpan + break + } + return n, err +} + +func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { + for nSrc < len(src) { + // ASCII fast path. + if r := src[nSrc]; r < utf8.RuneSelf { + if nDst == len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst] = r + nDst++ + nSrc++ + continue + } + + // Look for a valid non-ASCII rune. + if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 { + if size != copy(dst[nDst:], src[nSrc:nSrc+size]) { + err = transform.ErrShortDst + break + } + nDst += size + nSrc += size + continue + } + + // Look for short source data. + if !atEOF && !utf8.FullRune(src[nSrc:]) { + err = transform.ErrShortSrc + break + } + + // We have an invalid rune. 
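+		// Replace the single ill-formed byte with the three-byte UTF-8
+		// encoding of utf8.RuneError.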
+ if nDst+3 > len(dst) { + err = transform.ErrShortDst + break + } + dst[nDst+0] = runeErrorString[0] + dst[nDst+1] = runeErrorString[1] + dst[nDst+2] = runeErrorString[2] + nDst += 3 + nSrc++ + } + return nDst, nSrc, err +} diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS deleted file mode 100644 index 15167cd7..00000000 --- a/vendor/golang.org/x/time/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS deleted file mode 100644 index 1c4577e9..00000000 --- a/vendor/golang.org/x/time/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index e77ade39..f0e0cf3c 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -80,6 +80,19 @@ func (lim *Limiter) Burst() int { return lim.burst } +// TokensAt returns the number of tokens available at time t. +func (lim *Limiter) TokensAt(t time.Time) float64 { + lim.mu.Lock() + _, tokens := lim.advance(t) // does not mutate lim + lim.mu.Unlock() + return tokens +} + +// Tokens returns the number of tokens available now. +func (lim *Limiter) Tokens() float64 { + return lim.TokensAt(time.Now()) +} + // NewLimiter returns a new Limiter that allows events up to rate r and permits // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { @@ -89,16 +102,16 @@ func NewLimiter(r Limit, b int) *Limiter { } } -// Allow is shorthand for AllowN(time.Now(), 1). +// Allow reports whether an event may happen now. func (lim *Limiter) Allow() bool { return lim.AllowN(time.Now(), 1) } -// AllowN reports whether n events may happen at time now. +// AllowN reports whether n events may happen at time t. // Use this method if you intend to drop / skip events that exceed the rate limit. // Otherwise use Reserve or Wait. -func (lim *Limiter) AllowN(now time.Time, n int) bool { - return lim.reserveN(now, n, 0).ok +func (lim *Limiter) AllowN(t time.Time, n int) bool { + return lim.reserveN(t, n, 0).ok } // A Reservation holds information about events that are permitted by a Limiter to happen after a delay. @@ -125,17 +138,17 @@ func (r *Reservation) Delay() time.Duration { } // InfDuration is the duration returned by Delay when a Reservation is not OK. -const InfDuration = time.Duration(1<<63 - 1) +const InfDuration = time.Duration(math.MaxInt64) // DelayFrom returns the duration for which the reservation holder must wait // before taking the reserved action. Zero duration means act immediately. // InfDuration means the limiter cannot grant the tokens requested in this // Reservation within the maximum wait time. 
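+// A typical pattern (a sketch based on the Reserve documentation):
+//
+//	r := lim.Reserve()
+//	if !r.OK() {
+//		return // the request exceeds the limiter's burst size
+//	}
+//	time.Sleep(r.Delay())
+//	// proceed with the rate-limited action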
-func (r *Reservation) DelayFrom(now time.Time) time.Duration { +func (r *Reservation) DelayFrom(t time.Time) time.Duration { if !r.ok { return InfDuration } - delay := r.timeToAct.Sub(now) + delay := r.timeToAct.Sub(t) if delay < 0 { return 0 } @@ -150,7 +163,7 @@ func (r *Reservation) Cancel() { // CancelAt indicates that the reservation holder will not perform the reserved action // and reverses the effects of this Reservation on the rate limit as much as possible, // considering that other reservations may have already been made. -func (r *Reservation) CancelAt(now time.Time) { +func (r *Reservation) CancelAt(t time.Time) { if !r.ok { return } @@ -158,7 +171,7 @@ func (r *Reservation) CancelAt(now time.Time) { r.lim.mu.Lock() defer r.lim.mu.Unlock() - if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(t) { return } @@ -170,18 +183,18 @@ func (r *Reservation) CancelAt(now time.Time) { return } // advance time to now - now, _, tokens := r.lim.advance(now) + t, tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { tokens = burst } // update state - r.lim.last = now + r.lim.last = t r.lim.tokens = tokens if r.timeToAct == r.lim.lastEvent { prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) - if !prevEvent.Before(now) { + if !prevEvent.Before(t) { r.lim.lastEvent = prevEvent } } @@ -208,8 +221,8 @@ func (lim *Limiter) Reserve() *Reservation { // Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. // If you need to respect a deadline or cancel the delay, use Wait instead. // To drop or skip events exceeding rate limit, use Allow instead. -func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { - r := lim.reserveN(now, n, InfDuration) +func (lim *Limiter) ReserveN(t time.Time, n int) *Reservation { + r := lim.reserveN(t, n, InfDuration) return &r } @@ -223,6 +236,18 @@ func (lim *Limiter) Wait(ctx context.Context) (err error) { // canceled, or the expected wait time exceeds the Context's Deadline. // The burst limit is ignored if the rate limit is Inf. func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + // The test code calls lim.wait with a fake timer generator. + // This is the real timer generator. + newTimer := func(d time.Duration) (<-chan time.Time, func() bool, func()) { + timer := time.NewTimer(d) + return timer.C, timer.Stop, func() {} + } + + return lim.wait(ctx, n, time.Now(), newTimer) +} + +// wait is the internal implementation of WaitN. 
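+// It takes the caller's notion of the current time and a newTimer constructor
+// so that tests can drive it with a fake clock and fake timers; the third
+// function returned by newTimer advances a fake clock and is a no-op in
+// production.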
+func (lim *Limiter) wait(ctx context.Context, n int, t time.Time, newTimer func(d time.Duration) (<-chan time.Time, func() bool, func())) error { lim.mu.Lock() burst := lim.burst limit := lim.limit @@ -238,25 +263,25 @@ func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { default: } // Determine wait limit - now := time.Now() waitLimit := InfDuration if deadline, ok := ctx.Deadline(); ok { - waitLimit = deadline.Sub(now) + waitLimit = deadline.Sub(t) } // Reserve - r := lim.reserveN(now, n, waitLimit) + r := lim.reserveN(t, n, waitLimit) if !r.ok { return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) } // Wait if necessary - delay := r.DelayFrom(now) + delay := r.DelayFrom(t) if delay == 0 { return nil } - t := time.NewTimer(delay) - defer t.Stop() + ch, stop, advance := newTimer(delay) + defer stop() + advance() // only has an effect when testing select { - case <-t.C: + case <-ch: // We can proceed. return nil case <-ctx.Done(): @@ -275,13 +300,13 @@ func (lim *Limiter) SetLimit(newLimit Limit) { // SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated // or underutilized by those which reserved (using Reserve or Wait) but did not yet act // before SetLimitAt was called. -func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { +func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.limit = newLimit } @@ -292,13 +317,13 @@ func (lim *Limiter) SetBurst(newBurst int) { } // SetBurstAt sets a new burst size for the limiter. -func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { +func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - now, _, tokens := lim.advance(now) + t, tokens := lim.advance(t) - lim.last = now + lim.last = t lim.tokens = tokens lim.burst = newBurst } @@ -306,7 +331,7 @@ func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { // reserveN is a helper method for AllowN, ReserveN, and WaitN. // maxFutureReserve specifies the maximum reservation wait duration allowed. // reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. -func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { +func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) Reservation { lim.mu.Lock() defer lim.mu.Unlock() @@ -315,7 +340,7 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: true, lim: lim, tokens: n, - timeToAct: now, + timeToAct: t, } } else if lim.limit == 0 { var ok bool @@ -327,11 +352,11 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio ok: ok, lim: lim, tokens: lim.burst, - timeToAct: now, + timeToAct: t, } } - now, last, tokens := lim.advance(now) + t, tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. 
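+	// If the subtraction below leaves tokens negative, the size of the
+	// deficit determines how long the caller must wait for the reservation.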
tokens -= float64(n) @@ -353,16 +378,12 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio } if ok { r.tokens = n - r.timeToAct = now.Add(waitDuration) - } + r.timeToAct = t.Add(waitDuration) - // Update state - if ok { - lim.last = now + // Update state + lim.last = t lim.tokens = tokens lim.lastEvent = r.timeToAct - } else { - lim.last = last } return r @@ -371,20 +392,20 @@ func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duratio // advance calculates and returns an updated state for lim resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { last := lim.last - if now.Before(last) { - last = now + if t.Before(last) { + last = t } // Calculate the new number of tokens, due to time that passed. - elapsed := now.Sub(last) + elapsed := t.Sub(last) delta := lim.limit.tokensFromDuration(elapsed) tokens := lim.tokens + delta if burst := float64(lim.burst); tokens > burst { tokens = burst } - return now, last, tokens + return t, tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration diff --git a/vendor/golang.org/x/time/rate/sometimes.go b/vendor/golang.org/x/time/rate/sometimes.go new file mode 100644 index 00000000..6ba99ddb --- /dev/null +++ b/vendor/golang.org/x/time/rate/sometimes.go @@ -0,0 +1,67 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rate + +import ( + "sync" + "time" +) + +// Sometimes will perform an action occasionally. The First, Every, and +// Interval fields govern the behavior of Do, which performs the action. +// A zero Sometimes value will perform an action exactly once. +// +// # Example: logging with rate limiting +// +// var sometimes = rate.Sometimes{First: 3, Interval: 10*time.Second} +// func Spammy() { +// sometimes.Do(func() { log.Info("here I am!") }) +// } +type Sometimes struct { + First int // if non-zero, the first N calls to Do will run f. + Every int // if non-zero, every Nth call to Do will run f. + Interval time.Duration // if non-zero and Interval has elapsed since f's last run, Do will run f. + + mu sync.Mutex + count int // number of Do calls + last time.Time // last time f was run +} + +// Do runs the function f as allowed by First, Every, and Interval. +// +// The model is a union (not intersection) of filters. The first call to Do +// always runs f. Subsequent calls to Do run f if allowed by First or Every or +// Interval. +// +// A non-zero First:N causes the first N Do(f) calls to run f. +// +// A non-zero Every:M causes every Mth Do(f) call, starting with the first, to +// run f. +// +// A non-zero Interval causes Do(f) to run f if Interval has elapsed since +// Do last ran f. +// +// Specifying multiple filters produces the union of these execution streams. +// For example, specifying both First:N and Every:M causes the first N Do(f) +// calls and every Mth Do(f) call, starting with the first, to run f. See +// Examples for more. +// +// If Do is called multiple times simultaneously, the calls will block and run +// serially. Therefore, Do is intended for lightweight operations. +// +// Because a call to Do may block until f returns, if f causes Do to be called, +// it will deadlock. 
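+// For instance (illustrative): Sometimes{Every: 100} runs f on the 1st, 101st,
+// 201st, ... calls to Do.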
+func (s *Sometimes) Do(f func()) { + s.mu.Lock() + defer s.mu.Unlock() + if s.count == 0 || + (s.First > 0 && s.count < s.First) || + (s.Every > 0 && s.count%s.Every == 0) || + (s.Interval > 0 && time.Since(s.last) >= s.Interval) { + f() + s.last = time.Now() + } + s.count++ +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go new file mode 100644 index 00000000..2741ee2c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/apimachinery/pkg/apis/meta/v1 + +package internalversion // import "k8s.io/apimachinery/pkg/apis/meta/internalversion" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go new file mode 100644 index 00000000..a59ac712 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/register.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// addToGroupVersion registers common meta types into schemas. +func addToGroupVersion(scheme *runtime.Scheme) error { + if err := scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{}); err != nil { + return err + } + // ListOptions is the only options struct which needs conversion (it exposes labels and fields + // as selectors for convenience). 
The other types have only a single representation today.
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&ListOptions{},
+		&metav1.GetOptions{},
+		&metav1.DeleteOptions{},
+		&metav1.CreateOptions{},
+		&metav1.UpdateOptions{},
+	)
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&metav1.Table{},
+		&metav1.TableOptions{},
+		&metav1beta1.PartialObjectMetadata{},
+		&metav1beta1.PartialObjectMetadataList{},
+	)
+	if err := metav1beta1.AddMetaToScheme(scheme); err != nil {
+		return err
+	}
+	if err := metav1.AddMetaToScheme(scheme); err != nil {
+		return err
+	}
+	// Allow delete options to be decoded across all versions in this scheme (we may want to be more clever than this)
+	scheme.AddUnversionedTypes(SchemeGroupVersion,
+		&metav1.DeleteOptions{},
+		&metav1.CreateOptions{},
+		&metav1.UpdateOptions{})
+
+	metav1.AddToGroupVersion(scheme, metav1.SchemeGroupVersion)
+	if err := metav1beta1.RegisterConversions(scheme); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Unlike other API groups, meta internal knows about all meta external versions, but keeps
+// the logic for conversion private.
+func init() {
+	localSchemeBuilder.Register(addToGroupVersion)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go
new file mode 100644
index 00000000..a45fa2a8
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/doc.go
@@ -0,0 +1,17 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme // import "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme"
diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go
new file mode 100644
index 00000000..472a9aeb
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme/register.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme
+
+import (
+	"k8s.io/apimachinery/pkg/apis/meta/internalversion"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// Scheme is the registry for any type that adheres to the meta API spec.
+var scheme = runtime.NewScheme()
+
+// Codecs provides access to encoding and decoding for the scheme.
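+// For example (illustrative): Codecs.UniversalDecoder() returns a decoder that
+// can handle any of the versions registered into scheme above.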
+var Codecs = serializer.NewCodecFactory(scheme) + +// ParameterCodec handles versioning of objects that are converted to query parameters. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +// Unlike other API groups, meta internal knows about all meta external versions, but keeps +// the logic for conversion private. +func init() { + utilruntime.Must(internalversion.AddToScheme(scheme)) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go new file mode 100644 index 00000000..a49b5f2b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/types.go @@ -0,0 +1,80 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internalversion + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ListOptions is the query options to a standard REST list call. +type ListOptions struct { + metav1.TypeMeta + + // A selector based on labels + LabelSelector labels.Selector + // A selector based on fields + FieldSelector fields.Selector + // If true, watch for changes to this list + Watch bool + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. + // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + AllowWatchBookmarks bool + // resourceVersion sets a constraint on what resource versions a request may be served from. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for + // details. + ResourceVersion string + // resourceVersionMatch determines how resourceVersion is applied to list calls. + // It is highly recommended that resourceVersionMatch be set for list calls where + // resourceVersion is set. + // See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for + // details. + ResourceVersionMatch metav1.ResourceVersionMatch + + // Timeout for the list/watch call. + TimeoutSeconds *int64 + // Limit specifies the maximum number of results to return from the server. The server may + // not support this field on all resource types, but if it does and more results remain it + // will set the continue field on the returned list object. + Limit int64 + // Continue is a token returned by the server that lets a client retrieve chunks of results + // from the server by specifying limit. 
The server may reject requests for continuation tokens + // it does not recognize and will return a 410 error if the token can no longer be used because + // it has expired. + Continue string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. +type List struct { + metav1.TypeMeta + // +optional + metav1.ListMeta + + Items []runtime.Object +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go new file mode 100644 index 00000000..6d212b84 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.conversion.go @@ -0,0 +1,146 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package internalversion + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*List)(nil), (*v1.List)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_internalversion_List_To_v1_List(a.(*List), b.(*v1.List), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.List)(nil), (*List)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_List_To_internalversion_List(a.(*v1.List), b.(*List), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ListOptions)(nil), (*v1.ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_internalversion_ListOptions_To_v1_ListOptions(a.(*ListOptions), b.(*v1.ListOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ListOptions)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ListOptions_To_internalversion_ListOptions(a.(*v1.ListOptions), b.(*ListOptions), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_internalversion_List_To_v1_List is an autogenerated conversion function. 
+func Convert_internalversion_List_To_v1_List(in *List, out *v1.List, s conversion.Scope) error { + return autoConvert_internalversion_List_To_v1_List(in, out, s) +} + +func autoConvert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_List_To_internalversion_List is an autogenerated conversion function. +func Convert_v1_List_To_internalversion_List(in *v1.List, out *List, s conversion.Scope) error { + return autoConvert_v1_List_To_internalversion_List(in, out, s) +} + +func autoConvert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error { + if err := v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.Watch = in.Watch + out.AllowWatchBookmarks = in.AllowWatchBookmarks + out.ResourceVersion = in.ResourceVersion + out.ResourceVersionMatch = v1.ResourceVersionMatch(in.ResourceVersionMatch) + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + out.Limit = in.Limit + out.Continue = in.Continue + return nil +} + +// Convert_internalversion_ListOptions_To_v1_ListOptions is an autogenerated conversion function. +func Convert_internalversion_ListOptions_To_v1_ListOptions(in *ListOptions, out *v1.ListOptions, s conversion.Scope) error { + return autoConvert_internalversion_ListOptions_To_v1_ListOptions(in, out, s) +} + +func autoConvert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error { + if err := v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.Watch = in.Watch + out.AllowWatchBookmarks = in.AllowWatchBookmarks + out.ResourceVersion = in.ResourceVersion + out.ResourceVersionMatch = v1.ResourceVersionMatch(in.ResourceVersionMatch) + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + out.Limit = in.Limit + out.Continue = in.Continue + return nil +} + +// Convert_v1_ListOptions_To_internalversion_ListOptions is an autogenerated conversion function. +func Convert_v1_ListOptions_To_internalversion_ListOptions(in *v1.ListOptions, out *ListOptions, s conversion.Scope) error { + return autoConvert_v1_ListOptions_To_internalversion_ListOptions(in, out, s) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go new file mode 100644 index 00000000..6e1eac5c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -0,0 +1,97 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package internalversion + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if (*in)[i] != nil { + (*out)[i] = (*in)[i].DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. +func (in *List) DeepCopy() *List { + if in == nil { + return nil + } + out := new(List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListOptions) DeepCopyInto(out *ListOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.LabelSelector != nil { + out.LabelSelector = in.LabelSelector.DeepCopySelector() + } + if in.FieldSelector != nil { + out.FieldSelector = in.FieldSelector.DeepCopySelector() + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. +func (in *ListOptions) DeepCopy() *ListOptions { + if in == nil { + return nil + } + out := new(ListOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ListOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go new file mode 100644 index 00000000..5cac6fba --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + "unsafe" + + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" +) + +// Convert_Slice_string_To_v1beta1_IncludeObjectPolicy allows converting a URL query parameter value +func Convert_Slice_string_To_v1beta1_IncludeObjectPolicy(in *[]string, out *IncludeObjectPolicy, s conversion.Scope) error { + if len(*in) > 0 { + *out = IncludeObjectPolicy((*in)[0]) + } + return nil +} + +// Convert_v1beta1_PartialObjectMetadataList_To_v1_PartialObjectMetadataList allows converting PartialObjectMetadataList between versions +func Convert_v1beta1_PartialObjectMetadataList_To_v1_PartialObjectMetadataList(in *PartialObjectMetadataList, out *v1.PartialObjectMetadataList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.PartialObjectMetadata)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_PartialObjectMetadataList_To_v1beta1_PartialObjectMetadataList allows converting PartialObjectMetadataList between versions +func Convert_v1_PartialObjectMetadataList_To_v1beta1_PartialObjectMetadataList(in *v1.PartialObjectMetadataList, out *PartialObjectMetadataList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.PartialObjectMetadata)(unsafe.Pointer(&in.Items)) + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go new file mode 100644 index 00000000..2b7e8ca0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go @@ -0,0 +1,17 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go new file mode 100644 index 00000000..20c9d2ec --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=meta.k8s.io + +package v1beta1 // import "k8s.io/apimachinery/pkg/apis/meta/v1beta1" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go new file mode 100644 index 00000000..a2abc67c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -0,0 +1,412 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } +func (*PartialObjectMetadataList) ProtoMessage() {} +func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { + return fileDescriptor_90ec10f86b91f9a8, []int{0} +} +func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) +} +func (m *PartialObjectMetadataList) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) +} + +var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptor_90ec10f86b91f9a8) +} + +var fileDescriptor_90ec10f86b91f9a8 = []byte{ + // 317 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x4b, 0xf3, 0x30, + 0x1c, 0xc6, 0x9b, 0xf7, 0x65, 0x30, 0x3a, 0x04, 0xd9, 0x69, 0xee, 0x90, 0x0d, 0x4f, 0xdb, 0xc1, + 0x84, 0x0d, 0x11, 0xc1, 0xdb, 0x6e, 0x82, 0x32, 0xd9, 0x51, 0x3c, 0x98, 0x76, 0x7f, 0xbb, 0x58, + 0xd3, 0x94, 0xe4, 0xdf, 0x81, 0x37, 0x3f, 0x82, 0x1f, 0x6b, 0xc7, 0x1d, 0x07, 0xc2, 0x70, 0xf5, + 0x8b, 0x48, 0xda, 0x2a, 0x32, 0x14, 0x7a, 0xeb, 0xf3, 0x94, 0xdf, 0x2f, 0x4f, 0x20, 0xfe, 0x2c, + 0x3e, 0xb7, 0x4c, 0x6a, 0x1e, 0x67, 0x01, 0x98, 0x04, 0x10, 0x2c, 0x5f, 0x42, 0x32, 0xd7, 0x86, + 0x57, 0x3f, 0x44, 0x2a, 0x95, 0x08, 0x17, 0x32, 0x01, 0xf3, 0xcc, 0xd3, 0x38, 0x72, 0x85, 0xe5, + 0x0a, 0x50, 0xf0, 0xe5, 0x28, 0x00, 0x14, 0x23, 0x1e, 0x41, 0x02, 0x46, 0x20, 0xcc, 0x59, 0x6a, + 0x34, 0xea, 0xf6, 0xb0, 0x44, 0xd9, 0x4f, 0x94, 0xa5, 0x71, 0xe4, 0x0a, 0xcb, 0x1c, 0xca, 0x2a, + 0xb4, 0x7b, 0x12, 0x49, 0x5c, 0x64, 0x01, 0x0b, 0xb5, 0xe2, 0x91, 0x8e, 0x34, 0x2f, 0x0c, 0x41, + 0xf6, 0x50, 0xa4, 0x22, 0x14, 0x5f, 0xa5, 0xb9, 0x7b, 0x5a, 0x67, 0xd4, 0xfe, 0x9e, 0xee, 0xd9, + 0x5f, 0x94, 0xc9, 0x12, 0x94, 0x0a, 0xb8, 0x0d, 0x17, 0xa0, 0xc4, 0x3e, 0x77, 0xfc, 0x46, 0xfc, + 0xa3, 0x1b, 0x61, 0x50, 0x8a, 0xa7, 0x69, 0xf0, 0x08, 0x21, 0x5e, 0x03, 0x8a, 0xb9, 0x40, 0x71, + 0x25, 0x2d, 0xb6, 0xef, 0xfc, 0xa6, 0xaa, 0x72, 0xe7, 0x5f, 0x9f, 0x0c, 0x5a, 0x63, 0xc6, 0xea, + 0x5c, 0x9c, 0x39, 0xda, 0x99, 0x26, 0x87, 0xab, 0x6d, 0xcf, 0xcb, 0xb7, 0xbd, 0xe6, 0x57, 0x33, + 0xfb, 0x36, 0xb6, 0xef, 0xfd, 0x86, 0x44, 0x50, 0xb6, 0x43, 0xfa, 0xff, 0x07, 0xad, 0xf1, 0x45, + 0x3d, 0xf5, 0xaf, 0x6b, 0x27, 0x07, 0xd5, 0x39, 0x8d, 0x4b, 0x67, 0x9c, 0x95, 0xe2, 0xc9, 0x74, + 0xb5, 0xa3, 0xde, 0x7a, 0x47, 0xbd, 0xcd, 0x8e, 0x7a, 0x2f, 0x39, 0x25, 0xab, 0x9c, 0x92, 0x75, + 0x4e, 0xc9, 0x26, 0xa7, 0xe4, 0x3d, 0xa7, 0xe4, 0xf5, 0x83, 0x7a, 0xb7, 0xc3, 0xda, 0xcf, 0xe0, + 0x33, 0x00, 0x00, 0xff, 0xff, 0x30, 0x97, 0x8b, 0x11, 0x4b, 0x02, 0x00, 0x00, +} + +func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PartialObjectMetadataList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PartialObjectMetadataList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PartialObjectMetadata{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, v1.PartialObjectMetadata{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto new file mode 100644 index 00000000..d14d4259 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -0,0 +1,41 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.apimachinery.pkg.apis.meta.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/apimachinery/pkg/apis/meta/v1beta1"; + +// PartialObjectMetadataList contains a list of objects containing only their metadata. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadataList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 2; + + // items contains each of the included items. + repeated k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata items = 1; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go new file mode 100644 index 00000000..a4a412e3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// AddMetaToScheme registers base meta types into schemas. +func AddMetaToScheme(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Table{}, + &TableOptions{}, + &PartialObjectMetadata{}, + &PartialObjectMetadataList{}, + ) + + return nil +} + +// RegisterConversions adds conversion functions to the given scheme. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*PartialObjectMetadataList)(nil), (*v1.PartialObjectMetadataList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PartialObjectMetadataList_To_v1_PartialObjectMetadataList(a.(*PartialObjectMetadataList), b.(*v1.PartialObjectMetadataList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.PartialObjectMetadataList)(nil), (*PartialObjectMetadataList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_PartialObjectMetadataList_To_v1beta1_PartialObjectMetadataList(a.(*v1.PartialObjectMetadataList), b.(*PartialObjectMetadataList), scope) + }); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go new file mode 100644 index 00000000..f16170a3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go @@ -0,0 +1,84 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package v1beta1 is alpha objects from meta that will be introduced. +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Table is a tabular representation of a set of API resources. The server transforms the +// object into a set of preferred columns for quickly reviewing the objects. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +protobuf=false +type Table = v1.Table + +// TableColumnDefinition contains information about a column returned in the Table. +// +protobuf=false +type TableColumnDefinition = v1.TableColumnDefinition + +// TableRow is an individual row in a table. +// +protobuf=false +type TableRow = v1.TableRow + +// TableRowCondition allows a row to be marked with additional information. +// +protobuf=false +type TableRowCondition = v1.TableRowCondition + +type RowConditionType = v1.RowConditionType + +type ConditionStatus = v1.ConditionStatus + +type IncludeObjectPolicy = v1.IncludeObjectPolicy + +// TableOptions are used when a Table is requested by the caller. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type TableOptions = v1.TableOptions + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadata = v1.PartialObjectMetadata + +// IMPORTANT: PartialObjectMetadataList has different protobuf field ids in v1beta1 than +// v1 because ListMeta was accidentally omitted prior to 1.15. Therefore this type must +// remain independent of v1.PartialObjectMetadataList to preserve mappings. + +// PartialObjectMetadataList contains a list of objects containing only their metadata. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadataList struct { + v1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + v1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,2,opt,name=metadata"` + + // items contains each of the included items. + Items []v1.PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` +} + +const ( + RowCompleted = v1.RowCompleted + + ConditionTrue = v1.ConditionTrue + ConditionFalse = v1.ConditionFalse + ConditionUnknown = v1.ConditionUnknown + + IncludeNone = v1.IncludeNone + IncludeMetadata = v1.IncludeMetadata + IncludeObject = v1.IncludeObject +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 00000000..ef7e7c1e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_PartialObjectMetadataList = map[string]string{ + "": "PartialObjectMetadataList contains a list of objects containing only their metadata.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "items contains each of the included items.", +} + +func (PartialObjectMetadataList) SwaggerDoc() map[string]string { + return map_PartialObjectMetadataList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000..972b7f03 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,60 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.PartialObjectMetadata, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList. +func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList { + if in == nil { + return nil + } + out := new(PartialObjectMetadataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go new file mode 100644 index 00000000..198b5be4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go @@ -0,0 +1,33 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go new file mode 100644 index 00000000..0d2f153b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/expiring.go @@ -0,0 +1,192 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "container/heap" + "sync" + "time" + + "k8s.io/utils/clock" +) + +// NewExpiring returns an initialized expiring cache. +func NewExpiring() *Expiring { + return NewExpiringWithClock(clock.RealClock{}) +} + +// NewExpiringWithClock is like NewExpiring but allows passing in a custom +// clock for testing. +func NewExpiringWithClock(clock clock.Clock) *Expiring { + return &Expiring{ + clock: clock, + cache: make(map[interface{}]entry), + } +} + +// Expiring is a map whose entries expire after a per-entry timeout. +type Expiring struct { + clock clock.Clock + + // mu protects the below fields + mu sync.RWMutex + // cache is the internal map that backs the cache. + cache map[interface{}]entry + // generation is used as a cheap resource version for cache entries. Cleanups + // are scheduled with a key and generation. When the cleanup runs, it first + // compares its generation with the current generation of the entry. It + // deletes the entry iff the generation matches. This prevents cleanups + // scheduled for earlier versions of an entry from deleting later versions of + // an entry when Set() is called multiple times with the same key. + // + // The integer value of the generation of an entry is meaningless. + generation uint64 + + heap expiringHeap +} + +type entry struct { + val interface{} + expiry time.Time + generation uint64 +} + +// Get looks up an entry in the cache. +func (c *Expiring) Get(key interface{}) (val interface{}, ok bool) { + c.mu.RLock() + defer c.mu.RUnlock() + e, ok := c.cache[key] + if !ok || !c.clock.Now().Before(e.expiry) { + return nil, false + } + return e.val, true +} + +// Set sets a key/value/expiry entry in the map, overwriting any previous entry +// with the same key. The entry expires at the given expiry time, but its TTL +// may be lengthened or shortened by additional calls to Set(). Garbage +// collection of expired entries occurs during calls to Set(), however calls to +// Get() will not return expired entries that have not yet been garbage +// collected. +func (c *Expiring) Set(key interface{}, val interface{}, ttl time.Duration) { + now := c.clock.Now() + expiry := now.Add(ttl) + + c.mu.Lock() + defer c.mu.Unlock() + + c.generation++ + + c.cache[key] = entry{ + val: val, + expiry: expiry, + generation: c.generation, + } + + // Run GC inline before pushing the new entry. + c.gc(now) + + heap.Push(&c.heap, &expiringHeapEntry{ + key: key, + expiry: expiry, + generation: c.generation, + }) +} + +// Delete deletes an entry in the map. +func (c *Expiring) Delete(key interface{}) { + c.mu.Lock() + defer c.mu.Unlock() + c.del(key, 0) +} + +// del deletes the entry for the given key. The generation argument is the +// generation of the entry that should be deleted. If the generation has been +// changed (e.g. if a set has occurred on an existing element but the old +// cleanup still runs), this is a noop. If the generation argument is 0, the +// entry's generation is ignored and the entry is deleted. +// +// del must be called under the write lock. 
+func (c *Expiring) del(key interface{}, generation uint64) { + e, ok := c.cache[key] + if !ok { + return + } + if generation != 0 && generation != e.generation { + return + } + delete(c.cache, key) +} + +// Len returns the number of items in the cache. +func (c *Expiring) Len() int { + c.mu.RLock() + defer c.mu.RUnlock() + return len(c.cache) +} + +func (c *Expiring) gc(now time.Time) { + for { + // Return from gc if the heap is empty or the next element is not yet + // expired. + // + // heap[0] is a peek at the next element in the heap, which is not obvious + // from looking at the (*expiringHeap).Pop() implementation below. + // heap.Pop() swaps the first entry with the last entry of the heap, then + // calls (*expiringHeap).Pop() which returns the last element. + if len(c.heap) == 0 || now.Before(c.heap[0].expiry) { + return + } + cleanup := heap.Pop(&c.heap).(*expiringHeapEntry) + c.del(cleanup.key, cleanup.generation) + } +} + +type expiringHeapEntry struct { + key interface{} + expiry time.Time + generation uint64 +} + +// expiringHeap is a min-heap ordered by expiration time of its entries. The +// expiring cache uses this as a priority queue to efficiently organize entries +// which will be garbage collected once they expire. +type expiringHeap []*expiringHeapEntry + +var _ heap.Interface = &expiringHeap{} + +func (cq expiringHeap) Len() int { + return len(cq) +} + +func (cq expiringHeap) Less(i, j int) bool { + return cq[i].expiry.Before(cq[j].expiry) +} + +func (cq expiringHeap) Swap(i, j int) { + cq[i], cq[j] = cq[j], cq[i] +} + +func (cq *expiringHeap) Push(c interface{}) { + *cq = append(*cq, c.(*expiringHeapEntry)) +} + +func (cq *expiringHeap) Pop() interface{} { + c := (*cq)[cq.Len()-1] + *cq = (*cq)[:cq.Len()-1] + return c +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go new file mode 100644 index 00000000..1328dd61 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/cache/lruexpirecache.go @@ -0,0 +1,160 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "container/list" + "sync" + "time" +) + +// Clock defines an interface for obtaining the current time +type Clock interface { + Now() time.Time +} + +// realClock implements the Clock interface by calling time.Now() +type realClock struct{} + +func (realClock) Now() time.Time { return time.Now() } + +// LRUExpireCache is a cache that ensures the mostly recently accessed keys are returned with +// a ttl beyond which keys are forcibly expired. 
+type LRUExpireCache struct { + // clock is used to obtain the current time + clock Clock + + lock sync.Mutex + + maxSize int + evictionList list.List + entries map[interface{}]*list.Element +} + +// NewLRUExpireCache creates an expiring cache with the given size +func NewLRUExpireCache(maxSize int) *LRUExpireCache { + return NewLRUExpireCacheWithClock(maxSize, realClock{}) +} + +// NewLRUExpireCacheWithClock creates an expiring cache with the given size, using the specified clock to obtain the current time. +func NewLRUExpireCacheWithClock(maxSize int, clock Clock) *LRUExpireCache { + if maxSize <= 0 { + panic("maxSize must be > 0") + } + + return &LRUExpireCache{ + clock: clock, + maxSize: maxSize, + entries: map[interface{}]*list.Element{}, + } +} + +type cacheEntry struct { + key interface{} + value interface{} + expireTime time.Time +} + +// Add adds the value to the cache at key with the specified maximum duration. +func (c *LRUExpireCache) Add(key interface{}, value interface{}, ttl time.Duration) { + c.lock.Lock() + defer c.lock.Unlock() + + // Key already exists + oldElement, ok := c.entries[key] + if ok { + c.evictionList.MoveToFront(oldElement) + oldElement.Value.(*cacheEntry).value = value + oldElement.Value.(*cacheEntry).expireTime = c.clock.Now().Add(ttl) + return + } + + // Make space if necessary + if c.evictionList.Len() >= c.maxSize { + toEvict := c.evictionList.Back() + c.evictionList.Remove(toEvict) + delete(c.entries, toEvict.Value.(*cacheEntry).key) + } + + // Add new entry + entry := &cacheEntry{ + key: key, + value: value, + expireTime: c.clock.Now().Add(ttl), + } + element := c.evictionList.PushFront(entry) + c.entries[key] = element +} + +// Get returns the value at the specified key from the cache if it exists and is not +// expired, or returns false. +func (c *LRUExpireCache) Get(key interface{}) (interface{}, bool) { + c.lock.Lock() + defer c.lock.Unlock() + + element, ok := c.entries[key] + if !ok { + return nil, false + } + + if c.clock.Now().After(element.Value.(*cacheEntry).expireTime) { + c.evictionList.Remove(element) + delete(c.entries, key) + return nil, false + } + + c.evictionList.MoveToFront(element) + + return element.Value.(*cacheEntry).value, true +} + +// Remove removes the specified key from the cache if it exists +func (c *LRUExpireCache) Remove(key interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + element, ok := c.entries[key] + if !ok { + return + } + + c.evictionList.Remove(element) + delete(c.entries, key) +} + +// Keys returns all unexpired keys in the cache. +// +// Keep in mind that subsequent calls to Get() for any of the returned keys +// might return "not found". +// +// Keys are returned ordered from least recently used to most recently used. +func (c *LRUExpireCache) Keys() []interface{} { + c.lock.Lock() + defer c.lock.Unlock() + + now := c.clock.Now() + + val := make([]interface{}, 0, c.evictionList.Len()) + for element := c.evictionList.Back(); element != nil; element = element.Prev() { + // Only return unexpired keys + if !now.After(element.Value.(*cacheEntry).expireTime) { + val = append(val, element.Value.(*cacheEntry).key) + } + } + + return val +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go new file mode 100644 index 00000000..ec4002e3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/diff/diff.go @@ -0,0 +1,157 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package diff + +import ( + "bytes" + "fmt" + "reflect" + "strings" + "text/tabwriter" + + "github.com/davecgh/go-spew/spew" + "github.com/google/go-cmp/cmp" +) + +// StringDiff diffs a and b and returns a human readable diff. +func StringDiff(a, b string) string { + ba := []byte(a) + bb := []byte(b) + out := []byte{} + i := 0 + for ; i < len(ba) && i < len(bb); i++ { + if ba[i] != bb[i] { + break + } + out = append(out, ba[i]) + } + out = append(out, []byte("\n\nA: ")...) + out = append(out, ba[i:]...) + out = append(out, []byte("\n\nB: ")...) + out = append(out, bb[i:]...) + out = append(out, []byte("\n\n")...) + return string(out) +} + +func legacyDiff(a, b interface{}) string { + return cmp.Diff(a, b) +} + +// ObjectDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff +func ObjectDiff(a, b interface{}) string { + return legacyDiff(a, b) +} + +// ObjectGoPrintDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff +func ObjectGoPrintDiff(a, b interface{}) string { + return legacyDiff(a, b) +} + +// ObjectReflectDiff prints the diff of two go objects and fails if the objects +// contain unhandled unexported fields. +// DEPRECATED: use github.com/google/go-cmp/cmp.Diff +func ObjectReflectDiff(a, b interface{}) string { + return legacyDiff(a, b) +} + +// ObjectGoPrintSideBySide prints a and b as textual dumps side by side, +// enabling easy visual scanning for mismatches. +func ObjectGoPrintSideBySide(a, b interface{}) string { + s := spew.ConfigState{ + Indent: " ", + // Extra deep spew. + DisableMethods: true, + } + sA := s.Sdump(a) + sB := s.Sdump(b) + + linesA := strings.Split(sA, "\n") + linesB := strings.Split(sB, "\n") + width := 0 + for _, s := range linesA { + l := len(s) + if l > width { + width = l + } + } + for _, s := range linesB { + l := len(s) + if l > width { + width = l + } + } + buf := &bytes.Buffer{} + w := tabwriter.NewWriter(buf, width, 0, 1, ' ', 0) + max := len(linesA) + if len(linesB) > max { + max = len(linesB) + } + for i := 0; i < max; i++ { + var a, b string + if i < len(linesA) { + a = linesA[i] + } + if i < len(linesB) { + b = linesB[i] + } + fmt.Fprintf(w, "%s\t%s\n", a, b) + } + w.Flush() + return buf.String() +} + +// IgnoreUnset is an option that ignores fields that are unset on the right +// hand side of a comparison. This is useful in testing to assert that an +// object is a derivative. 
+func IgnoreUnset() cmp.Option { + return cmp.Options{ + // ignore unset fields in v2 + cmp.FilterPath(func(path cmp.Path) bool { + _, v2 := path.Last().Values() + switch v2.Kind() { + case reflect.Slice, reflect.Map: + if v2.IsNil() || v2.Len() == 0 { + return true + } + case reflect.String: + if v2.Len() == 0 { + return true + } + case reflect.Interface, reflect.Pointer: + if v2.IsNil() { + return true + } + } + return false + }, cmp.Ignore()), + // ignore map entries that aren't set in v2 + cmp.FilterPath(func(path cmp.Path) bool { + switch i := path.Last().(type) { + case cmp.MapIndex: + if _, v2 := i.Values(); !v2.IsValid() { + fmt.Println("E") + return true + } + } + return false + }, cmp.Ignore()), + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS new file mode 100644 index 00000000..349bc69d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - pwittrock +reviewers: + - apelisse diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go new file mode 100644 index 00000000..16501d5a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go @@ -0,0 +1,102 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mergepatch + +import ( + "errors" + "fmt" + "reflect" +) + +var ( + ErrBadJSONDoc = errors.New("invalid JSON document") + ErrNoListOfLists = errors.New("lists of lists are not supported") + ErrBadPatchFormatForPrimitiveList = errors.New("invalid patch format of primitive list") + ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys") + ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list") + ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list") + ErrUnsupportedStrategicMergePatchFormat = errors.New("strategic merge patch format is not supported") +) + +func ErrNoMergeKey(m map[string]interface{}, k string) error { + return fmt.Errorf("map: %v does not contain declared merge key: %s", m, k) +} + +func ErrBadArgType(expected, actual interface{}) error { + return fmt.Errorf("expected a %s, but received a %s", + reflect.TypeOf(expected), + reflect.TypeOf(actual)) +} + +func ErrBadArgKind(expected, actual interface{}) error { + var expectedKindString, actualKindString string + if expected == nil { + expectedKindString = "nil" + } else { + expectedKindString = reflect.TypeOf(expected).Kind().String() + } + if actual == nil { + actualKindString = "nil" + } else { + actualKindString = reflect.TypeOf(actual).Kind().String() + } + return fmt.Errorf("expected a %s, but received a %s", expectedKindString, actualKindString) +} + +func ErrBadPatchType(t interface{}, m map[string]interface{}) error { + return fmt.Errorf("unknown patch type: %s in map: %v", t, m) +} + +// IsPreconditionFailed returns true if the provided error indicates +// a precondition failed. +func IsPreconditionFailed(err error) bool { + _, ok := err.(ErrPreconditionFailed) + return ok +} + +type ErrPreconditionFailed struct { + message string +} + +func NewErrPreconditionFailed(target map[string]interface{}) ErrPreconditionFailed { + s := fmt.Sprintf("precondition failed for: %v", target) + return ErrPreconditionFailed{s} +} + +func (err ErrPreconditionFailed) Error() string { + return err.message +} + +type ErrConflict struct { + message string +} + +func NewErrConflict(patch, current string) ErrConflict { + s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current) + return ErrConflict{s} +} + +func (err ErrConflict) Error() string { + return err.message +} + +// IsConflict returns true if the provided error indicates +// a conflict between the patch and the current configuration. +func IsConflict(err error) bool { + _, ok := err.(ErrConflict) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go new file mode 100644 index 00000000..e3962756 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/util.go @@ -0,0 +1,134 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mergepatch + +import ( + "fmt" + "reflect" + + "github.com/davecgh/go-spew/spew" + "sigs.k8s.io/yaml" +) + +// PreconditionFunc asserts that an incompatible change is not present within a patch. +type PreconditionFunc func(interface{}) bool + +// RequireKeyUnchanged returns a precondition function that fails if the provided key +// is present in the patch (indicating that its value has changed). +func RequireKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + + // The presence of key means that its value has been changed, so the test fails. + _, ok = patchMap[key] + return !ok + } +} + +// RequireMetadataKeyUnchanged creates a precondition function that fails +// if the metadata.key is present in the patch (indicating its value +// has changed). +func RequireMetadataKeyUnchanged(key string) PreconditionFunc { + return func(patch interface{}) bool { + patchMap, ok := patch.(map[string]interface{}) + if !ok { + return true + } + patchMap1, ok := patchMap["metadata"] + if !ok { + return true + } + patchMap2, ok := patchMap1.(map[string]interface{}) + if !ok { + return true + } + _, ok = patchMap2[key] + return !ok + } +} + +func ToYAMLOrError(v interface{}) string { + y, err := toYAML(v) + if err != nil { + return err.Error() + } + + return y +} + +func toYAML(v interface{}) (string, error) { + y, err := yaml.Marshal(v) + if err != nil { + return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v)) + } + + return string(y), nil +} + +// HasConflicts returns true if the left and right JSON interface objects overlap with +// different values in any key. All keys are required to be strings. Since patches of the +// same Type have congruent keys, this is valid for multiple patch types. This method +// supports JSON merge patch semantics. +// +// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts. +// +// Make sure the unmarshaling of left and right are consistent (e.g. use the same library). 
+func HasConflicts(left, right interface{}) (bool, error) { + switch typedLeft := left.(type) { + case map[string]interface{}: + switch typedRight := right.(type) { + case map[string]interface{}: + for key, leftValue := range typedLeft { + rightValue, ok := typedRight[key] + if !ok { + continue + } + if conflict, err := HasConflicts(leftValue, rightValue); err != nil || conflict { + return conflict, err + } + } + + return false, nil + default: + return true, nil + } + case []interface{}: + switch typedRight := right.(type) { + case []interface{}: + if len(typedLeft) != len(typedRight) { + return true, nil + } + + for i := range typedLeft { + if conflict, err := HasConflicts(typedLeft[i], typedRight[i]); err != nil || conflict { + return conflict, err + } + } + + return false, nil + default: + return true, nil + } + case string, float64, bool, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS new file mode 100644 index 00000000..4443bafd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/OWNERS @@ -0,0 +1,8 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - pwittrock +reviewers: + - apelisse +emeritus_approvers: + - mengqiy diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go new file mode 100644 index 00000000..ab66d045 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" +) + +type LookupPatchMetaError struct { + Path string + Err error +} + +func (e LookupPatchMetaError) Error() string { + return fmt.Sprintf("LookupPatchMetaError(%s): %v", e.Path, e.Err) +} + +type FieldNotFoundError struct { + Path string + Field string +} + +func (e FieldNotFoundError) Error() string { + return fmt.Sprintf("unable to find api field %q in %s", e.Field, e.Path) +} + +type InvalidTypeError struct { + Path string + Expected string + Actual string +} + +func (e InvalidTypeError) Error() string { + return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go new file mode 100644 index 00000000..df305b71 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -0,0 +1,194 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "errors" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/util/mergepatch" + forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +type PatchMeta struct { + patchStrategies []string + patchMergeKey string +} + +func (pm *PatchMeta) GetPatchStrategies() []string { + if pm.patchStrategies == nil { + return []string{} + } + return pm.patchStrategies +} + +func (pm *PatchMeta) SetPatchStrategies(ps []string) { + pm.patchStrategies = ps +} + +func (pm *PatchMeta) GetPatchMergeKey() string { + return pm.patchMergeKey +} + +func (pm *PatchMeta) SetPatchMergeKey(pmk string) { + pm.patchMergeKey = pmk +} + +type LookupPatchMeta interface { + // LookupPatchMetadataForStruct gets subschema and the patch metadata (e.g. patch strategy and merge key) for map. + LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) + // LookupPatchMetadataForSlice get subschema and the patch metadata for slice. + LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) + // Get the type name of the field + Name() string +} + +type PatchMetaFromStruct struct { + T reflect.Type +} + +func NewPatchMetaFromStruct(dataStruct interface{}) (PatchMetaFromStruct, error) { + t, err := getTagStructType(dataStruct) + return PatchMetaFromStruct{T: t}, err +} + +var _ LookupPatchMeta = PatchMetaFromStruct{} + +func (s PatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadataForStruct(s.T, key) + if err != nil { + return nil, PatchMeta{}, err + } + + return PatchMetaFromStruct{T: fieldType}, + PatchMeta{ + patchStrategies: fieldPatchStrategies, + patchMergeKey: fieldPatchMergeKey, + }, nil +} + +func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + subschema, patchMeta, err := s.LookupPatchMetadataForStruct(key) + if err != nil { + return nil, PatchMeta{}, err + } + elemPatchMetaFromStruct := subschema.(PatchMetaFromStruct) + t := elemPatchMetaFromStruct.T + + var elemType reflect.Type + switch t.Kind() { + // If t is an array or a slice, get the element type. + // If element is still an array or a slice, return an error. + // Otherwise, return element type. + case reflect.Array, reflect.Slice: + elemType = t.Elem() + if elemType.Kind() == reflect.Array || elemType.Kind() == reflect.Slice { + return nil, PatchMeta{}, errors.New("unexpected slice of slice") + } + // If t is an pointer, get the underlying element. + // If the underlying element is neither an array nor a slice, the pointer is pointing to a slice, + // e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822 + // If the underlying element is either an array or a slice, return its element type. 
+ case reflect.Pointer: + t = t.Elem() + if t.Kind() == reflect.Array || t.Kind() == reflect.Slice { + t = t.Elem() + } + elemType = t + default: + return nil, PatchMeta{}, fmt.Errorf("expected slice or array type, but got: %s", s.T.Kind().String()) + } + + return PatchMetaFromStruct{T: elemType}, patchMeta, nil +} + +func (s PatchMetaFromStruct) Name() string { + return s.T.Kind().String() +} + +func getTagStructType(dataStruct interface{}) (reflect.Type, error) { + if dataStruct == nil { + return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) + } + + t := reflect.TypeOf(dataStruct) + // Get the underlying type for pointers + if t.Kind() == reflect.Pointer { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) + } + + return t, nil +} + +func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { + t, err := getTagStructType(dataStruct) + if err != nil { + panic(err) + } + return t +} + +type PatchMetaFromOpenAPI struct { + Schema openapi.Schema +} + +func NewPatchMetaFromOpenAPI(s openapi.Schema) PatchMetaFromOpenAPI { + return PatchMetaFromOpenAPI{Schema: s} +} + +var _ LookupPatchMeta = PatchMetaFromOpenAPI{} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + kindItem := NewKindItem(key, s.Schema.GetPath()) + s.Schema.Accept(kindItem) + + err := kindItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: kindItem.subschema}, + kindItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + sliceItem := NewSliceItem(key, s.Schema.GetPath()) + s.Schema.Accept(sliceItem) + + err := sliceItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: sliceItem.subschema}, + sliceItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) Name() string { + schema := s.Schema + return schema.GetName() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go new file mode 100644 index 00000000..6fb36973 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -0,0 +1,2200 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "fmt" + "reflect" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/mergepatch" +) + +// An alternate implementation of JSON Merge Patch +// (https://tools.ietf.org/html/rfc7386) which supports the ability to annotate +// certain fields with metadata that indicates whether the elements of JSON +// lists should be merged or replaced. 
+// +// For more information, see the PATCH section of docs/devel/api-conventions.md. +// +// Some of the content of this package was borrowed with minor adaptations from +// evanphx/json-patch and openshift/origin. + +const ( + directiveMarker = "$patch" + deleteDirective = "delete" + replaceDirective = "replace" + mergeDirective = "merge" + + retainKeysStrategy = "retainKeys" + + deleteFromPrimitiveListDirectivePrefix = "$deleteFromPrimitiveList" + retainKeysDirective = "$" + retainKeysStrategy + setElementOrderDirectivePrefix = "$setElementOrder" +) + +// JSONMap is a representations of JSON object encoded as map[string]interface{} +// where the children can be either map[string]interface{}, []interface{} or +// primitive type). +// Operating on JSONMap representation is much faster as it doesn't require any +// json marshaling and/or unmarshaling operations. +type JSONMap map[string]interface{} + +type DiffOptions struct { + // SetElementOrder determines whether we generate the $setElementOrder parallel list. + SetElementOrder bool + // IgnoreChangesAndAdditions indicates if we keep the changes and additions in the patch. + IgnoreChangesAndAdditions bool + // IgnoreDeletions indicates if we keep the deletions in the patch. + IgnoreDeletions bool + // We introduce a new value retainKeys for patchStrategy. + // It indicates that all fields needing to be preserved must be + // present in the `retainKeys` list. + // And the fields that are present will be merged with live object. + // All the missing fields will be cleared when patching. + BuildRetainKeysDirective bool +} + +type MergeOptions struct { + // MergeParallelList indicates if we are merging the parallel list. + // We don't merge parallel list when calling mergeMap() in CreateThreeWayMergePatch() + // which is called client-side. + // We merge parallel list iff when calling mergeMap() in StrategicMergeMapPatch() + // which is called server-side + MergeParallelList bool + // IgnoreUnmatchedNulls indicates if we should process the unmatched nulls. + IgnoreUnmatchedNulls bool +} + +// The following code is adapted from github.com/openshift/origin/pkg/util/jsonmerge. +// Instead of defining a Delta that holds an original, a patch and a set of preconditions, +// the reconcile method accepts a set of preconditions as an argument. + +// CreateTwoWayMergePatch creates a patch that can be passed to StrategicMergePatch from an original +// document and a modified document, which are passed to the method as json encoded content. It will +// return a patch that yields the modified document when applied to the original document, or an error +// if either of the two documents is invalid. +func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return CreateTwoWayMergePatchUsingLookupPatchMeta(original, modified, schema, fns...) 
+} + +func CreateTwoWayMergePatchUsingLookupPatchMeta( + original, modified []byte, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + originalMap := map[string]interface{}{} + if len(original) > 0 { + if err := json.Unmarshal(original, &originalMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + modifiedMap := map[string]interface{}{} + if len(modified) > 0 { + if err := json.Unmarshal(modified, &modifiedMap); err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + } + + patchMap, err := CreateTwoWayMergeMapPatchUsingLookupPatchMeta(originalMap, modifiedMap, schema, fns...) + if err != nil { + return nil, err + } + + return json.Marshal(patchMap) +} + +// CreateTwoWayMergeMapPatch creates a patch from an original and modified JSON objects, +// encoded JSONMap. +// The serialized version of the map can then be passed to StrategicMergeMapPatch. +func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified, schema, fns...) +} + +func CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified JSONMap, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { + diffOptions := DiffOptions{ + SetElementOrder: true, + } + patchMap, err := diffMaps(original, modified, schema, diffOptions) + if err != nil { + return nil, err + } + + // Apply the preconditions to the patch, and return an error if any of them fail. + for _, fn := range fns { + if !fn(patchMap) { + return nil, mergepatch.NewErrPreconditionFailed(patchMap) + } + } + + return patchMap, nil +} + +// Returns a (recursive) strategic merge patch that yields modified when applied to original. +// Including: +// - Adding fields to the patch present in modified, missing from original +// - Setting fields to the patch present in modified and original with different values +// - Delete fields present in original, missing from modified through +// - IFF map field - set to nil in patch +// - IFF list of maps && merge strategy - use deleteDirective for the elements +// - IFF list of primitives && merge strategy - use parallel deletion list +// - IFF list of maps or primitives with replace strategy (default) - set patch value to the value in modified +// - Build $retainKeys directive for fields with retainKeys patch strategy +func diffMaps(original, modified map[string]interface{}, schema LookupPatchMeta, diffOptions DiffOptions) (map[string]interface{}, error) { + patch := map[string]interface{}{} + + // This will be used to build the $retainKeys directive sent in the patch + retainKeysList := make([]interface{}, 0, len(modified)) + + // Compare each value in the modified map against the value in the original map + for key, modifiedValue := range modified { + // Get the underlying type for pointers + if diffOptions.BuildRetainKeysDirective && modifiedValue != nil { + retainKeysList = append(retainKeysList, key) + } + + originalValue, ok := original[key] + if !ok { + // Key was added, so add to patch + if !diffOptions.IgnoreChangesAndAdditions { + patch[key] = modifiedValue + } + continue + } + + // The patch may have a patch directive + // TODO: figure out if we need this. This shouldn't be needed by apply. When would the original map have patch directives in it? 
+ foundDirectiveMarker, err := handleDirectiveMarker(key, originalValue, modifiedValue, patch) + if err != nil { + return nil, err + } + if foundDirectiveMarker { + continue + } + + if reflect.TypeOf(originalValue) != reflect.TypeOf(modifiedValue) { + // Types have changed, so add to patch + if !diffOptions.IgnoreChangesAndAdditions { + patch[key] = modifiedValue + } + continue + } + + // Types are the same, so compare values + switch originalValueTyped := originalValue.(type) { + case map[string]interface{}: + modifiedValueTyped := modifiedValue.(map[string]interface{}) + err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) + case []interface{}: + modifiedValueTyped := modifiedValue.([]interface{}) + err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) + default: + replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) + } + if err != nil { + return nil, err + } + } + + updatePatchIfMissing(original, modified, patch, diffOptions) + // Insert the retainKeysList iff there are values present in the retainKeysList and + // either of the following is true: + // - the patch is not empty + // - there are additional field in original that need to be cleared + if len(retainKeysList) > 0 && + (len(patch) > 0 || hasAdditionalNewField(original, modified)) { + patch[retainKeysDirective] = sortScalars(retainKeysList) + } + return patch, nil +} + +// handleDirectiveMarker handles how to diff directive marker between 2 objects +func handleDirectiveMarker(key string, originalValue, modifiedValue interface{}, patch map[string]interface{}) (bool, error) { + if key == directiveMarker { + originalString, ok := originalValue.(string) + if !ok { + return false, fmt.Errorf("invalid value for special key: %s", directiveMarker) + } + modifiedString, ok := modifiedValue.(string) + if !ok { + return false, fmt.Errorf("invalid value for special key: %s", directiveMarker) + } + if modifiedString != originalString { + patch[directiveMarker] = modifiedValue + } + return true, nil + } + return false, nil +} + +// handleMapDiff diff between 2 maps `originalValueTyped` and `modifiedValue`, +// puts the diff in the `patch` associated with `key` +// key is the key associated with originalValue and modifiedValue. +// originalValue, modifiedValue are the old and new value respectively.They are both maps +// patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue +// diffOptions contains multiple options to control how we do the diff. 
+func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]interface{},
+	schema LookupPatchMeta, diffOptions DiffOptions) error {
+	subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(key)
+
+	if err != nil {
+		// We couldn't look up metadata for the field
+		// If the values are identical, this doesn't matter, no patch is needed
+		if reflect.DeepEqual(originalValue, modifiedValue) {
+			return nil
+		}
+		// Otherwise, return the error
+		return err
+	}
+	retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
+	if err != nil {
+		return err
+	}
+	diffOptions.BuildRetainKeysDirective = retainKeys
+	switch patchStrategy {
+	// The patch strategy from the metadata tells us to replace the entire object instead of diffing it
+	case replaceDirective:
+		if !diffOptions.IgnoreChangesAndAdditions {
+			patch[key] = modifiedValue
+		}
+	default:
+		patchValue, err := diffMaps(originalValue, modifiedValue, subschema, diffOptions)
+		if err != nil {
+			return err
+		}
+		// Maps were not identical, use the computed patch value
+		if len(patchValue) > 0 {
+			patch[key] = patchValue
+		}
+	}
+	return nil
+}
+
+// handleSliceDiff diffs the 2 slices `originalValue` and `modifiedValue` and
+// puts the diff into `patch` under `key`.
+// key is the key associated with originalValue and modifiedValue.
+// originalValue and modifiedValue are the old and new value respectively. They are both slices.
+// patch is the patch map that contains key and the updated value; it is the parent of originalValue and modifiedValue.
+// diffOptions contains multiple options to control how the diff is done.
+func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, patch map[string]interface{},
+	schema LookupPatchMeta, diffOptions DiffOptions) error {
+	subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(key)
+	if err != nil {
+		// We couldn't look up metadata for the field
+		// If the values are identical, this doesn't matter, no patch is needed
+		if reflect.DeepEqual(originalValue, modifiedValue) {
+			return nil
+		}
+		// Otherwise, return the error
+		return err
+	}
+	retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
+	if err != nil {
+		return err
+	}
+	switch patchStrategy {
+	// Merge the 2 slices using mergePatchKey
+	case mergeDirective:
+		diffOptions.BuildRetainKeysDirective = retainKeys
+		addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, subschema, patchMeta.GetPatchMergeKey(), diffOptions)
+		if err != nil {
+			return err
+		}
+		if len(addList) > 0 {
+			patch[key] = addList
+		}
+		// generate a parallel list for deletion
+		if len(deletionList) > 0 {
+			parallelDeletionListKey := fmt.Sprintf("%s/%s", deleteFromPrimitiveListDirectivePrefix, key)
+			patch[parallelDeletionListKey] = deletionList
+		}
+		if len(setOrderList) > 0 {
+			parallelSetOrderListKey := fmt.Sprintf("%s/%s", setElementOrderDirectivePrefix, key)
+			patch[parallelSetOrderListKey] = setOrderList
+		}
+	default:
+		replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions)
+	}
+	return nil
+}
+
+// replacePatchFieldIfNotEqual updates the patch if original and modified are not deep equal
+// and diffOptions.IgnoreChangesAndAdditions is false.
+// original is the old value; it may be either the live cluster object or the last applied configuration.
+// modified is the new value; it is always the user's new config.
+func replacePatchFieldIfNotEqual(key string, original, modified interface{},
+	patch map[string]interface{}, diffOptions DiffOptions) {
+	if diffOptions.IgnoreChangesAndAdditions {
+		// Ignoring changes - do nothing
+		return
+	}
+	if reflect.DeepEqual(original, modified) {
+		// Contents are identical - do nothing
+		return
+	}
+	// Create a patch to replace the old value with the new one
+	patch[key] = modified
+}
+
+// updatePatchIfMissing iterates over `original` when ignoreDeletions is false.
+// It clears fields whose keys are not present in `modified`.
+// original is the old value; it may be either the live cluster object or the last applied configuration.
+// modified is the new value; it is always the user's new config.
+func updatePatchIfMissing(original, modified, patch map[string]interface{}, diffOptions DiffOptions) {
+	if diffOptions.IgnoreDeletions {
+		// Ignoring deletion - do nothing
+		return
+	}
+	// Add nils for deleted values
+	for key := range original {
+		if _, found := modified[key]; !found {
+			patch[key] = nil
+		}
+	}
+}
+
+// validateMergeKeyInLists checks if each map in the list has the merge key.
+func validateMergeKeyInLists(mergeKey string, lists ...[]interface{}) error {
+	for _, list := range lists {
+		for _, item := range list {
+			m, ok := item.(map[string]interface{})
+			if !ok {
+				return mergepatch.ErrBadArgType(m, item)
+			}
+			if _, ok = m[mergeKey]; !ok {
+				return mergepatch.ErrNoMergeKey(m, mergeKey)
+			}
+		}
+	}
+	return nil
+}
+
+// normalizeElementOrder sorts the `patch` list by `patchOrder` and the `serverOnly` list by `serverOrder`,
+// then merges the 2 sorted lists.
+// It guarantees that the relative order within the patch list and within the serverOnly list is kept.
+// `patch` is a list of items in the patch, and `serverOnly` is a list of items in the live object.
+// `patchOrder` is the order we want the `patch` list to have and
+// `serverOrder` is the order we want the `serverOnly` list to have.
+// kind is the kind of each item in the lists `patch` and `serverOnly`.
+func normalizeElementOrder(patch, serverOnly, patchOrder, serverOrder []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) {
+	patch, err := normalizeSliceOrder(patch, patchOrder, mergeKey, kind)
+	if err != nil {
+		return nil, err
+	}
+	serverOnly, err = normalizeSliceOrder(serverOnly, serverOrder, mergeKey, kind)
+	if err != nil {
+		return nil, err
+	}
+	all := mergeSortedSlice(serverOnly, patch, serverOrder, mergeKey, kind)
+
+	return all, nil
+}
+
+// mergeSortedSlice merges the 2 lists, each sorted by serverOrder, with best effort.
+// It inserts each item from the `left` list into the `right` list. In most cases, the 2 lists will be interleaved.
+// The relative order within left and within right is guaranteed to be kept.
+// Those orders have higher precedence than the order in the live list.
+// The place for an item in `left` is found by
+// scanning from the place of the last insertion in `right` to the end of `right`;
+// the place is before the first item that is greater than the item we want to insert.
+// Example usage: use server-only items as left and patch items as right; we insert server-only items
+// into the patch list, using the order of the live object as the record for comparison.
+func mergeSortedSlice(left, right, serverOrder []interface{}, mergeKey string, kind reflect.Kind) []interface{} {
+	// Returns whether l is less than r, and whether both were found.
+ // If l and r both present and l is in front of r, l is less than r. + less := func(l, r interface{}) (bool, bool) { + li := index(serverOrder, l, mergeKey, kind) + ri := index(serverOrder, r, mergeKey, kind) + if li >= 0 && ri >= 0 { + return li < ri, true + } else { + return false, false + } + } + + // left and right should be non-overlapping. + size := len(left) + len(right) + i, j := 0, 0 + s := make([]interface{}, size, size) + + for k := 0; k < size; k++ { + if i >= len(left) && j < len(right) { + // have items left in `right` list + s[k] = right[j] + j++ + } else if j >= len(right) && i < len(left) { + // have items left in `left` list + s[k] = left[i] + i++ + } else { + // compare them if i and j are both in bound + less, foundBoth := less(left[i], right[j]) + if foundBoth && less { + s[k] = left[i] + i++ + } else { + s[k] = right[j] + j++ + } + } + } + return s +} + +// index returns the index of the item in the given items, or -1 if it doesn't exist +// l must NOT be a slice of slices, this should be checked before calling. +func index(l []interface{}, valToLookUp interface{}, mergeKey string, kind reflect.Kind) int { + var getValFn func(interface{}) interface{} + // Get the correct `getValFn` based on item `kind`. + // It should return the value of merge key for maps and + // return the item for other kinds. + switch kind { + case reflect.Map: + getValFn = func(item interface{}) interface{} { + typedItem, ok := item.(map[string]interface{}) + if !ok { + return nil + } + val := typedItem[mergeKey] + return val + } + default: + getValFn = func(item interface{}) interface{} { + return item + } + } + + for i, v := range l { + if getValFn(valToLookUp) == getValFn(v) { + return i + } + } + return -1 +} + +// extractToDeleteItems takes a list and +// returns 2 lists: one contains items that should be kept and the other contains items to be deleted. +func extractToDeleteItems(l []interface{}) ([]interface{}, []interface{}, error) { + var nonDelete, toDelete []interface{} + for _, v := range l { + m, ok := v.(map[string]interface{}) + if !ok { + return nil, nil, mergepatch.ErrBadArgType(m, v) + } + + directive, foundDirective := m[directiveMarker] + if foundDirective && directive == deleteDirective { + toDelete = append(toDelete, v) + } else { + nonDelete = append(nonDelete, v) + } + } + return nonDelete, toDelete, nil +} + +// normalizeSliceOrder sort `toSort` list by `order` +func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind reflect.Kind) ([]interface{}, error) { + var toDelete []interface{} + if kind == reflect.Map { + // make sure each item in toSort, order has merge key + err := validateMergeKeyInLists(mergeKey, toSort, order) + if err != nil { + return nil, err + } + toSort, toDelete, err = extractToDeleteItems(toSort) + if err != nil { + return nil, err + } + } + + sort.SliceStable(toSort, func(i, j int) bool { + if ii := index(order, toSort[i], mergeKey, kind); ii >= 0 { + if ij := index(order, toSort[j], mergeKey, kind); ij >= 0 { + return ii < ij + } + } + return true + }) + toSort = append(toSort, toDelete...) + return toSort, nil +} + +// Returns a (recursive) strategic merge patch, a parallel deletion list if necessary and +// another list to set the order of the list +// Only list of primitives with merge strategy will generate a parallel deletion list. +// These two lists should yield modified when applied to original, for lists with merge semantics. 
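+// For example, for merge lists of maps an element that was removed shows up in the
+// returned patch list as a delete directive {<mergeKey>: <value>, "$patch": "delete"}
+// (an illustrative sketch; the exact shape is built by CreateDeleteDirective below).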
+func diffLists(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) {
+	if len(original) == 0 {
+		// Both slices are empty - do nothing
+		if len(modified) == 0 || diffOptions.IgnoreChangesAndAdditions {
+			return nil, nil, nil, nil
+		}
+
+		// Old slice was empty - add all elements from the new slice
+		return modified, nil, nil, nil
+	}
+
+	elementType, err := sliceElementType(original, modified)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	var patchList, deleteList, setOrderList []interface{}
+	kind := elementType.Kind()
+	switch kind {
+	case reflect.Map:
+		patchList, deleteList, err = diffListsOfMaps(original, modified, schema, mergeKey, diffOptions)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		orderSame, err := isOrderSame(original, modified, mergeKey)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		// append the deletions to the end of the patch list.
+		patchList = append(patchList, deleteList...)
+		deleteList = nil
+		// generate the setElementOrder list when there are content changes or order changes
+		if diffOptions.SetElementOrder &&
+			((!diffOptions.IgnoreChangesAndAdditions && (len(patchList) > 0 || !orderSame)) ||
+				(!diffOptions.IgnoreDeletions && len(patchList) > 0)) {
+			// Generate a list of maps in which each item contains only the merge key.
+			setOrderList = make([]interface{}, len(modified))
+			for i, v := range modified {
+				typedV := v.(map[string]interface{})
+				setOrderList[i] = map[string]interface{}{
+					mergeKey: typedV[mergeKey],
+				}
+			}
+		}
+	case reflect.Slice:
+		// Lists of lists are not permitted by the API
+		return nil, nil, nil, mergepatch.ErrNoListOfLists
+	default:
+		patchList, deleteList, err = diffListsOfScalars(original, modified, diffOptions)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+		patchList, err = normalizeSliceOrder(patchList, modified, mergeKey, kind)
+		// generate the setElementOrder list when there are content changes or order changes
+		if diffOptions.SetElementOrder && ((!diffOptions.IgnoreDeletions && len(deleteList) > 0) ||
+			(!diffOptions.IgnoreChangesAndAdditions && !reflect.DeepEqual(original, modified))) {
+			setOrderList = modified
+		}
+	}
+	return patchList, deleteList, setOrderList, err
+}
+
+// isOrderSame checks if the order in a list has changed
+func isOrderSame(original, modified []interface{}, mergeKey string) (bool, error) {
+	if len(original) != len(modified) {
+		return false, nil
+	}
+	for i, modifiedItem := range modified {
+		equal, err := mergeKeyValueEqual(original[i], modifiedItem, mergeKey)
+		if err != nil || !equal {
+			return equal, err
+		}
+	}
+	return true, nil
+}
+
+// diffListsOfScalars returns 2 lists: the first is the addList and the second is the deletionList.
+// diffOptions.IgnoreChangesAndAdditions controls whether the addList is computed; true means it is not.
+// diffOptions.IgnoreDeletions controls whether the deletionList is computed; true means it is not.
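+// For example, diffing original ["a","b"] against modified ["b","c"] yields
+// addList ["c"] and deletionList ["a"].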
+// original may be changed, but modified is guaranteed to not be changed
+func diffListsOfScalars(original, modified []interface{}, diffOptions DiffOptions) ([]interface{}, []interface{}, error) {
+	modifiedCopy := make([]interface{}, len(modified))
+	copy(modifiedCopy, modified)
+	// Sort the scalars to make calculating the diff easier
+	originalScalars := sortScalars(original)
+	modifiedScalars := sortScalars(modifiedCopy)
+
+	originalIndex, modifiedIndex := 0, 0
+	addList := []interface{}{}
+	deletionList := []interface{}{}
+
+	for {
+		originalInBounds := originalIndex < len(originalScalars)
+		modifiedInBounds := modifiedIndex < len(modifiedScalars)
+		if !originalInBounds && !modifiedInBounds {
+			break
+		}
+		// We need to compare the string representation of the scalars,
+		// because a scalar is an interface which supports neither < nor >.
+		// This is also how func sortScalars compares scalars.
+		var originalString, modifiedString string
+		var originalValue, modifiedValue interface{}
+		if originalInBounds {
+			originalValue = originalScalars[originalIndex]
+			originalString = fmt.Sprintf("%v", originalValue)
+		}
+		if modifiedInBounds {
+			modifiedValue = modifiedScalars[modifiedIndex]
+			modifiedString = fmt.Sprintf("%v", modifiedValue)
+		}
+
+		originalV, modifiedV := compareListValuesAtIndex(originalInBounds, modifiedInBounds, originalString, modifiedString)
+		switch {
+		case originalV == nil && modifiedV == nil:
+			originalIndex++
+			modifiedIndex++
+		case originalV != nil && modifiedV == nil:
+			if !diffOptions.IgnoreDeletions {
+				deletionList = append(deletionList, originalValue)
+			}
+			originalIndex++
+		case originalV == nil && modifiedV != nil:
+			if !diffOptions.IgnoreChangesAndAdditions {
+				addList = append(addList, modifiedValue)
+			}
+			modifiedIndex++
+		default:
+			return nil, nil, fmt.Errorf("Unexpected returned value from compareListValuesAtIndex: %v and %v", originalV, modifiedV)
+		}
+	}
+
+	return addList, deduplicateScalars(deletionList), nil
+}
+
+// If the first return value is non-nil, list1 contains an element not present in list2.
+// If the second return value is non-nil, list2 contains an element not present in list1.
+func compareListValuesAtIndex(list1Inbounds, list2Inbounds bool, list1Value, list2Value string) (interface{}, interface{}) {
+	bothInBounds := list1Inbounds && list2Inbounds
+	switch {
+	// scalars are identical
+	case bothInBounds && list1Value == list2Value:
+		return nil, nil
+	// only list2 is in bound
+	case !list1Inbounds:
+		fallthrough
+	// list2 has additional scalar
+	case bothInBounds && list1Value > list2Value:
+		return nil, list2Value
+	// only list1 is in bound
+	case !list2Inbounds:
+		fallthrough
+	// list1 has additional scalar
+	case bothInBounds && list1Value < list2Value:
+		return list1Value, nil
+	default:
+		return nil, nil
+	}
+}
+
+// diffListsOfMaps takes a pair of lists and
+// returns a (recursive) strategic merge patch list containing additions and changes, and
+// a deletion list containing deletions.
+func diffListsOfMaps(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) {
+	patch := make([]interface{}, 0, len(modified))
+	deletionList := make([]interface{}, 0, len(original))
+
+	originalSorted, err := sortMergeListsByNameArray(original, schema, mergeKey, false)
+	if err != nil {
+		return nil, nil, err
+	}
+	modifiedSorted, err := sortMergeListsByNameArray(modified, schema, mergeKey, false)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	originalIndex, modifiedIndex := 0, 0
+	for {
+		originalInBounds := originalIndex < len(originalSorted)
+		modifiedInBounds := modifiedIndex < len(modifiedSorted)
+		bothInBounds := originalInBounds && modifiedInBounds
+		if !originalInBounds && !modifiedInBounds {
+			break
+		}
+
+		var originalElementMergeKeyValueString, modifiedElementMergeKeyValueString string
+		var originalElementMergeKeyValue, modifiedElementMergeKeyValue interface{}
+		var originalElement, modifiedElement map[string]interface{}
+		if originalInBounds {
+			originalElement, originalElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(originalIndex, mergeKey, originalSorted)
+			if err != nil {
+				return nil, nil, err
+			}
+			originalElementMergeKeyValueString = fmt.Sprintf("%v", originalElementMergeKeyValue)
+		}
+		if modifiedInBounds {
+			modifiedElement, modifiedElementMergeKeyValue, err = getMapAndMergeKeyValueByIndex(modifiedIndex, mergeKey, modifiedSorted)
+			if err != nil {
+				return nil, nil, err
+			}
+			modifiedElementMergeKeyValueString = fmt.Sprintf("%v", modifiedElementMergeKeyValue)
+		}
+
+		switch {
+		case bothInBounds && ItemMatchesOriginalAndModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString):
+			// Merge key values are equal, so recurse
+			patchValue, err := diffMaps(originalElement, modifiedElement, schema, diffOptions)
+			if err != nil {
+				return nil, nil, err
+			}
+			if len(patchValue) > 0 {
+				patchValue[mergeKey] = modifiedElementMergeKeyValue
+				patch = append(patch, patchValue)
+			}
+			originalIndex++
+			modifiedIndex++
+		// only modified is in bound
+		case !originalInBounds:
+			fallthrough
+		// modified has additional map
+		case bothInBounds && ItemAddedToModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString):
+			if !diffOptions.IgnoreChangesAndAdditions {
+				patch = append(patch, modifiedElement)
+			}
+			modifiedIndex++
+		// only original is in bound
+		case !modifiedInBounds:
+			fallthrough
+		// original has additional map
+		case bothInBounds && ItemRemovedFromModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString):
+			if !diffOptions.IgnoreDeletions {
+				// Item was deleted, so add delete directive
+				deletionList = append(deletionList, CreateDeleteDirective(mergeKey, originalElementMergeKeyValue))
+			}
+			originalIndex++
+		}
+	}
+
+	return patch, deletionList, nil
+}
+
+// getMapAndMergeKeyValueByIndex returns a map in the list and its merge key value, given the index of the map.
+func getMapAndMergeKeyValueByIndex(index int, mergeKey string, listOfMaps []interface{}) (map[string]interface{}, interface{}, error) {
+	m, ok := listOfMaps[index].(map[string]interface{})
+	if !ok {
+		return nil, nil, mergepatch.ErrBadArgType(m, listOfMaps[index])
+	}
+
+	val, ok := m[mergeKey]
+	if !ok {
+		return nil, nil, mergepatch.ErrNoMergeKey(m, mergeKey)
+	}
+	return m, val, nil
+}
+
+// StrategicMergePatch applies a strategic merge patch. The patch and the original document
+// must be json encoded content. A patch can be created from an original and a modified document
+// by calling CreateTwoWayMergePatch.
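+//
+// Illustrative usage (hypothetical caller code; v1.Pod stands in for any API type
+// carrying `patchStrategy`/`patchMergeKey` struct tags):
+//
+//	patch, _ := CreateTwoWayMergePatch(originalJSON, modifiedJSON, &v1.Pod{})
+//	merged, _ := StrategicMergePatch(originalJSON, patch, &v1.Pod{})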
+func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) {
+	schema, err := NewPatchMetaFromStruct(dataStruct)
+	if err != nil {
+		return nil, err
+	}
+
+	return StrategicMergePatchUsingLookupPatchMeta(original, patch, schema)
+}
+
+func StrategicMergePatchUsingLookupPatchMeta(original, patch []byte, schema LookupPatchMeta) ([]byte, error) {
+	originalMap, err := handleUnmarshal(original)
+	if err != nil {
+		return nil, err
+	}
+	patchMap, err := handleUnmarshal(patch)
+	if err != nil {
+		return nil, err
+	}
+
+	result, err := StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, schema)
+	if err != nil {
+		return nil, err
+	}
+
+	return json.Marshal(result)
+}
+
+func handleUnmarshal(j []byte) (map[string]interface{}, error) {
+	if j == nil {
+		j = []byte("{}")
+	}
+
+	m := map[string]interface{}{}
+	err := json.Unmarshal(j, &m)
+	if err != nil {
+		return nil, mergepatch.ErrBadJSONDoc
+	}
+	return m, nil
+}
+
+// StrategicMergeMapPatch applies a strategic merge patch. The original and patch documents
+// must be JSONMap. A patch can be created from an original and modified document by
+// calling CreateTwoWayMergeMapPatch.
+// Warning: the original and patch JSONMap objects are mutated by this function and should not be reused.
+func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) {
+	schema, err := NewPatchMetaFromStruct(dataStruct)
+	if err != nil {
+		return nil, err
+	}
+
+	// We need the go struct tags `patchMergeKey` and `patchStrategy` for fields that support a strategic merge patch.
+	// For native resources, we can easily figure out these tags since we know the fields.
+
+	// Because custom resources are decoded as Unstructured and because we're missing the metadata about how to handle
+	// each field in a strategic merge patch, we can't find the go struct tags. Hence, we can't easily do a strategic merge
+	// for custom resources. So we should fail fast and return an error.
+	if _, ok := dataStruct.(*unstructured.Unstructured); ok {
+		return nil, mergepatch.ErrUnsupportedStrategicMergePatchFormat
+	}
+
+	return StrategicMergeMapPatchUsingLookupPatchMeta(original, patch, schema)
+}
+
+func StrategicMergeMapPatchUsingLookupPatchMeta(original, patch JSONMap, schema LookupPatchMeta) (JSONMap, error) {
+	mergeOptions := MergeOptions{
+		MergeParallelList:    true,
+		IgnoreUnmatchedNulls: true,
+	}
+	return mergeMap(original, patch, schema, mergeOptions)
+}
+
+// MergeStrategicMergeMapPatchUsingLookupPatchMeta merges strategic merge
+// patches retaining `null` fields and parallel lists. If 2 patches change the
+// same fields, the latter one will override the former one. If you don't
+// want that to happen, you need to run func MergingMapsHaveConflicts before
+// merging these patches. Applying the resulting merged merge patch to a JSONMap
+// yields the same as merging each strategic merge patch to the JSONMap in
+// succession.
+func MergeStrategicMergeMapPatchUsingLookupPatchMeta(schema LookupPatchMeta, patches ...JSONMap) (JSONMap, error) {
+	mergeOptions := MergeOptions{
+		MergeParallelList:    false,
+		IgnoreUnmatchedNulls: false,
+	}
+	merged := JSONMap{}
+	var err error
+	for _, patch := range patches {
+		merged, err = mergeMap(merged, patch, schema, mergeOptions)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return merged, nil
+}
+
+// handleDirectiveInMergeMap handles the patch directive when merging 2 maps.
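+// For example, a patch of {"$patch": "replace", "image": "nginx"} replaces the whole
+// original map with {"image": "nginx"}, and a patch of {"$patch": "delete"} returns
+// an empty map.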
+func handleDirectiveInMergeMap(directive interface{}, patch map[string]interface{}) (map[string]interface{}, error) {
+	if directive == replaceDirective {
+		// If the patch contains "$patch: replace", don't merge it, just use the
+		// patch directly. Later on, we can add a single level replace that only
+		// affects the map that the $patch is in.
+		delete(patch, directiveMarker)
+		return patch, nil
+	}
+
+	if directive == deleteDirective {
+		// If the patch contains "$patch: delete", don't merge it, just return
+		// an empty map.
+		return map[string]interface{}{}, nil
+	}
+
+	return nil, mergepatch.ErrBadPatchType(directive, patch)
+}
+
+func containsDirectiveMarker(item interface{}) bool {
+	m, ok := item.(map[string]interface{})
+	if ok {
+		if _, foundDirectiveMarker := m[directiveMarker]; foundDirectiveMarker {
+			return true
+		}
+	}
+	return false
+}
+
+func mergeKeyValueEqual(left, right interface{}, mergeKey string) (bool, error) {
+	if len(mergeKey) == 0 {
+		return left == right, nil
+	}
+	typedLeft, ok := left.(map[string]interface{})
+	if !ok {
+		return false, mergepatch.ErrBadArgType(typedLeft, left)
+	}
+	typedRight, ok := right.(map[string]interface{})
+	if !ok {
+		return false, mergepatch.ErrBadArgType(typedRight, right)
+	}
+	mergeKeyLeft, ok := typedLeft[mergeKey]
+	if !ok {
+		return false, mergepatch.ErrNoMergeKey(typedLeft, mergeKey)
+	}
+	mergeKeyRight, ok := typedRight[mergeKey]
+	if !ok {
+		return false, mergepatch.ErrNoMergeKey(typedRight, mergeKey)
+	}
+	return mergeKeyLeft == mergeKeyRight, nil
+}
+
+// extractKey trims the prefix and returns the original key
+func extractKey(s, prefix string) (string, error) {
+	substrings := strings.SplitN(s, "/", 2)
+	if len(substrings) <= 1 || substrings[0] != prefix {
+		switch prefix {
+		case deleteFromPrimitiveListDirectivePrefix:
+			return "", mergepatch.ErrBadPatchFormatForPrimitiveList
+		case setElementOrderDirectivePrefix:
+			return "", mergepatch.ErrBadPatchFormatForSetElementOrderList
+		default:
+			return "", fmt.Errorf("fail to find unknown prefix %q in %s\n", prefix, s)
+		}
+	}
+	return substrings[1], nil
+}
+
+// validatePatchWithSetOrderList verifies that:
+// the relative order of any two items in the setOrderList matches their relative order in the patch list, and
+// the items in the patch list are a subset of, or the same as, the $setElementOrder list (deletions are ignored).
+func validatePatchWithSetOrderList(patchList, setOrderList interface{}, mergeKey string) error {
+	typedSetOrderList, ok := setOrderList.([]interface{})
+	if !ok {
+		return mergepatch.ErrBadPatchFormatForSetElementOrderList
+	}
+	typedPatchList, ok := patchList.([]interface{})
+	if !ok {
+		return mergepatch.ErrBadPatchFormatForSetElementOrderList
+	}
+	if len(typedSetOrderList) == 0 || len(typedPatchList) == 0 {
+		return nil
+	}
+
+	var nonDeleteList []interface{}
+	var err error
+	if len(mergeKey) > 0 {
+		nonDeleteList, _, err = extractToDeleteItems(typedPatchList)
+		if err != nil {
+			return err
+		}
+	} else {
+		nonDeleteList = typedPatchList
+	}
+
+	patchIndex, setOrderIndex := 0, 0
+	for patchIndex < len(nonDeleteList) && setOrderIndex < len(typedSetOrderList) {
+		if containsDirectiveMarker(nonDeleteList[patchIndex]) {
+			patchIndex++
+			continue
+		}
+		mergeKeyEqual, err := mergeKeyValueEqual(nonDeleteList[patchIndex], typedSetOrderList[setOrderIndex], mergeKey)
+		if err != nil {
+			return err
+		}
+		if mergeKeyEqual {
+			patchIndex++
+		}
+		setOrderIndex++
+	}
+	// If patchIndex is still in bounds but setOrderIndex is out of bounds, there are items in the
+	// patch list that are missing from the setElementOrder list.
+	// The second check is a sanity check and should always be true if the first one is.
+	if patchIndex < len(nonDeleteList) && setOrderIndex >= len(typedSetOrderList) {
+		return fmt.Errorf("The order in patch list:\n%v\n doesn't match %s list:\n%v\n", typedPatchList, setElementOrderDirectivePrefix, setOrderList)
+	}
+	return nil
+}
+
+// preprocessDeletionListForMerging preprocesses the deletion list.
+// It returns shouldContinue, isDeleteList, noPrefixKey.
+func preprocessDeletionListForMerging(key string, original map[string]interface{},
+	patchVal interface{}, mergeDeletionList bool) (bool, bool, string, error) {
+	// If we find a parallel list for deletion and we are going to merge the list,
+	// rewrite the key to the original key and set the isDeleteList flag
+	foundParallelListPrefix := strings.HasPrefix(key, deleteFromPrimitiveListDirectivePrefix)
+	if foundParallelListPrefix {
+		if !mergeDeletionList {
+			original[key] = patchVal
+			return true, false, "", nil
+		}
+		originalKey, err := extractKey(key, deleteFromPrimitiveListDirectivePrefix)
+		return false, true, originalKey, err
+	}
+	return false, false, "", nil
+}
+
+// applyRetainKeysDirective looks for a retainKeys directive and applies it to original
+// - if no directive exists do nothing
+// - if the directive is found, clear keys in original missing from the directive list
+// - validate that all keys present in the patch are present in the retainKeys directive
+// note: original may be another patch request, e.g. applying the add+modified patch to the deletions patch. In this case it may have directives.
+func applyRetainKeysDirective(original, patch map[string]interface{}, options MergeOptions) error {
+	retainKeysInPatch, foundInPatch := patch[retainKeysDirective]
+	if !foundInPatch {
+		return nil
+	}
+	// cleanup the directive
+	delete(patch, retainKeysDirective)
+
+	if !options.MergeParallelList {
+		// If original is actually a patch, make sure the retainKeys directives are the same in both patches if present in both.
+		// If not present in the original patch, copy from the modified patch.
+		retainKeysInOriginal, foundInOriginal := original[retainKeysDirective]
+		if foundInOriginal {
+			if !reflect.DeepEqual(retainKeysInOriginal, retainKeysInPatch) {
+				// This error actually should never happen.
+ return fmt.Errorf("%v and %v are not deep equal: this may happen when calculating the 3-way diff patch", retainKeysInOriginal, retainKeysInPatch) + } + } else { + original[retainKeysDirective] = retainKeysInPatch + } + return nil + } + + retainKeysList, ok := retainKeysInPatch.([]interface{}) + if !ok { + return mergepatch.ErrBadPatchFormatForRetainKeys + } + + // validate patch to make sure all fields in the patch are present in the retainKeysList. + // The map is used only as a set, the value is never referenced + m := map[interface{}]struct{}{} + for _, v := range retainKeysList { + m[v] = struct{}{} + } + for k, v := range patch { + if v == nil || strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) || + strings.HasPrefix(k, setElementOrderDirectivePrefix) { + continue + } + // If there is an item present in the patch but not in the retainKeys list, + // the patch is invalid. + if _, found := m[k]; !found { + return mergepatch.ErrBadPatchFormatForRetainKeys + } + } + + // clear not present fields + for k := range original { + if _, found := m[k]; !found { + delete(original, k) + } + } + return nil +} + +// mergePatchIntoOriginal processes $setElementOrder list. +// When not merging the directive, it will make sure $setElementOrder list exist only in original. +// When merging the directive, it will try to find the $setElementOrder list and +// its corresponding patch list, validate it and merge it. +// Then, sort them by the relative order in setElementOrder, patch list and live list. +// The precedence is $setElementOrder > order in patch list > order in live list. +// This function will delete the item after merging it to prevent process it again in the future. +// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md +func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error { + for key, patchV := range patch { + // Do nothing if there is no ordering directive + if !strings.HasPrefix(key, setElementOrderDirectivePrefix) { + continue + } + + setElementOrderInPatch := patchV + // Copies directive from the second patch (`patch`) to the first patch (`original`) + // and checks they are equal and delete the directive in the second patch + if !mergeOptions.MergeParallelList { + setElementOrderListInOriginal, ok := original[key] + if ok { + // check if the setElementOrder list in original and the one in patch matches + if !reflect.DeepEqual(setElementOrderListInOriginal, setElementOrderInPatch) { + return mergepatch.ErrBadPatchFormatForSetElementOrderList + } + } else { + // move the setElementOrder list from patch to original + original[key] = setElementOrderInPatch + } + } + delete(patch, key) + + var ( + ok bool + originalFieldValue, patchFieldValue, merged []interface{} + patchStrategy string + patchMeta PatchMeta + subschema LookupPatchMeta + ) + typedSetElementOrderList, ok := setElementOrderInPatch.([]interface{}) + if !ok { + return mergepatch.ErrBadArgType(typedSetElementOrderList, setElementOrderInPatch) + } + // Trim the setElementOrderDirectivePrefix to get the key of the list field in original. + originalKey, err := extractKey(key, setElementOrderDirectivePrefix) + if err != nil { + return err + } + // try to find the list with `originalKey` in `original` and `modified` and merge them. 
+		originalList, foundOriginal := original[originalKey]
+		patchList, foundPatch := patch[originalKey]
+		if foundOriginal {
+			originalFieldValue, ok = originalList.([]interface{})
+			if !ok {
+				return mergepatch.ErrBadArgType(originalFieldValue, originalList)
+			}
+		}
+		if foundPatch {
+			patchFieldValue, ok = patchList.([]interface{})
+			if !ok {
+				return mergepatch.ErrBadArgType(patchFieldValue, patchList)
+			}
+		}
+		subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(originalKey)
+		if err != nil {
+			return err
+		}
+		_, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
+		if err != nil {
+			return err
+		}
+		// Check for consistency between the element order list and the field it applies to
+		err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, patchMeta.GetPatchMergeKey())
+		if err != nil {
+			return err
+		}
+
+		switch {
+		case foundOriginal && !foundPatch:
+			// no change to list contents
+			merged = originalFieldValue
+		case !foundOriginal && foundPatch:
+			// list was added
+			merged = patchFieldValue
+		case foundOriginal && foundPatch:
+			merged, err = mergeSliceHandler(originalList, patchList, subschema,
+				patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions)
+			if err != nil {
+				return err
+			}
+		case !foundOriginal && !foundPatch:
+			continue
+		}
+
+		// Split all items into patch items and server-only items and then enforce the order.
+		var patchItems, serverOnlyItems []interface{}
+		if len(patchMeta.GetPatchMergeKey()) == 0 {
+			// Primitives don't need a merge key for partitioning.
+			patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, typedSetElementOrderList)
+		} else {
+			// Maps need the merge key for partitioning.
+			patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, patchMeta.GetPatchMergeKey())
+			if err != nil {
+				return err
+			}
+		}
+
+		elementType, err := sliceElementType(originalFieldValue, patchFieldValue)
+		if err != nil {
+			return err
+		}
+		kind := elementType.Kind()
+		// Normalize the merged list.
+		// typedSetElementOrderList contains all the relative order in typedPatchList,
+		// so we don't need to use typedPatchList.
+		both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, patchMeta.GetPatchMergeKey(), kind)
+		if err != nil {
+			return err
+		}
+		original[originalKey] = both
+		// Delete the list from patch to avoid processing it again later.
+		delete(patch, originalKey)
+	}
+	return nil
+}
+
+// partitionPrimitivesByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not.
+func partitionPrimitivesByPresentInList(original, partitionBy []interface{}) ([]interface{}, []interface{}) {
+	patch := make([]interface{}, 0, len(original))
+	serverOnly := make([]interface{}, 0, len(original))
+	inPatch := map[interface{}]bool{}
+	for _, v := range partitionBy {
+		inPatch[v] = true
+	}
+	for _, v := range original {
+		if !inPatch[v] {
+			serverOnly = append(serverOnly, v)
+		} else {
+			patch = append(patch, v)
+		}
+	}
+	return patch, serverOnly
+}
+
+// partitionMapsByPresentInList partitions elements into 2 slices, the first containing items present in partitionBy, the other not.
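+// For example, partitioning [{"name":"a"},{"name":"b"}] by [{"name":"b"}] with
+// mergeKey "name" returns [{"name":"b"}] (present) and [{"name":"a"}] (not present).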
+func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey string) ([]interface{}, []interface{}, error) {
+	patch := make([]interface{}, 0, len(original))
+	serverOnly := make([]interface{}, 0, len(original))
+	for _, v := range original {
+		typedV, ok := v.(map[string]interface{})
+		if !ok {
+			return nil, nil, mergepatch.ErrBadArgType(typedV, v)
+		}
+		mergeKeyValue, foundMergeKey := typedV[mergeKey]
+		if !foundMergeKey {
+			return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey)
+		}
+		_, _, found, err := findMapInSliceBasedOnKeyValue(partitionBy, mergeKey, mergeKeyValue)
+		if err != nil {
+			return nil, nil, err
+		}
+		if !found {
+			serverOnly = append(serverOnly, v)
+		} else {
+			patch = append(patch, v)
+		}
+	}
+	return patch, serverOnly, nil
+}
+
+// Merge fields from a patch map into the original map. Note: This may modify
+// both the original map and the patch because getting a deep copy of a map in
+// golang is highly non-trivial.
+// The flag mergeOptions.MergeParallelList controls whether a parallel deletion list
+// is applied to delete items or is kept as a regular list.
+// If patch contains any null field (e.g. field_1: null) that is not
+// present in original, then to propagate it to the end result use
+// mergeOptions.IgnoreUnmatchedNulls == false.
+func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) (map[string]interface{}, error) {
+	if v, ok := patch[directiveMarker]; ok {
+		return handleDirectiveInMergeMap(v, patch)
+	}
+
+	// nil is an accepted value for original to simplify logic in other places.
+	// If original is nil, replace it with an empty map and then apply the patch.
+	if original == nil {
+		original = map[string]interface{}{}
+	}
+
+	err := applyRetainKeysDirective(original, patch, mergeOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	// Process the $setElementOrder list and other lists sharing the same key.
+	// When not merging the directive, it will make sure the $setElementOrder list exists only in original.
+	// When merging the directive, it will process $setElementOrder and its patch list together.
+	// This function will delete the merged elements from patch so they will not be reprocessed
+	err = mergePatchIntoOriginal(original, patch, schema, mergeOptions)
+	if err != nil {
+		return nil, err
+	}
+
+	// Start merging the patch into the original.
+	for k, patchV := range patch {
+		skipProcessing, isDeleteList, noPrefixKey, err := preprocessDeletionListForMerging(k, original, patchV, mergeOptions.MergeParallelList)
+		if err != nil {
+			return nil, err
+		}
+		if skipProcessing {
+			continue
+		}
+		if len(noPrefixKey) > 0 {
+			k = noPrefixKey
+		}
+
+		// If the value of this key is null, delete the key if it exists in the
+		// original. Otherwise, check if we want to preserve it or skip it.
+		// Preserving the null value is useful when we want to send an explicit
+		// delete to the API server.
+		if patchV == nil {
+			delete(original, k)
+			if mergeOptions.IgnoreUnmatchedNulls {
+				continue
+			}
+		}
+
+		_, ok := original[k]
+		if !ok {
+			if !isDeleteList {
+				// If it's not in the original document, just take the patch value.
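+				// Strip nulls nested inside the added value first; they have
+				// nothing to delete in the original and should not leak into it.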
+				if mergeOptions.IgnoreUnmatchedNulls {
+					discardNullValuesFromPatch(patchV)
+				}
+				original[k] = patchV
+			}
+			continue
+		}
+
+		originalType := reflect.TypeOf(original[k])
+		patchType := reflect.TypeOf(patchV)
+		if originalType != patchType {
+			if !isDeleteList {
+				if mergeOptions.IgnoreUnmatchedNulls {
+					discardNullValuesFromPatch(patchV)
+				}
+				original[k] = patchV
+			}
+			continue
+		}
+		// If they're both maps or lists, recurse into the value.
+		switch originalType.Kind() {
+		case reflect.Map:
+			subschema, patchMeta, err2 := schema.LookupPatchMetadataForStruct(k)
+			if err2 != nil {
+				return nil, err2
+			}
+			_, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
+			if err2 != nil {
+				return nil, err2
+			}
+			original[k], err = mergeMapHandler(original[k], patchV, subschema, patchStrategy, mergeOptions)
+		case reflect.Slice:
+			subschema, patchMeta, err2 := schema.LookupPatchMetadataForSlice(k)
+			if err2 != nil {
+				return nil, err2
+			}
+			_, patchStrategy, err2 := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
+			if err2 != nil {
+				return nil, err2
+			}
+			original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions)
+		default:
+			original[k] = patchV
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	return original, nil
+}
+
+// discardNullValuesFromPatch discards all null property values from patch.
+// It traverses all slices and map types.
+func discardNullValuesFromPatch(patchV interface{}) {
+	switch patchV := patchV.(type) {
+	case map[string]interface{}:
+		for k, v := range patchV {
+			if v == nil {
+				delete(patchV, k)
+			} else {
+				discardNullValuesFromPatch(v)
+			}
+		}
+	case []interface{}:
+		for _, v := range patchV {
+			discardNullValuesFromPatch(v)
+		}
+	}
+}
+
+// mergeMapHandler handles how to merge the map `patch` with `original`, respecting
+// fieldPatchStrategy and mergeOptions.
+func mergeMapHandler(original, patch interface{}, schema LookupPatchMeta,
+	fieldPatchStrategy string, mergeOptions MergeOptions) (map[string]interface{}, error) {
+	typedOriginal, typedPatch, err := mapTypeAssertion(original, patch)
+	if err != nil {
+		return nil, err
+	}
+
+	if fieldPatchStrategy != replaceDirective {
+		return mergeMap(typedOriginal, typedPatch, schema, mergeOptions)
+	} else {
+		return typedPatch, nil
+	}
+}
+
+// mergeSliceHandler handles how to merge the slice `patch` with `original`, respecting
+// fieldPatchStrategy, fieldPatchMergeKey, isDeleteList and mergeOptions.
+func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta,
+	fieldPatchStrategy, fieldPatchMergeKey string, isDeleteList bool, mergeOptions MergeOptions) ([]interface{}, error) {
+	typedOriginal, typedPatch, err := sliceTypeAssertion(original, patch)
+	if err != nil {
+		return nil, err
+	}
+
+	if fieldPatchStrategy == mergeDirective {
+		return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList)
+	} else {
+		return typedPatch, nil
+	}
+}
+
+// Merge two slices together. Note: This may modify both the original slice and
+// the patch because getting a deep copy of a slice in golang is highly
+// non-trivial.
+func mergeSlice(original, patch []interface{}, schema LookupPatchMeta, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) {
+	if len(original) == 0 && len(patch) == 0 {
+		return original, nil
+	}
+
+	// All the values must be of the same type, but not a list.
+	t, err := sliceElementType(original, patch)
+	if err != nil {
+		return nil, err
+	}
+
+	var merged []interface{}
+	kind := t.Kind()
+	// If the elements are not maps, merge the slices of scalars.
+	if kind != reflect.Map {
+		if mergeOptions.MergeParallelList && isDeleteList {
+			return deleteFromSlice(original, patch), nil
+		}
+		// Maybe in the future add a "concat" mode that doesn't
+		// deduplicate.
+		both := append(original, patch...)
+		merged = deduplicateScalars(both)
+
+	} else {
+		if mergeKey == "" {
+			return nil, fmt.Errorf("cannot merge lists without merge key for %s", schema.Name())
+		}
+
+		original, patch, err = mergeSliceWithSpecialElements(original, patch, mergeKey)
+		if err != nil {
+			return nil, err
+		}
+
+		merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, schema, mergeOptions)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// enforce the order
+	var patchItems, serverOnlyItems []interface{}
+	if len(mergeKey) == 0 {
+		patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, patch)
+	} else {
+		patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, patch, mergeKey)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return normalizeElementOrder(patchItems, serverOnlyItems, patch, original, mergeKey, kind)
+}
+
+// mergeSliceWithSpecialElements handles special elements with directiveMarker
+// before merging the slices. It returns an updated `original` and a patch without special elements.
+// original and patch must be slices of maps; this should be checked before calling this function.
+func mergeSliceWithSpecialElements(original, patch []interface{}, mergeKey string) ([]interface{}, []interface{}, error) {
+	patchWithoutSpecialElements := []interface{}{}
+	replace := false
+	for _, v := range patch {
+		typedV := v.(map[string]interface{})
+		patchType, ok := typedV[directiveMarker]
+		if !ok {
+			patchWithoutSpecialElements = append(patchWithoutSpecialElements, v)
+		} else {
+			switch patchType {
+			case deleteDirective:
+				mergeValue, ok := typedV[mergeKey]
+				if ok {
+					var err error
+					original, err = deleteMatchingEntries(original, mergeKey, mergeValue)
+					if err != nil {
+						return nil, nil, err
+					}
+				} else {
+					return nil, nil, mergepatch.ErrNoMergeKey(typedV, mergeKey)
+				}
+			case replaceDirective:
+				replace = true
+				// Continue iterating through the array to prune any other $patch elements.
+			case mergeDirective:
+				return nil, nil, fmt.Errorf("merging lists cannot yet be specified in the patch")
+			default:
+				return nil, nil, mergepatch.ErrBadPatchType(patchType, typedV)
+			}
+		}
+	}
+	if replace {
+		return patchWithoutSpecialElements, nil, nil
+	}
+	return original, patchWithoutSpecialElements, nil
+}
+
+// deleteMatchingEntries deletes all matching entries (based on the merge key) from a merging list.
+func deleteMatchingEntries(original []interface{}, mergeKey string, mergeValue interface{}) ([]interface{}, error) {
+	for {
+		_, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue)
+		if err != nil {
+			return nil, err
+		}
+
+		if !found {
+			break
+		}
+		// Delete the element at originalKey.
+		original = append(original[:originalKey], original[originalKey+1:]...)
+	}
+	return original, nil
+}
+
+// mergeSliceWithoutSpecialElements merges slices with non-special elements.
+// original and patch must be slices of maps; this should be checked before calling this function.
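+// For example, merging original [{"name":"a","v":1}] with patch
+// [{"name":"a","v":2},{"name":"b"}] on mergeKey "name" yields
+// [{"name":"a","v":2},{"name":"b"}].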
+func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, schema LookupPatchMeta, mergeOptions MergeOptions) ([]interface{}, error) { + for _, v := range patch { + typedV := v.(map[string]interface{}) + mergeValue, ok := typedV[mergeKey] + if !ok { + return nil, mergepatch.ErrNoMergeKey(typedV, mergeKey) + } + + // If we find a value with this merge key value in original, merge the + // maps. Otherwise append onto original. + originalMap, originalKey, found, err := findMapInSliceBasedOnKeyValue(original, mergeKey, mergeValue) + if err != nil { + return nil, err + } + + if found { + var mergedMaps interface{} + var err error + // Merge into original. + mergedMaps, err = mergeMap(originalMap, typedV, schema, mergeOptions) + if err != nil { + return nil, err + } + + original[originalKey] = mergedMaps + } else { + original = append(original, v) + } + } + return original, nil +} + +// deleteFromSlice uses the parallel list to delete the items in a list of scalars +func deleteFromSlice(current, toDelete []interface{}) []interface{} { + toDeleteMap := map[interface{}]interface{}{} + processed := make([]interface{}, 0, len(current)) + for _, v := range toDelete { + toDeleteMap[v] = true + } + for _, v := range current { + if _, found := toDeleteMap[v]; !found { + processed = append(processed, v) + } + } + return processed +} + +// This method no longer panics if any element of the slice is not a map. +func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{}) (map[string]interface{}, int, bool, error) { + for k, v := range m { + typedV, ok := v.(map[string]interface{}) + if !ok { + return nil, 0, false, fmt.Errorf("value for key %v is not a map", k) + } + + valueToMatch, ok := typedV[key] + if ok && valueToMatch == value { + return typedV, k, true, nil + } + } + + return nil, 0, false, nil +} + +// This function takes a JSON map and sorts all the lists that should be merged +// by key. This is needed by tests because in JSON, list order is significant, +// but in Strategic Merge Patch, merge lists do not have significant order. +// Sorting the lists allows for order-insensitive comparison of patched maps. +func sortMergeListsByName(mapJSON []byte, schema LookupPatchMeta) ([]byte, error) { + var m map[string]interface{} + err := json.Unmarshal(mapJSON, &m) + if err != nil { + return nil, mergepatch.ErrBadJSONDoc + } + + newM, err := sortMergeListsByNameMap(m, schema) + if err != nil { + return nil, err + } + + return json.Marshal(newM) +} + +// Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in a map. +func sortMergeListsByNameMap(s map[string]interface{}, schema LookupPatchMeta) (map[string]interface{}, error) { + newS := map[string]interface{}{} + for k, v := range s { + if k == retainKeysDirective { + typedV, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForRetainKeys + } + v = sortScalars(typedV) + } else if strings.HasPrefix(k, deleteFromPrimitiveListDirectivePrefix) { + typedV, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForPrimitiveList + } + v = sortScalars(typedV) + } else if strings.HasPrefix(k, setElementOrderDirectivePrefix) { + _, ok := v.([]interface{}) + if !ok { + return nil, mergepatch.ErrBadPatchFormatForSetElementOrderList + } + } else if k != directiveMarker { + // recurse for map and slice. 
+		switch typedV := v.(type) {
+			case map[string]interface{}:
+				subschema, _, err := schema.LookupPatchMetadataForStruct(k)
+				if err != nil {
+					return nil, err
+				}
+				v, err = sortMergeListsByNameMap(typedV, subschema)
+				if err != nil {
+					return nil, err
+				}
+			case []interface{}:
+				subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k)
+				if err != nil {
+					return nil, err
+				}
+				_, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies())
+				if err != nil {
+					return nil, err
+				}
+				if patchStrategy == mergeDirective {
+					var err error
+					v, err = sortMergeListsByNameArray(typedV, subschema, patchMeta.GetPatchMergeKey(), true)
+					if err != nil {
+						return nil, err
+					}
+				}
+			}
+		}
+
+		newS[k] = v
+	}
+
+	return newS, nil
+}
+
+// Function sortMergeListsByNameArray recursively sorts the merge lists by their mergeKey in an array.
+func sortMergeListsByNameArray(s []interface{}, schema LookupPatchMeta, mergeKey string, recurse bool) ([]interface{}, error) {
+	if len(s) == 0 {
+		return s, nil
+	}
+
+	// We don't support lists of lists yet.
+	t, err := sliceElementType(s)
+	if err != nil {
+		return nil, err
+	}
+
+	// If the elements are not maps...
+	if t.Kind() != reflect.Map {
+		// Sort the elements, because they may have been merged out of order.
+		return deduplicateAndSortScalars(s), nil
+	}
+
+	// Elements are maps - if one of the keys of the map is a map or a
+	// list, we may need to recurse into it.
+	newS := []interface{}{}
+	for _, elem := range s {
+		if recurse {
+			typedElem := elem.(map[string]interface{})
+			newElem, err := sortMergeListsByNameMap(typedElem, schema)
+			if err != nil {
+				return nil, err
+			}
+
+			newS = append(newS, newElem)
+		} else {
+			newS = append(newS, elem)
+		}
+	}
+
+	// Sort the maps.
+	newS = sortMapsBasedOnField(newS, mergeKey)
+	return newS, nil
+}
+
+func sortMapsBasedOnField(m []interface{}, fieldName string) []interface{} {
+	mapM := mapSliceFromSlice(m)
+	ss := SortableSliceOfMaps{mapM, fieldName}
+	sort.Sort(ss)
+	newS := sliceFromMapSlice(ss.s)
+	return newS
+}
+
+func mapSliceFromSlice(m []interface{}) []map[string]interface{} {
+	newM := []map[string]interface{}{}
+	for _, v := range m {
+		vt := v.(map[string]interface{})
+		newM = append(newM, vt)
+	}
+
+	return newM
+}
+
+func sliceFromMapSlice(s []map[string]interface{}) []interface{} {
+	newS := []interface{}{}
+	for _, v := range s {
+		newS = append(newS, v)
+	}
+
+	return newS
+}
+
+type SortableSliceOfMaps struct {
+	s []map[string]interface{}
+	k string // key to sort on
+}
+
+func (ss SortableSliceOfMaps) Len() int {
+	return len(ss.s)
+}
+
+func (ss SortableSliceOfMaps) Less(i, j int) bool {
+	iStr := fmt.Sprintf("%v", ss.s[i][ss.k])
+	jStr := fmt.Sprintf("%v", ss.s[j][ss.k])
+	return sort.StringsAreSorted([]string{iStr, jStr})
+}
+
+func (ss SortableSliceOfMaps) Swap(i, j int) {
+	tmp := ss.s[i]
+	ss.s[i] = ss.s[j]
+	ss.s[j] = tmp
+}
+
+func deduplicateAndSortScalars(s []interface{}) []interface{} {
+	s = deduplicateScalars(s)
+	return sortScalars(s)
+}
+
+func sortScalars(s []interface{}) []interface{} {
+	ss := SortableSliceOfScalars{s}
+	sort.Sort(ss)
+	return ss.s
+}
+
+func deduplicateScalars(s []interface{}) []interface{} {
+	// Deduplicate in place: overwrite each duplicate with the current last
+	// element and shrink the slice. Element order is not preserved.
+ length := len(s) - 1 + for i := 0; i < length; i++ { + for j := i + 1; j <= length; j++ { + if s[i] == s[j] { + s[j] = s[length] + s = s[0:length] + length-- + j-- + } + } + } + + return s +} + +type SortableSliceOfScalars struct { + s []interface{} +} + +func (ss SortableSliceOfScalars) Len() int { + return len(ss.s) +} + +func (ss SortableSliceOfScalars) Less(i, j int) bool { + iStr := fmt.Sprintf("%v", ss.s[i]) + jStr := fmt.Sprintf("%v", ss.s[j]) + return sort.StringsAreSorted([]string{iStr, jStr}) +} + +func (ss SortableSliceOfScalars) Swap(i, j int) { + tmp := ss.s[i] + ss.s[i] = ss.s[j] + ss.s[j] = tmp +} + +// Returns the type of the elements of N slice(s). If the type is different, +// another slice or undefined, returns an error. +func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { + var prevType reflect.Type + for _, s := range slices { + // Go through elements of all given slices and make sure they are all the same type. + for _, v := range s { + currentType := reflect.TypeOf(v) + if prevType == nil { + prevType = currentType + // We don't support lists of lists yet. + if prevType.Kind() == reflect.Slice { + return nil, mergepatch.ErrNoListOfLists + } + } else { + if prevType != currentType { + return nil, fmt.Errorf("list element types are not identical: %v", fmt.Sprint(slices)) + } + prevType = currentType + } + } + } + + if prevType == nil { + return nil, fmt.Errorf("no elements in any of the given slices") + } + + return prevType, nil +} + +// MergingMapsHaveConflicts returns true if the left and right JSON interface +// objects overlap with different values in any key. All keys are required to be +// strings. Since patches of the same Type have congruent keys, this is valid +// for multiple patch types. This method supports strategic merge patch semantics. +func MergingMapsHaveConflicts(left, right map[string]interface{}, schema LookupPatchMeta) (bool, error) { + return mergingMapFieldsHaveConflicts(left, right, schema, "", "") +} + +func mergingMapFieldsHaveConflicts( + left, right interface{}, + schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + switch leftType := left.(type) { + case map[string]interface{}: + rightType, ok := right.(map[string]interface{}) + if !ok { + return true, nil + } + leftMarker, okLeft := leftType[directiveMarker] + rightMarker, okRight := rightType[directiveMarker] + // if one or the other has a directive marker, + // then we need to consider that before looking at the individual keys, + // since a directive operates on the whole map. + if okLeft || okRight { + // if one has a directive marker and the other doesn't, + // then we have a conflict, since one is deleting or replacing the whole map, + // and the other is doing things to individual keys. + if okLeft != okRight { + return true, nil + } + // if they both have markers, but they are not the same directive, + // then we have a conflict because they're doing different things to the map. + if leftMarker != rightMarker { + return true, nil + } + } + if fieldPatchStrategy == replaceDirective { + return false, nil + } + // Check the individual keys. 
+ return mapsHaveConflicts(leftType, rightType, schema) + + case []interface{}: + rightType, ok := right.([]interface{}) + if !ok { + return true, nil + } + return slicesHaveConflicts(leftType, rightType, schema, fieldPatchStrategy, fieldPatchMergeKey) + case string, float64, bool, int64, nil: + return !reflect.DeepEqual(left, right), nil + default: + return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left)) + } +} + +func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { + for key, leftValue := range typedLeft { + if key != directiveMarker && key != retainKeysDirective { + if rightValue, ok := typedRight[key]; ok { + var subschema LookupPatchMeta + var patchMeta PatchMeta + var patchStrategy string + var err error + switch leftValue.(type) { + case []interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + case map[string]interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForStruct(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + } + + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, + subschema, patchStrategy, patchMeta.GetPatchMergeKey()); hasConflicts { + return true, err + } + } + } + } + + return false, nil +} + +func slicesHaveConflicts( + typedLeft, typedRight []interface{}, + schema LookupPatchMeta, + fieldPatchStrategy, fieldPatchMergeKey string, +) (bool, error) { + elementType, err := sliceElementType(typedLeft, typedRight) + if err != nil { + return true, err + } + + if fieldPatchStrategy == mergeDirective { + // Merging lists of scalars have no conflicts by definition + // So we only need to check further if the elements are maps + if elementType.Kind() != reflect.Map { + return false, nil + } + + // Build a map for each slice and then compare the two maps + leftMap, err := sliceOfMapsToMapOfMaps(typedLeft, fieldPatchMergeKey) + if err != nil { + return true, err + } + + rightMap, err := sliceOfMapsToMapOfMaps(typedRight, fieldPatchMergeKey) + if err != nil { + return true, err + } + + return mapsOfMapsHaveConflicts(leftMap, rightMap, schema) + } + + // Either we don't have type information, or these are non-merging lists + if len(typedLeft) != len(typedRight) { + return true, nil + } + + // Sort scalar slices to prevent ordering issues + // We have no way to sort non-merging lists of maps + if elementType.Kind() != reflect.Map { + typedLeft = deduplicateAndSortScalars(typedLeft) + typedRight = deduplicateAndSortScalars(typedRight) + } + + // Compare the slices element by element in order + // This test will fail if the slices are not sorted + for i := range typedLeft { + if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], schema, "", ""); hasConflicts { + return true, err + } + } + + return false, nil +} + +func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]interface{}, error) { + result := make(map[string]interface{}, len(slice)) + for _, value := range slice { + typedValue, ok := value.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("invalid element type in merging list:%v", slice) + } + + mergeValue, ok := typedValue[mergeKey] + if !ok { + return nil, fmt.Errorf("cannot find merge key 
`%s` in merging list element:%v", mergeKey, typedValue)
+ }
+
+ result[fmt.Sprintf("%s", mergeValue)] = typedValue
+ }
+
+ return result, nil
+}
+
+func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) {
+ for key, leftValue := range typedLeft {
+ if rightValue, ok := typedRight[key]; ok {
+ if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, schema, "", ""); hasConflicts {
+ return true, err
+ }
+ }
+ }
+
+ return false, nil
+}
+
+// CreateThreeWayMergePatch reconciles a modified configuration with an original configuration,
+// while preserving any changes or deletions made to the original configuration in the interim,
+// and not overridden by the current configuration. All three documents must be passed to the
+// method as json encoded content. It will return a strategic merge patch, or an error if any
+// of the documents is invalid, or if there are any preconditions that fail against the modified
+// configuration, or, if overwrite is false and there are conflicts between the modified and current
+// configurations. Conflicts are defined as keys changed differently from original to modified
+// than from original to current. In other words, a conflict occurs if modified changes any key
+// in a way that is different from how it is changed in current (e.g., deleting it, changing its
+// value). We also propagate values fields that do not exist in original but are explicitly
+// defined in modified.
+func CreateThreeWayMergePatch(original, modified, current []byte, schema LookupPatchMeta, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) {
+ originalMap := map[string]interface{}{}
+ if len(original) > 0 {
+ if err := json.Unmarshal(original, &originalMap); err != nil {
+ return nil, mergepatch.ErrBadJSONDoc
+ }
+ }
+
+ modifiedMap := map[string]interface{}{}
+ if len(modified) > 0 {
+ if err := json.Unmarshal(modified, &modifiedMap); err != nil {
+ return nil, mergepatch.ErrBadJSONDoc
+ }
+ }
+
+ currentMap := map[string]interface{}{}
+ if len(current) > 0 {
+ if err := json.Unmarshal(current, &currentMap); err != nil {
+ return nil, mergepatch.ErrBadJSONDoc
+ }
+ }
+
+ // The patch is the difference from current to modified without deletions, plus deletions
+ // from original to modified. To find it, we compute deletions, which are the deletions from
+ // original to modified, and delta, which is the difference from current to modified without
+ // deletions, and then apply delta to deletions as a patch, which should be strictly additive.
+ deltaMapDiffOptions := DiffOptions{
+ IgnoreDeletions: true,
+ SetElementOrder: true,
+ }
+ deltaMap, err := diffMaps(currentMap, modifiedMap, schema, deltaMapDiffOptions)
+ if err != nil {
+ return nil, err
+ }
+ deletionsMapDiffOptions := DiffOptions{
+ SetElementOrder: true,
+ IgnoreChangesAndAdditions: true,
+ }
+ deletionsMap, err := diffMaps(originalMap, modifiedMap, schema, deletionsMapDiffOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ mergeOptions := MergeOptions{}
+ patchMap, err := mergeMap(deletionsMap, deltaMap, schema, mergeOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ // Apply the preconditions to the patch, and return an error if any of them fail.
+ for _, fn := range fns {
+ if !fn(patchMap) {
+ return nil, mergepatch.NewErrPreconditionFailed(patchMap)
+ }
+ }
+
+ // If overwrite is false, and the patch contains any keys that were changed differently,
+ // then return a conflict error.
+ if !overwrite {
+ changeMapDiffOptions := DiffOptions{}
+ changedMap, err := diffMaps(originalMap, currentMap, schema, changeMapDiffOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, schema)
+ if err != nil {
+ return nil, err
+ }
+
+ if hasConflicts {
+ return nil, mergepatch.NewErrConflict(mergepatch.ToYAMLOrError(patchMap), mergepatch.ToYAMLOrError(changedMap))
+ }
+ }
+
+ return json.Marshal(patchMap)
+}
+
+func ItemAddedToModifiedSlice(original, modified string) bool { return original > modified }
+
+func ItemRemovedFromModifiedSlice(original, modified string) bool { return original < modified }
+
+func ItemMatchesOriginalAndModifiedSlice(original, modified string) bool { return original == modified }
+
+func CreateDeleteDirective(mergeKey string, mergeKeyValue interface{}) map[string]interface{} {
+ return map[string]interface{}{mergeKey: mergeKeyValue, directiveMarker: deleteDirective}
+}
+
+func mapTypeAssertion(original, patch interface{}) (map[string]interface{}, map[string]interface{}, error) {
+ typedOriginal, ok := original.(map[string]interface{})
+ if !ok {
+ return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original)
+ }
+ typedPatch, ok := patch.(map[string]interface{})
+ if !ok {
+ return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch)
+ }
+ return typedOriginal, typedPatch, nil
+}
+
+func sliceTypeAssertion(original, patch interface{}) ([]interface{}, []interface{}, error) {
+ typedOriginal, ok := original.([]interface{})
+ if !ok {
+ return nil, nil, mergepatch.ErrBadArgType(typedOriginal, original)
+ }
+ typedPatch, ok := patch.([]interface{})
+ if !ok {
+ return nil, nil, mergepatch.ErrBadArgType(typedPatch, patch)
+ }
+ return typedOriginal, typedPatch, nil
+}
+
+// extractRetainKeysPatchStrategy processes the patch strategy, a string that may contain
+// multiple patch strategies separated by ",". It returns a boolean indicating whether the
+// retainKeys strategy is present, and a string holding the remaining strategy, if any.
+func extractRetainKeysPatchStrategy(strategies []string) (bool, string, error) {
+ switch len(strategies) {
+ case 0:
+ return false, "", nil
+ case 1:
+ singleStrategy := strategies[0]
+ switch singleStrategy {
+ case retainKeysStrategy:
+ return true, "", nil
+ default:
+ return false, singleStrategy, nil
+ }
+ case 2:
+ switch {
+ case strategies[0] == retainKeysStrategy:
+ return true, strategies[1], nil
+ case strategies[1] == retainKeysStrategy:
+ return true, strategies[0], nil
+ default:
+ return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies)
+ }
+ default:
+ return false, "", fmt.Errorf("unexpected patch strategy: %v", strategies)
+ }
+}
+
+// hasAdditionalNewField reports whether the original map contains any key with a non-nil
+// value that is absent from the modified map.
+func hasAdditionalNewField(original, modified map[string]interface{}) bool {
+ for k, v := range original {
+ if v == nil {
+ continue
+ }
+ if _, found := modified[k]; !found {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go
new file mode 100644
index 00000000..f84d65aa
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go
@@ -0,0 +1,193 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "errors" + "strings" + + "k8s.io/apimachinery/pkg/util/mergepatch" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +const ( + patchStrategyOpenapiextensionKey = "x-kubernetes-patch-strategy" + patchMergeKeyOpenapiextensionKey = "x-kubernetes-patch-merge-key" +) + +type LookupPatchItem interface { + openapi.SchemaVisitor + + Error() error + Path() *openapi.Path +} + +type kindItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewKindItem(key string, path *openapi.Path) *kindItem { + return &kindItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &kindItem{} + +func (item *kindItem) Error() error { + return item.err +} + +func (item *kindItem) Path() *openapi.Path { + return item.path +} + +func (item *kindItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected kind, but got primitive") +} + +func (item *kindItem) VisitArray(schema *openapi.Array) { + item.err = errors.New("expected kind, but got slice") +} + +func (item *kindItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected kind, but got map") +} + +func (item *kindItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } +} + +func (item *kindItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.subschema = subschema +} + +type sliceItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewSliceItem(key string, path *openapi.Path) *sliceItem { + return &sliceItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &sliceItem{} + +func (item *sliceItem) Error() error { + return item.err +} + +func (item *sliceItem) Path() *openapi.Path { + return item.path +} + +func (item *sliceItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected slice, but got primitive") +} + +func (item *sliceItem) VisitArray(schema *openapi.Array) { + if !item.hasVisitKind { + item.err = errors.New("expected visit kind first, then visit array") + } + subschema := schema.SubType + item.subschema = subschema +} + +func (item *sliceItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected slice, but got map") +} + +func (item *sliceItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } else { + item.subschema = schema.SubSchema() + } +} + +func (item *sliceItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, 
patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.hasVisitKind = true + subschema.Accept(item) +} + +func parsePatchMetadata(extensions map[string]interface{}) (string, []string, error) { + ps, foundPS := extensions[patchStrategyOpenapiextensionKey] + var patchStrategies []string + var mergeKey, patchStrategy string + var ok bool + if foundPS { + patchStrategy, ok = ps.(string) + if ok { + patchStrategies = strings.Split(patchStrategy, ",") + } else { + return "", nil, mergepatch.ErrBadArgType(patchStrategy, ps) + } + } + mk, foundMK := extensions[patchMergeKeyOpenapiextensionKey] + if foundMK { + mergeKey, ok = mk.(string) + if !ok { + return "", nil, mergepatch.ErrBadArgType(mergeKey, mk) + } + } + return mergeKey, patchStrategies, nil +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS new file mode 100644 index 00000000..349bc69d --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - pwittrock +reviewers: + - apelisse diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go new file mode 100644 index 00000000..5b8514b3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go @@ -0,0 +1,513 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package json is forked from the Go standard library to enable us to find the +// field of a struct that a given JSON key maps to. +package json + +import ( + "bytes" + "fmt" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +const ( + patchStrategyTagKey = "patchStrategy" + patchMergeKeyTagKey = "patchMergeKey" +) + +// Finds the patchStrategy and patchMergeKey struct tag fields on a given +// struct field given the struct type and the JSON name of the field. +// It returns field type, a slice of patch strategies, merge key and error. +// TODO: fix the returned errors to be introspectable. +func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) ( + elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) { + if t.Kind() == reflect.Pointer { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + e = fmt.Errorf("merging an object in json but data type is not struct, instead is: %s", + t.Kind().String()) + return + } + jf := []byte(jsonField) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, jf) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, jf) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential struct field. 
+ tjf := t.Field(f.index[0]) + // we must navigate down all the anonymously included structs in the chain + for i := 1; i < len(f.index); i++ { + tjf = tjf.Type.Field(f.index[i]) + } + patchStrategy := tjf.Tag.Get(patchStrategyTagKey) + patchMergeKey = tjf.Tag.Get(patchMergeKeyTagKey) + patchStrategies = strings.Split(patchStrategy, ",") + elemType = tjf.Type + return + } + e = fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField) + return +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + // index is the sequence of indexes from the containing type fields to this field. + // it is a slice because anonymous structs will need multiple navigation steps to correctly + // resolve the proper fields + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func (f field) String() string { + return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted) +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. 
+ for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Pointer { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. 
+ if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. 
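+ // For example (editor's note, not part of the upstream file): with
+ // s = []byte("ks"), a matching t may contain the Kelvin sign U+212A in
+ // place of 'k' and the long s U+017F in place of 's'.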
+ tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/k8s.io/client-go/metadata/interface.go b/vendor/k8s.io/client-go/metadata/interface.go new file mode 100644 index 00000000..127c3950 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/interface.go @@ -0,0 +1,49 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadata + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" +) + +// Interface allows a caller to get the metadata (in the form of PartialObjectMetadata objects) +// from any Kubernetes compatible resource API. +type Interface interface { + Resource(resource schema.GroupVersionResource) Getter +} + +// ResourceInterface contains the set of methods that may be invoked on objects by their metadata. +// Update is not supported by the server, but Patch can be used for the actions Update would handle. 
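+// An illustrative use of the metadata client (editor's sketch, not part of the
+// upstream file), assuming a *rest.Config named cfg and a context.Context ctx:
+//
+//	client, err := NewForConfig(cfg)
+//	if err != nil {
+//		panic(err)
+//	}
+//	gvr := schema.GroupVersionResource{Version: "v1", Resource: "configmaps"}
+//	m, err := client.Resource(gvr).Namespace("default").Get(ctx, "example", metav1.GetOptions{})
+//	// m is a *metav1.PartialObjectMetadata carrying only the object's metadata.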
+type ResourceInterface interface { + Delete(ctx context.Context, name string, options metav1.DeleteOptions, subresources ...string) error + DeleteCollection(ctx context.Context, options metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(ctx context.Context, name string, options metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) + List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, options metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) +} + +// Getter handles both namespaced and non-namespaced resource types consistently. +type Getter interface { + Namespace(string) ResourceInterface + ResourceInterface +} diff --git a/vendor/k8s.io/client-go/metadata/metadata.go b/vendor/k8s.io/client-go/metadata/metadata.go new file mode 100644 index 00000000..8152aa12 --- /dev/null +++ b/vendor/k8s.io/client-go/metadata/metadata.go @@ -0,0 +1,331 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metadata + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "k8s.io/klog/v2" + + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/rest" +) + +var deleteScheme = runtime.NewScheme() +var parameterScheme = runtime.NewScheme() +var deleteOptionsCodec = serializer.NewCodecFactory(deleteScheme) +var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme) + +var versionV1 = schema.GroupVersion{Version: "v1"} + +func init() { + metav1.AddToGroupVersion(parameterScheme, versionV1) + metav1.AddToGroupVersion(deleteScheme, versionV1) +} + +// Client allows callers to retrieve the object metadata for any +// Kubernetes-compatible API endpoint. The client uses the +// meta.k8s.io/v1 PartialObjectMetadata resource to more efficiently +// retrieve just the necessary metadata, but on older servers +// (Kubernetes 1.14 and before) will retrieve the object and then +// convert the metadata. +type Client struct { + client *rest.RESTClient +} + +var _ Interface = &Client{} + +// ConfigFor returns a copy of the provided config with the +// appropriate metadata client defaults set. 
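+// Callers that manage their own HTTP client can pair ConfigFor with
+// NewForConfigAndClient, mirroring what NewForConfig does internally
+// (editor's sketch, not part of the upstream file):
+//
+//	cfg := ConfigFor(inConfig)
+//	httpClient, err := rest.HTTPClientFor(cfg)
+//	if err != nil {
+//		panic(err)
+//	}
+//	client, err := NewForConfigAndClient(cfg, httpClient)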
+func ConfigFor(inConfig *rest.Config) *rest.Config { + config := rest.CopyConfig(inConfig) + config.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json" + config.ContentType = "application/vnd.kubernetes.protobuf" + config.NegotiatedSerializer = metainternalversionscheme.Codecs.WithoutConversion() + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + return config +} + +// NewForConfigOrDie creates a new metadata client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) Interface { + ret, err := NewForConfig(c) + if err != nil { + panic(err) + } + return ret +} + +// NewForConfig creates a new metadata client that can retrieve object +// metadata details about any Kubernetes object (core, aggregated, or custom +// resource based) in the form of PartialObjectMetadata objects, or returns +// an error. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(inConfig *rest.Config) (Interface, error) { + config := ConfigFor(inConfig) + + httpClient, err := rest.HTTPClientFor(config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(config, httpClient) +} + +// NewForConfigAndClient creates a new metadata client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(inConfig *rest.Config, h *http.Client) (Interface, error) { + config := ConfigFor(inConfig) + // for serializing the options + config.GroupVersion = &schema.GroupVersion{} + config.APIPath = "/this-value-should-never-be-sent" + + restClient, err := rest.RESTClientForConfigAndClient(config, h) + if err != nil { + return nil, err + } + + return &Client{client: restClient}, nil +} + +type client struct { + client *Client + namespace string + resource schema.GroupVersionResource +} + +// Resource returns an interface that can access cluster or namespace +// scoped instances of resource. +func (c *Client) Resource(resource schema.GroupVersionResource) Getter { + return &client{client: c, resource: resource} +} + +// Namespace returns an interface that can access namespace-scoped instances of the +// provided resource. +func (c *client) Namespace(ns string) ResourceInterface { + ret := *c + ret.namespace = ns + return &ret +} + +// Delete removes the provided resource from the server. +func (c *client) Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error { + if len(name) == 0 { + return fmt.Errorf("name is required") + } + // if DeleteOptions are delivered to Negotiator for serialization, + // HTTP-Request header will bring "Content-Type: application/vnd.kubernetes.protobuf" + // apiextensions-apiserver uses unstructuredNegotiatedSerializer to decode the input, + // server-side will reply with 406 errors. + // The special treatment here is to be compatible with CRD Handler + // see: https://github.com/kubernetes/kubernetes/blob/1a845ccd076bbf1b03420fe694c85a5cd3bd6bed/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_handler.go#L843 + deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) + if err != nil { + return err + } + + result := c.client.client. + Delete(). + AbsPath(append(c.makeURLSegments(name), subresources...)...). 
+ SetHeader("Content-Type", runtime.ContentTypeJSON). + Body(deleteOptionsByte). + Do(ctx) + return result.Error() +} + +// DeleteCollection triggers deletion of all resources in the specified scope (namespace or cluster). +func (c *client) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOptions metav1.ListOptions) error { + // See comment on Delete + deleteOptionsByte, err := runtime.Encode(deleteOptionsCodec.LegacyCodec(schema.GroupVersion{Version: "v1"}), &opts) + if err != nil { + return err + } + + result := c.client.client. + Delete(). + AbsPath(c.makeURLSegments("")...). + SetHeader("Content-Type", runtime.ContentTypeJSON). + Body(deleteOptionsByte). + SpecificallyVersionedParams(&listOptions, dynamicParameterCodec, versionV1). + Do(ctx) + return result.Error() +} + +// Get returns the resource with name from the specified scope (namespace or cluster). +func (c *client) Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } + result := c.client.client.Get().AbsPath(append(c.makeURLSegments(name), subresources...)...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + klog.V(5).Infof("Unable to retrieve PartialObjectMetadata: %#v", err) + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadata + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadata: %v", err) + } + if !isLikelyObjectMetadata(&partial) { + return nil, fmt.Errorf("object does not appear to match the ObjectMeta schema: %#v", partial) + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +// List returns all resources within the specified scope (namespace or cluster). +func (c *client) List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error) { + result := c.client.client.Get().AbsPath(c.makeURLSegments("")...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadataList;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). 
+ Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + klog.V(5).Infof("Unable to retrieve PartialObjectMetadataList: %#v", err) + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadataList + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadataList: %v", err) + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadataList) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +// Watch finds all changes to the resources in the specified scope (namespace or cluster). +func (c *client) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.client.Get(). + AbsPath(c.makeURLSegments("")...). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). + Timeout(timeout). + Watch(ctx) +} + +// Patch modifies the named resource in the specified scope (namespace or cluster). +func (c *client) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error) { + if len(name) == 0 { + return nil, fmt.Errorf("name is required") + } + result := c.client.client. + Patch(pt). + AbsPath(append(c.makeURLSegments(name), subresources...)...). + Body(data). + SetHeader("Accept", "application/vnd.kubernetes.protobuf;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1,application/json"). + SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1). 
+ Do(ctx) + if err := result.Error(); err != nil { + return nil, err + } + obj, err := result.Get() + if runtime.IsNotRegisteredError(err) { + rawBytes, err := result.Raw() + if err != nil { + return nil, err + } + var partial metav1.PartialObjectMetadata + if err := json.Unmarshal(rawBytes, &partial); err != nil { + return nil, fmt.Errorf("unable to decode returned object as PartialObjectMetadata: %v", err) + } + if !isLikelyObjectMetadata(&partial) { + return nil, fmt.Errorf("object does not appear to match the ObjectMeta schema") + } + partial.TypeMeta = metav1.TypeMeta{} + return &partial, nil + } + if err != nil { + return nil, err + } + partial, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return nil, fmt.Errorf("unexpected object, expected PartialObjectMetadata but got %T", obj) + } + return partial, nil +} + +func (c *client) makeURLSegments(name string) []string { + url := []string{} + if len(c.resource.Group) == 0 { + url = append(url, "api") + } else { + url = append(url, "apis", c.resource.Group) + } + url = append(url, c.resource.Version) + + if len(c.namespace) > 0 { + url = append(url, "namespaces", c.namespace) + } + url = append(url, c.resource.Resource) + + if len(name) > 0 { + url = append(url, name) + } + + return url +} + +func isLikelyObjectMetadata(meta *metav1.PartialObjectMetadata) bool { + return len(meta.UID) > 0 || !meta.CreationTimestamp.IsZero() || len(meta.Name) > 0 || len(meta.GenerateName) > 0 +} diff --git a/vendor/k8s.io/client-go/restmapper/category_expansion.go b/vendor/k8s.io/client-go/restmapper/category_expansion.go new file mode 100644 index 00000000..484e4c83 --- /dev/null +++ b/vendor/k8s.io/client-go/restmapper/category_expansion.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restmapper + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" +) + +// CategoryExpander maps category strings to GroupResources. +// Categories are classification or 'tag' of a group of resources. +type CategoryExpander interface { + Expand(category string) ([]schema.GroupResource, bool) +} + +// SimpleCategoryExpander implements CategoryExpander interface +// using a static mapping of categories to GroupResource mapping. +type SimpleCategoryExpander struct { + Expansions map[string][]schema.GroupResource +} + +// Expand fulfills CategoryExpander +func (e SimpleCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) { + ret, ok := e.Expansions[category] + return ret, ok +} + +// discoveryCategoryExpander struct lets a REST Client wrapper (discoveryClient) to retrieve list of APIResourceList, +// and then convert to fallbackExpander +type discoveryCategoryExpander struct { + discoveryClient discovery.DiscoveryInterface +} + +// NewDiscoveryCategoryExpander returns a category expander that makes use of the "categories" fields from +// the API, found through the discovery client. 
In case of any error, or if no category is found (which likely
+// means the cluster predates categories support), the expander reports no expansion.
+func NewDiscoveryCategoryExpander(client discovery.DiscoveryInterface) CategoryExpander {
+ if client == nil {
+ panic("Please provide discovery client to shortcut expander")
+ }
+ return discoveryCategoryExpander{discoveryClient: client}
+}
+
+// Expand fulfills CategoryExpander
+func (e discoveryCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) {
+ // Get all supported resources for groups and versions from the server; if no resources are found, there is nothing to expand.
+ _, apiResourceLists, _ := e.discoveryClient.ServerGroupsAndResources()
+ if len(apiResourceLists) == 0 {
+ return nil, false
+ }
+
+ discoveredExpansions := map[string][]schema.GroupResource{}
+ for _, apiResourceList := range apiResourceLists {
+ gv, err := schema.ParseGroupVersion(apiResourceList.GroupVersion)
+ if err != nil {
+ continue
+ }
+ // Collect GroupVersions by categories
+ for _, apiResource := range apiResourceList.APIResources {
+ if categories := apiResource.Categories; len(categories) > 0 {
+ for _, category := range categories {
+ groupResource := schema.GroupResource{
+ Group: gv.Group,
+ Resource: apiResource.Name,
+ }
+ discoveredExpansions[category] = append(discoveredExpansions[category], groupResource)
+ }
+ }
+ }
+ }
+
+ ret, ok := discoveredExpansions[category]
+ return ret, ok
+}
+
+// UnionCategoryExpander implements CategoryExpander interface.
+// It maps given category string to union of expansions returned by all the CategoryExpanders in the list.
+type UnionCategoryExpander []CategoryExpander
+
+// Expand fulfills CategoryExpander
+func (u UnionCategoryExpander) Expand(category string) ([]schema.GroupResource, bool) {
+ ret := []schema.GroupResource{}
+ ok := false
+
+ // Expand the category for each CategoryExpander in the list and merge/combine the results.
+ for _, expansion := range u {
+ curr, currOk := expansion.Expand(category)
+
+ for _, currGR := range curr {
+ found := false
+ for _, existing := range ret {
+ if existing == currGR {
+ found = true
+ break
+ }
+ }
+ if !found {
+ ret = append(ret, currGR)
+ }
+ }
+ ok = ok || currOk
+ }
+
+ return ret, ok
+}
diff --git a/vendor/k8s.io/client-go/restmapper/discovery.go b/vendor/k8s.io/client-go/restmapper/discovery.go
new file mode 100644
index 00000000..3505178b
--- /dev/null
+++ b/vendor/k8s.io/client-go/restmapper/discovery.go
@@ -0,0 +1,338 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restmapper
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/discovery"
+
+ "k8s.io/klog/v2"
+)
+
+// APIGroupResources is an API group with a mapping of versions to
+// resources.
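+// An end-to-end sketch (editor's illustration, not part of the upstream file):
+// GetAPIGroupResources returns the []*APIGroupResources slice that
+// NewDiscoveryRESTMapper consumes, assuming a discovery client dc:
+//
+//	groupResources, err := GetAPIGroupResources(dc)
+//	if err != nil {
+//		panic(err)
+//	}
+//	mapper := NewDiscoveryRESTMapper(groupResources)
+//	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"})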
+type APIGroupResources struct { + Group metav1.APIGroup + // A mapping of version string to a slice of APIResources for + // that version. + VersionedResources map[string][]metav1.APIResource +} + +// NewDiscoveryRESTMapper returns a PriorityRESTMapper based on the discovered +// groups and resources passed in. +func NewDiscoveryRESTMapper(groupResources []*APIGroupResources) meta.RESTMapper { + unionMapper := meta.MultiRESTMapper{} + + var groupPriority []string + // /v1 is special. It should always come first + resourcePriority := []schema.GroupVersionResource{{Group: "", Version: "v1", Resource: meta.AnyResource}} + kindPriority := []schema.GroupVersionKind{{Group: "", Version: "v1", Kind: meta.AnyKind}} + + for _, group := range groupResources { + groupPriority = append(groupPriority, group.Group.Name) + + // Make sure the preferred version comes first + if len(group.Group.PreferredVersion.Version) != 0 { + preferred := group.Group.PreferredVersion.Version + if _, ok := group.VersionedResources[preferred]; ok { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: group.Group.PreferredVersion.Version, + Kind: meta.AnyKind, + }) + } + } + + for _, discoveryVersion := range group.Group.Versions { + resources, ok := group.VersionedResources[discoveryVersion.Version] + if !ok { + continue + } + + // Add non-preferred versions after the preferred version, in case there are resources that only exist in those versions + if discoveryVersion.Version != group.Group.PreferredVersion.Version { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Resource: meta.AnyResource, + }) + + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group.Group.Name, + Version: discoveryVersion.Version, + Kind: meta.AnyKind, + }) + } + + gv := schema.GroupVersion{Group: group.Group.Name, Version: discoveryVersion.Version} + versionMapper := meta.NewDefaultRESTMapper([]schema.GroupVersion{gv}) + + for _, resource := range resources { + scope := meta.RESTScopeNamespace + if !resource.Namespaced { + scope = meta.RESTScopeRoot + } + + // if we have a slash, then this is a subresource and we shouldn't create mappings for those. + if strings.Contains(resource.Name, "/") { + continue + } + + plural := gv.WithResource(resource.Name) + singular := gv.WithResource(resource.SingularName) + // this is for legacy resources and servers which don't list singular forms. For those we must still guess. 
+ if len(resource.SingularName) == 0 { + _, singular = meta.UnsafeGuessKindToResource(gv.WithKind(resource.Kind)) + } + + versionMapper.AddSpecific(gv.WithKind(strings.ToLower(resource.Kind)), plural, singular, scope) + versionMapper.AddSpecific(gv.WithKind(resource.Kind), plural, singular, scope) + // TODO this is producing unsafe guesses that don't actually work, but it matches previous behavior + versionMapper.Add(gv.WithKind(resource.Kind+"List"), scope) + } + // TODO why is this type not in discovery (at least for "v1") + versionMapper.Add(gv.WithKind("List"), meta.RESTScopeRoot) + unionMapper = append(unionMapper, versionMapper) + } + } + + for _, group := range groupPriority { + resourcePriority = append(resourcePriority, schema.GroupVersionResource{ + Group: group, + Version: meta.AnyVersion, + Resource: meta.AnyResource, + }) + kindPriority = append(kindPriority, schema.GroupVersionKind{ + Group: group, + Version: meta.AnyVersion, + Kind: meta.AnyKind, + }) + } + + return meta.PriorityRESTMapper{ + Delegate: unionMapper, + ResourcePriority: resourcePriority, + KindPriority: kindPriority, + } +} + +// GetAPIGroupResources uses the provided discovery client to gather +// discovery information and populate a slice of APIGroupResources. +func GetAPIGroupResources(cl discovery.DiscoveryInterface) ([]*APIGroupResources, error) { + gs, rs, err := cl.ServerGroupsAndResources() + if rs == nil || gs == nil { + return nil, err + // TODO track the errors and update callers to handle partial errors. + } + rsm := map[string]*metav1.APIResourceList{} + for _, r := range rs { + rsm[r.GroupVersion] = r + } + + var result []*APIGroupResources + for _, group := range gs { + groupResources := &APIGroupResources{ + Group: *group, + VersionedResources: make(map[string][]metav1.APIResource), + } + for _, version := range group.Versions { + resources, ok := rsm[version.GroupVersion] + if !ok { + continue + } + groupResources.VersionedResources[version.Version] = resources.APIResources + } + result = append(result, groupResources) + } + return result, nil +} + +// DeferredDiscoveryRESTMapper is a RESTMapper that will defer +// initialization of the RESTMapper until the first mapping is +// requested. +type DeferredDiscoveryRESTMapper struct { + initMu sync.Mutex + delegate meta.RESTMapper + cl discovery.CachedDiscoveryInterface +} + +// NewDeferredDiscoveryRESTMapper returns a +// DeferredDiscoveryRESTMapper that will lazily query the provided +// client for discovery information to do REST mappings. +func NewDeferredDiscoveryRESTMapper(cl discovery.CachedDiscoveryInterface) *DeferredDiscoveryRESTMapper { + return &DeferredDiscoveryRESTMapper{ + cl: cl, + } +} + +func (d *DeferredDiscoveryRESTMapper) getDelegate() (meta.RESTMapper, error) { + d.initMu.Lock() + defer d.initMu.Unlock() + + if d.delegate != nil { + return d.delegate, nil + } + + groupResources, err := GetAPIGroupResources(d.cl) + if err != nil { + return nil, err + } + + d.delegate = NewDiscoveryRESTMapper(groupResources) + return d.delegate, nil +} + +// Reset resets the internally cached Discovery information and will +// cause the next mapping request to re-discover. +func (d *DeferredDiscoveryRESTMapper) Reset() { + klog.V(5).Info("Invalidating discovery information") + + d.initMu.Lock() + defer d.initMu.Unlock() + + d.cl.Invalidate() + d.delegate = nil +} + +// KindFor takes a partial resource and returns back the single match. +// It returns an error if there are multiple matches. 
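+// An illustrative lookup (editor's sketch, not part of the upstream file),
+// assuming a discovery.CachedDiscoveryInterface named cachedClient:
+//
+//	mapper := NewDeferredDiscoveryRESTMapper(cachedClient)
+//	gvk, err := mapper.KindFor(schema.GroupVersionResource{Resource: "deployments"})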
+func (d *DeferredDiscoveryRESTMapper) KindFor(resource schema.GroupVersionResource) (gvk schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionKind{}, err + } + gvk, err = del.KindFor(resource) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvk, err = d.KindFor(resource) + } + return +} + +// KindsFor takes a partial resource and returns back the list of +// potential kinds in priority order. +func (d *DeferredDiscoveryRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvks []schema.GroupVersionKind, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvks, err = del.KindsFor(resource) + if len(gvks) == 0 && !d.cl.Fresh() { + d.Reset() + gvks, err = d.KindsFor(resource) + } + return +} + +// ResourceFor takes a partial resource and returns back the single +// match. It returns an error if there are multiple matches. +func (d *DeferredDiscoveryRESTMapper) ResourceFor(input schema.GroupVersionResource) (gvr schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return schema.GroupVersionResource{}, err + } + gvr, err = del.ResourceFor(input) + if err != nil && !d.cl.Fresh() { + d.Reset() + gvr, err = d.ResourceFor(input) + } + return +} + +// ResourcesFor takes a partial resource and returns back the list of +// potential resource in priority order. +func (d *DeferredDiscoveryRESTMapper) ResourcesFor(input schema.GroupVersionResource) (gvrs []schema.GroupVersionResource, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + gvrs, err = del.ResourcesFor(input) + if len(gvrs) == 0 && !d.cl.Fresh() { + d.Reset() + gvrs, err = d.ResourcesFor(input) + } + return +} + +// RESTMapping identifies a preferred resource mapping for the +// provided group kind. +func (d *DeferredDiscoveryRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (m *meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + m, err = del.RESTMapping(gk, versions...) + if err != nil && !d.cl.Fresh() { + d.Reset() + m, err = d.RESTMapping(gk, versions...) + } + return +} + +// RESTMappings returns the RESTMappings for the provided group kind +// in a rough internal preferred order. If no kind is found, it will +// return a NoResourceMatchError. +func (d *DeferredDiscoveryRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) (ms []*meta.RESTMapping, err error) { + del, err := d.getDelegate() + if err != nil { + return nil, err + } + ms, err = del.RESTMappings(gk, versions...) + if len(ms) == 0 && !d.cl.Fresh() { + d.Reset() + ms, err = d.RESTMappings(gk, versions...) + } + return +} + +// ResourceSingularizer converts a resource name from plural to +// singular (e.g., from pods to pod). 
+func (d *DeferredDiscoveryRESTMapper) ResourceSingularizer(resource string) (singular string, err error) {
+ del, err := d.getDelegate()
+ if err != nil {
+ return resource, err
+ }
+ singular, err = del.ResourceSingularizer(resource)
+ if err != nil && !d.cl.Fresh() {
+ d.Reset()
+ singular, err = d.ResourceSingularizer(resource)
+ }
+ return
+}
+
+func (d *DeferredDiscoveryRESTMapper) String() string {
+ del, err := d.getDelegate()
+ if err != nil {
+ return fmt.Sprintf("DeferredDiscoveryRESTMapper{%v}", err)
+ }
+ return fmt.Sprintf("DeferredDiscoveryRESTMapper{\n\t%v\n}", del)
+}
+
+// Make sure it satisfies the interface
+var _ meta.ResettableRESTMapper = &DeferredDiscoveryRESTMapper{}
diff --git a/vendor/k8s.io/client-go/restmapper/shortcut.go b/vendor/k8s.io/client-go/restmapper/shortcut.go
new file mode 100644
index 00000000..7ab3cd46
--- /dev/null
+++ b/vendor/k8s.io/client-go/restmapper/shortcut.go
@@ -0,0 +1,187 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package restmapper
+
+import (
+ "strings"
+
+ "k8s.io/klog/v2"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/discovery"
+)
+
+// shortcutExpander is a RESTMapper that can be used for Kubernetes resources. It expands the
+// resource shortcut first, then invokes the wrapped RESTMapper.
+type shortcutExpander struct {
+ RESTMapper meta.RESTMapper
+
+ discoveryClient discovery.DiscoveryInterface
+}
+
+var _ meta.ResettableRESTMapper = shortcutExpander{}
+
+// NewShortcutExpander wraps a restmapper in a layer that expands shortcuts found via discovery
+func NewShortcutExpander(delegate meta.RESTMapper, client discovery.DiscoveryInterface) meta.RESTMapper {
+ return shortcutExpander{RESTMapper: delegate, discoveryClient: client}
+}
+
+// KindFor fulfills meta.RESTMapper
+func (e shortcutExpander) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+ // expandResourceShortcut works with current API resources as read from discovery cache.
+ // In case of new CRDs this means we potentially don't have current state of discovery.
+ // In the current wiring in k8s.io/cli-runtime/pkg/genericclioptions/config_flags.go#toRESTMapper,
+ // we are using DeferredDiscoveryRESTMapper which on KindFor failure will clear the
+ // cache and fetch all data from a cluster (see vendor/k8s.io/client-go/restmapper/discovery.go#KindFor).
+ // Thus another call to expandResourceShortcut, after a NoMatchError, should successfully
+ // return a Kind to the user, or an error.
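+ // For example (editor's note, not part of the upstream file): a request
+ // for the resource "deploy" is first expanded to "deployments" via the
+ // server-advertised short names before the wrapped mapper is consulted.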
+ gvk, err := e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) + if meta.IsNoMatchError(err) { + return e.RESTMapper.KindFor(e.expandResourceShortcut(resource)) + } + return gvk, err +} + +// KindsFor fulfills meta.RESTMapper +func (e shortcutExpander) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + return e.RESTMapper.KindsFor(e.expandResourceShortcut(resource)) +} + +// ResourcesFor fulfills meta.RESTMapper +func (e shortcutExpander) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return e.RESTMapper.ResourcesFor(e.expandResourceShortcut(resource)) +} + +// ResourceFor fulfills meta.RESTMapper +func (e shortcutExpander) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + return e.RESTMapper.ResourceFor(e.expandResourceShortcut(resource)) +} + +// ResourceSingularizer fulfills meta.RESTMapper +func (e shortcutExpander) ResourceSingularizer(resource string) (string, error) { + return e.RESTMapper.ResourceSingularizer(e.expandResourceShortcut(schema.GroupVersionResource{Resource: resource}).Resource) +} + +// RESTMapping fulfills meta.RESTMapper +func (e shortcutExpander) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) { + return e.RESTMapper.RESTMapping(gk, versions...) +} + +// RESTMappings fulfills meta.RESTMapper +func (e shortcutExpander) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) { + return e.RESTMapper.RESTMappings(gk, versions...) +} + +// getShortcutMappings returns a set of tuples which holds short names for resources. +// First the list of potential resources will be taken from the API server. +// Next we will append the hardcoded list of resources - to be backward compatible with old servers. +// NOTE that the list is ordered by group priority. +func (e shortcutExpander) getShortcutMappings() ([]*metav1.APIResourceList, []resourceShortcuts, error) { + res := []resourceShortcuts{} + // get server resources + // This can return an error *and* the results it was able to find. We don't need to fail on the error. + _, apiResList, err := e.discoveryClient.ServerGroupsAndResources() + if err != nil { + klog.V(1).Infof("Error loading discovery information: %v", err) + } + for _, apiResources := range apiResList { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + klog.V(1).Infof("Unable to parse groupversion = %s due to = %s", apiResources.GroupVersion, err.Error()) + continue + } + for _, apiRes := range apiResources.APIResources { + for _, shortName := range apiRes.ShortNames { + rs := resourceShortcuts{ + ShortForm: schema.GroupResource{Group: gv.Group, Resource: shortName}, + LongForm: schema.GroupResource{Group: gv.Group, Resource: apiRes.Name}, + } + res = append(res, rs) + } + } + } + + return apiResList, res, nil +} + +// expandResourceShortcut will return the expanded version of resource +// (something that a pkg/api/meta.RESTMapper can understand), if it is +// indeed a shortcut. If no match has been found, we will match on group prefixing. +// Lastly we will return resource unmodified. +func (e shortcutExpander) expandResourceShortcut(resource schema.GroupVersionResource) schema.GroupVersionResource { + // get the shortcut mappings and return on first match. 
+ if allResources, shortcutResources, err := e.getShortcutMappings(); err == nil { + // avoid expanding if there's an exact match to a full resource name + for _, apiResources := range allResources { + gv, err := schema.ParseGroupVersion(apiResources.GroupVersion) + if err != nil { + continue + } + if len(resource.Group) != 0 && resource.Group != gv.Group { + continue + } + for _, apiRes := range apiResources.APIResources { + if resource.Resource == apiRes.Name { + return resource + } + if resource.Resource == apiRes.SingularName { + return resource + } + } + } + + for _, item := range shortcutResources { + if len(resource.Group) != 0 && resource.Group != item.ShortForm.Group { + continue + } + if resource.Resource == item.ShortForm.Resource { + resource.Resource = item.LongForm.Resource + resource.Group = item.LongForm.Group + return resource + } + } + + // we didn't find exact match so match on group prefixing. This allows autoscal to match autoscaling + if len(resource.Group) == 0 { + return resource + } + for _, item := range shortcutResources { + if !strings.HasPrefix(item.ShortForm.Group, resource.Group) { + continue + } + if resource.Resource == item.ShortForm.Resource { + resource.Resource = item.LongForm.Resource + resource.Group = item.LongForm.Group + return resource + } + } + } + + return resource +} + +func (e shortcutExpander) Reset() { + meta.MaybeResetRESTMapper(e.RESTMapper) +} + +// ResourceShortcuts represents a structure that holds the information how to +// transition from resource's shortcut to its full name. +type resourceShortcuts struct { + ShortForm schema.GroupResource + LongForm schema.GroupResource +} diff --git a/vendor/k8s.io/client-go/tools/cache/OWNERS b/vendor/k8s.io/client-go/tools/cache/OWNERS new file mode 100644 index 00000000..726205b3 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/OWNERS @@ -0,0 +1,28 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - thockin + - lavalamp + - smarterclayton + - wojtek-t + - deads2k + - caesarxuchao + - liggitt + - ncdc +reviewers: + - thockin + - lavalamp + - smarterclayton + - wojtek-t + - deads2k + - derekwaynecarr + - caesarxuchao + - mikedanese + - liggitt + - janetkuo + - justinsb + - soltysh + - jsafrane + - dims + - ingvagabund + - ncdc diff --git a/vendor/k8s.io/client-go/tools/cache/controller.go b/vendor/k8s.io/client-go/tools/cache/controller.go new file mode 100644 index 00000000..0762da3b --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/controller.go @@ -0,0 +1,495 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "sync" + "time" + + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/utils/clock" +) + +// This file implements a low-level controller that is used in +// sharedIndexInformer, which is an implementation of +// SharedIndexInformer. 
Such informers, in turn, are key components +// in the high level controllers that form the backbone of the +// Kubernetes control plane. Look at those for examples, or the +// example in +// https://github.com/kubernetes/client-go/tree/master/examples/workqueue +// . + +// Config contains all the settings for one of these low-level controllers. +type Config struct { + // The queue for your objects - has to be a DeltaFIFO due to + // assumptions in the implementation. Your Process() function + // should accept the output of this Queue's Pop() method. + Queue + + // Something that can list and watch your objects. + ListerWatcher + + // Something that can process a popped Deltas. + Process ProcessFunc + + // ObjectType is an example object of the type this controller is + // expected to handle. Only the type needs to be right, except + // that when that is `unstructured.Unstructured` the object's + // `"apiVersion"` and `"kind"` must also be right. + ObjectType runtime.Object + + // FullResyncPeriod is the period at which ShouldResync is considered. + FullResyncPeriod time.Duration + + // ShouldResync is periodically used by the reflector to determine + // whether to Resync the Queue. If ShouldResync is `nil` or + // returns true, it means the reflector should proceed with the + // resync. + ShouldResync ShouldResyncFunc + + // If true, when Process() returns an error, re-enqueue the object. + // TODO: add interface to let you inject a delay/backoff or drop + // the object completely if desired. Pass the object in + // question to this interface as a parameter. This is probably moot + // now that this functionality appears at a higher level. + RetryOnError bool + + // Called whenever the ListAndWatch drops the connection with an error. + WatchErrorHandler WatchErrorHandler + + // WatchListPageSize is the requested chunk size of initial and relist watch lists. + WatchListPageSize int64 +} + +// ShouldResyncFunc is a type of function that indicates if a reflector should perform a +// resync or not. It can be used by a shared informer to support multiple event handlers with custom +// resync periods. +type ShouldResyncFunc func() bool + +// ProcessFunc processes a single object. +type ProcessFunc func(obj interface{}) error + +// `*controller` implements Controller +type controller struct { + config Config + reflector *Reflector + reflectorMutex sync.RWMutex + clock clock.Clock +} + +// Controller is a low-level controller that is parameterized by a +// Config and used in sharedIndexInformer. +type Controller interface { + // Run does two things. One is to construct and run a Reflector + // to pump objects/notifications from the Config's ListerWatcher + // to the Config's Queue and possibly invoke the occasional Resync + // on that Queue. The other is to repeatedly Pop from the Queue + // and process with the Config's ProcessFunc. Both of these + // continue until `stopCh` is closed. + Run(stopCh <-chan struct{}) + + // HasSynced delegates to the Config's Queue + HasSynced() bool + + // LastSyncResourceVersion delegates to the Reflector when there + // is one, otherwise returns the empty string + LastSyncResourceVersion() string +} + +// New makes a new Controller from the given Config. +func New(c *Config) Controller { + ctlr := &controller{ + config: *c, + clock: &clock.RealClock{}, + } + return ctlr +} + +// Run begins processing items, and will continue until a value is sent down stopCh or it is closed. +// It's an error to call Run more than once. +// Run blocks; call via go. 
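+//
+// A minimal caller sketch (illustrative; cfg and stopCh are assumptions,
+// not part of this file):
+//
+//     ctrl := New(cfg)
+//     stopCh := make(chan struct{})
+//     go ctrl.Run(stopCh)
+//     // ... later, close(stopCh) to stop the controller.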
+func (c *controller) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + go func() { + <-stopCh + c.config.Queue.Close() + }() + r := NewReflector( + c.config.ListerWatcher, + c.config.ObjectType, + c.config.Queue, + c.config.FullResyncPeriod, + ) + r.ShouldResync = c.config.ShouldResync + r.WatchListPageSize = c.config.WatchListPageSize + r.clock = c.clock + if c.config.WatchErrorHandler != nil { + r.watchErrorHandler = c.config.WatchErrorHandler + } + + c.reflectorMutex.Lock() + c.reflector = r + c.reflectorMutex.Unlock() + + var wg wait.Group + + wg.StartWithChannel(stopCh, r.Run) + + wait.Until(c.processLoop, time.Second, stopCh) + wg.Wait() +} + +// Returns true once this controller has completed an initial resource listing +func (c *controller) HasSynced() bool { + return c.config.Queue.HasSynced() +} + +func (c *controller) LastSyncResourceVersion() string { + c.reflectorMutex.RLock() + defer c.reflectorMutex.RUnlock() + if c.reflector == nil { + return "" + } + return c.reflector.LastSyncResourceVersion() +} + +// processLoop drains the work queue. +// TODO: Consider doing the processing in parallel. This will require a little thought +// to make sure that we don't end up processing the same object multiple times +// concurrently. +// +// TODO: Plumb through the stopCh here (and down to the queue) so that this can +// actually exit when the controller is stopped. Or just give up on this stuff +// ever being stoppable. Converting this whole package to use Context would +// also be helpful. +func (c *controller) processLoop() { + for { + obj, err := c.config.Queue.Pop(PopProcessFunc(c.config.Process)) + if err != nil { + if err == ErrFIFOClosed { + return + } + if c.config.RetryOnError { + // This is the safe way to re-enqueue. + c.config.Queue.AddIfNotPresent(obj) + } + } + } +} + +// ResourceEventHandler can handle notifications for events that +// happen to a resource. The events are informational only, so you +// can't return an error. The handlers MUST NOT modify the objects +// received; this concerns not only the top level of structure but all +// the data structures reachable from it. +// - OnAdd is called when an object is added. +// - OnUpdate is called when an object is modified. Note that oldObj is the +// last known state of the object-- it is possible that several changes +// were combined together, so you can't use this to see every single +// change. OnUpdate is also called when a re-list happens, and it will +// get called even if nothing changed. This is useful for periodically +// evaluating or syncing something. +// - OnDelete will get the final state of the item if it is known, otherwise +// it will get an object of type DeletedFinalStateUnknown. This can +// happen if the watch is closed and misses the delete event and we don't +// notice the deletion until the subsequent re-list. +type ResourceEventHandler interface { + OnAdd(obj interface{}) + OnUpdate(oldObj, newObj interface{}) + OnDelete(obj interface{}) +} + +// ResourceEventHandlerFuncs is an adaptor to let you easily specify as many or +// as few of the notification functions as you want while still implementing +// ResourceEventHandler. This adapter does not remove the prohibition against +// modifying the objects. +type ResourceEventHandlerFuncs struct { + AddFunc func(obj interface{}) + UpdateFunc func(oldObj, newObj interface{}) + DeleteFunc func(obj interface{}) +} + +// OnAdd calls AddFunc if it's not nil. 
+func (r ResourceEventHandlerFuncs) OnAdd(obj interface{}) {
+ if r.AddFunc != nil {
+ r.AddFunc(obj)
+ }
+}
+
+// OnUpdate calls UpdateFunc if it's not nil.
+func (r ResourceEventHandlerFuncs) OnUpdate(oldObj, newObj interface{}) {
+ if r.UpdateFunc != nil {
+ r.UpdateFunc(oldObj, newObj)
+ }
+}
+
+// OnDelete calls DeleteFunc if it's not nil.
+func (r ResourceEventHandlerFuncs) OnDelete(obj interface{}) {
+ if r.DeleteFunc != nil {
+ r.DeleteFunc(obj)
+ }
+}
+
+// FilteringResourceEventHandler applies the provided filter to all events coming
+// in, ensuring the appropriate nested handler method is invoked. An object
+// that starts passing the filter after an update is considered an add, and an
+// object that stops passing the filter after an update is considered a delete.
+// Like the handlers, the filter MUST NOT modify the objects it is given.
+type FilteringResourceEventHandler struct {
+ FilterFunc func(obj interface{}) bool
+ Handler ResourceEventHandler
+}
+
+// OnAdd calls the nested handler only if the filter succeeds
+func (r FilteringResourceEventHandler) OnAdd(obj interface{}) {
+ if !r.FilterFunc(obj) {
+ return
+ }
+ r.Handler.OnAdd(obj)
+}
+
+// OnUpdate ensures the proper handler is called depending on whether the filter matches
+func (r FilteringResourceEventHandler) OnUpdate(oldObj, newObj interface{}) {
+ newer := r.FilterFunc(newObj)
+ older := r.FilterFunc(oldObj)
+ switch {
+ case newer && older:
+ r.Handler.OnUpdate(oldObj, newObj)
+ case newer && !older:
+ r.Handler.OnAdd(newObj)
+ case !newer && older:
+ r.Handler.OnDelete(oldObj)
+ default:
+ // do nothing
+ }
+}
+
+// OnDelete calls the nested handler only if the filter succeeds
+func (r FilteringResourceEventHandler) OnDelete(obj interface{}) {
+ if !r.FilterFunc(obj) {
+ return
+ }
+ r.Handler.OnDelete(obj)
+}
+
+// DeletionHandlingMetaNamespaceKeyFunc checks for
+// DeletedFinalStateUnknown objects before calling
+// MetaNamespaceKeyFunc.
+func DeletionHandlingMetaNamespaceKeyFunc(obj interface{}) (string, error) {
+ if d, ok := obj.(DeletedFinalStateUnknown); ok {
+ return d.Key, nil
+ }
+ return MetaNamespaceKeyFunc(obj)
+}
+
+// NewInformer returns a Store and a controller for populating the store
+// while also providing event notifications. You should only use the returned
+// Store for Get/List operations; Add/Modify/Deletes will cause the event
+// notifications to be faulty.
+//
+// Parameters:
+// - lw is list and watch functions for the source of the resource you want to
+// be informed of.
+// - objType is an object of the type that you expect to receive.
+// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+// calls, even if nothing changed). Otherwise, re-list will be delayed as
+// long as possible (until the upstream source closes the watch or times out,
+// or you stop the controller).
+// - h is the object you want notifications sent to.
+func NewInformer(
+ lw ListerWatcher,
+ objType runtime.Object,
+ resyncPeriod time.Duration,
+ h ResourceEventHandler,
+) (Store, Controller) {
+ // This will hold the client state, as we know it.
+ clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+
+ return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
+}
+
+// NewIndexerInformer returns an Indexer and a Controller for populating the index
+// while also providing event notifications. You should only use the returned
+// Index for Get/List operations; Add/Modify/Deletes will cause the event
+// notifications to be faulty.
+//
+// Parameters:
+// - lw is list and watch functions for the source of the resource you want to
+// be informed of.
+// - objType is an object of the type that you expect to receive.
+// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate
+// calls, even if nothing changed). Otherwise, re-list will be delayed as
+// long as possible (until the upstream source closes the watch or times out,
+// or you stop the controller).
+// - h is the object you want notifications sent to.
+// - indexers is the indexer for the received object type.
+func NewIndexerInformer(
+ lw ListerWatcher,
+ objType runtime.Object,
+ resyncPeriod time.Duration,
+ h ResourceEventHandler,
+ indexers Indexers,
+) (Indexer, Controller) {
+ // This will hold the client state, as we know it.
+ clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers)
+
+ return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, nil)
+}
+
+// TransformFunc allows for transforming an object before it will be processed
+// and put into the controller cache and before the corresponding handlers will
+// be called on it.
+// TransformFunc (similarly to ResourceEventHandler functions) should be able
+// to correctly handle the tombstone of type cache.DeletedFinalStateUnknown.
+//
+// The most common usage pattern is to clean up some parts of the object to
+// reduce component memory usage if a given component doesn't care about them.
+type TransformFunc func(interface{}) (interface{}, error)
+
+// NewTransformingInformer returns a Store and a controller for populating
+// the store while also providing event notifications. You should only use
+// the returned Store for Get/List operations; Add/Modify/Deletes will cause
+// the event notifications to be faulty.
+// The given transform function will be called on all objects before they are
+// put into the Store and corresponding Add/Modify/Delete handlers will
+// be invoked for them.
+func NewTransformingInformer(
+ lw ListerWatcher,
+ objType runtime.Object,
+ resyncPeriod time.Duration,
+ h ResourceEventHandler,
+ transformer TransformFunc,
+) (Store, Controller) {
+ // This will hold the client state, as we know it.
+ clientState := NewStore(DeletionHandlingMetaNamespaceKeyFunc)
+
+ return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer)
+}
+
+// NewTransformingIndexerInformer returns an Indexer and a controller for
+// populating the index while also providing event notifications. You should
+// only use the returned Index for Get/List operations; Add/Modify/Deletes
+// will cause the event notifications to be faulty.
+// The given transform function will be called on all objects before they are
+// put into the Index and corresponding Add/Modify/Delete handlers will
+// be invoked for them.
+func NewTransformingIndexerInformer(
+ lw ListerWatcher,
+ objType runtime.Object,
+ resyncPeriod time.Duration,
+ h ResourceEventHandler,
+ indexers Indexers,
+ transformer TransformFunc,
+) (Indexer, Controller) {
+ // This will hold the client state, as we know it.
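+ // The transformer configured above runs before objects reach this indexer
+ // (see processDeltas below), so the stored state is the transformed one.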
+ clientState := NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers) + + return clientState, newInformer(lw, objType, resyncPeriod, h, clientState, transformer) +} + +// Multiplexes updates in the form of a list of Deltas into a Store, and informs +// a given handler of events OnUpdate, OnAdd, OnDelete +func processDeltas( + // Object which receives event notifications from the given deltas + handler ResourceEventHandler, + clientState Store, + transformer TransformFunc, + deltas Deltas, +) error { + // from oldest to newest + for _, d := range deltas { + obj := d.Object + if transformer != nil { + var err error + obj, err = transformer(obj) + if err != nil { + return err + } + } + + switch d.Type { + case Sync, Replaced, Added, Updated: + if old, exists, err := clientState.Get(obj); err == nil && exists { + if err := clientState.Update(obj); err != nil { + return err + } + handler.OnUpdate(old, obj) + } else { + if err := clientState.Add(obj); err != nil { + return err + } + handler.OnAdd(obj) + } + case Deleted: + if err := clientState.Delete(obj); err != nil { + return err + } + handler.OnDelete(obj) + } + } + return nil +} + +// newInformer returns a controller for populating the store while also +// providing event notifications. +// +// Parameters +// - lw is list and watch functions for the source of the resource you want to +// be informed of. +// - objType is an object of the type that you expect to receive. +// - resyncPeriod: if non-zero, will re-list this often (you will get OnUpdate +// calls, even if nothing changed). Otherwise, re-list will be delayed as +// long as possible (until the upstream source closes the watch or times out, +// or you stop the controller). +// - h is the object you want notifications sent to. +// - clientState is the store you want to populate +func newInformer( + lw ListerWatcher, + objType runtime.Object, + resyncPeriod time.Duration, + h ResourceEventHandler, + clientState Store, + transformer TransformFunc, +) Controller { + // This will hold incoming changes. Note how we pass clientState in as a + // KeyLister, that way resync operations will result in the correct set + // of update/delete deltas. + fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KnownObjects: clientState, + EmitDeltaTypeReplaced: true, + }) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: lw, + ObjectType: objType, + FullResyncPeriod: resyncPeriod, + RetryOnError: false, + + Process: func(obj interface{}) error { + if deltas, ok := obj.(Deltas); ok { + return processDeltas(h, clientState, transformer, deltas) + } + return errors.New("object given as Process argument is not Deltas") + }, + } + return New(cfg) +} diff --git a/vendor/k8s.io/client-go/tools/cache/delta_fifo.go b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go new file mode 100644 index 00000000..0c13a41f --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/delta_fifo.go @@ -0,0 +1,757 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "errors" + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/sets" + + "k8s.io/klog/v2" + utiltrace "k8s.io/utils/trace" +) + +// DeltaFIFOOptions is the configuration parameters for DeltaFIFO. All are +// optional. +type DeltaFIFOOptions struct { + + // KeyFunction is used to figure out what key an object should have. (It's + // exposed in the returned DeltaFIFO's KeyOf() method, with additional + // handling around deleted objects and queue state). + // Optional, the default is MetaNamespaceKeyFunc. + KeyFunction KeyFunc + + // KnownObjects is expected to return a list of keys that the consumer of + // this queue "knows about". It is used to decide which items are missing + // when Replace() is called; 'Deleted' deltas are produced for the missing items. + // KnownObjects may be nil if you can tolerate missing deletions on Replace(). + KnownObjects KeyListerGetter + + // EmitDeltaTypeReplaced indicates that the queue consumer + // understands the Replaced DeltaType. Before the `Replaced` event type was + // added, calls to Replace() were handled the same as Sync(). For + // backwards-compatibility purposes, this is false by default. + // When true, `Replaced` events will be sent for items passed to a Replace() call. + // When false, `Sync` events will be sent instead. + EmitDeltaTypeReplaced bool +} + +// DeltaFIFO is like FIFO, but differs in two ways. One is that the +// accumulator associated with a given object's key is not that object +// but rather a Deltas, which is a slice of Delta values for that +// object. Applying an object to a Deltas means to append a Delta +// except when the potentially appended Delta is a Deleted and the +// Deltas already ends with a Deleted. In that case the Deltas does +// not grow, although the terminal Deleted will be replaced by the new +// Deleted if the older Deleted's object is a +// DeletedFinalStateUnknown. +// +// The other difference is that DeltaFIFO has two additional ways that +// an object can be applied to an accumulator: Replaced and Sync. +// If EmitDeltaTypeReplaced is not set to true, Sync will be used in +// replace events for backwards compatibility. Sync is used for periodic +// resync events. +// +// DeltaFIFO is a producer-consumer queue, where a Reflector is +// intended to be the producer, and the consumer is whatever calls +// the Pop() method. +// +// DeltaFIFO solves this use case: +// - You want to process every object change (delta) at most once. +// - When you process an object, you want to see everything +// that's happened to it since you last processed it. +// - You want to process the deletion of some of the objects. +// - You might want to periodically reprocess objects. +// +// DeltaFIFO's Pop(), Get(), and GetByKey() methods return +// interface{} to satisfy the Store/Queue interfaces, but they +// will always return an object of type Deltas. List() returns +// the newest object from each accumulator in the FIFO. +// +// A DeltaFIFO's knownObjects KeyListerGetter provides the abilities +// to list Store keys and to get objects by Store key. The objects in +// question are called "known objects" and this set of objects +// modifies the behavior of the Delete, Replace, and Resync methods +// (each in a different way). +// +// A note on threading: If you call Pop() in parallel from multiple +// threads, you could end up with multiple threads processing slightly +// different versions of the same object. 
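+//
+// A minimal producer/consumer sketch (illustrative; `store` and `handle`
+// are assumptions, not part of this file):
+//
+//     fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{
+//         KnownObjects:          store,
+//         EmitDeltaTypeReplaced: true,
+//     })
+//     for {
+//         _, err := fifo.Pop(func(obj interface{}) error {
+//             for _, d := range obj.(Deltas) {
+//                 handle(d.Type, d.Object) // deltas arrive oldest first
+//             }
+//             return nil
+//         })
+//         if err == ErrFIFOClosed {
+//             break
+//         }
+//     }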
+type DeltaFIFO struct { + // lock/cond protects access to 'items' and 'queue'. + lock sync.RWMutex + cond sync.Cond + + // `items` maps a key to a Deltas. + // Each such Deltas has at least one Delta. + items map[string]Deltas + + // `queue` maintains FIFO order of keys for consumption in Pop(). + // There are no duplicates in `queue`. + // A key is in `queue` if and only if it is in `items`. + queue []string + + // populated is true if the first batch of items inserted by Replace() has been populated + // or Delete/Add/Update/AddIfNotPresent was called first. + populated bool + // initialPopulationCount is the number of items inserted by the first call of Replace() + initialPopulationCount int + + // keyFunc is used to make the key used for queued item + // insertion and retrieval, and should be deterministic. + keyFunc KeyFunc + + // knownObjects list keys that are "known" --- affecting Delete(), + // Replace(), and Resync() + knownObjects KeyListerGetter + + // Used to indicate a queue is closed so a control loop can exit when a queue is empty. + // Currently, not used to gate any of CRUD operations. + closed bool + + // emitDeltaTypeReplaced is whether to emit the Replaced or Sync + // DeltaType when Replace() is called (to preserve backwards compat). + emitDeltaTypeReplaced bool +} + +// DeltaType is the type of a change (addition, deletion, etc) +type DeltaType string + +// Change type definition +const ( + Added DeltaType = "Added" + Updated DeltaType = "Updated" + Deleted DeltaType = "Deleted" + // Replaced is emitted when we encountered watch errors and had to do a + // relist. We don't know if the replaced object has changed. + // + // NOTE: Previous versions of DeltaFIFO would use Sync for Replace events + // as well. Hence, Replaced is only emitted when the option + // EmitDeltaTypeReplaced is true. + Replaced DeltaType = "Replaced" + // Sync is for synthetic events during a periodic resync. + Sync DeltaType = "Sync" +) + +// Delta is a member of Deltas (a list of Delta objects) which +// in its turn is the type stored by a DeltaFIFO. It tells you what +// change happened, and the object's state after* that change. +// +// [*] Unless the change is a deletion, and then you'll get the final +// state of the object before it was deleted. +type Delta struct { + Type DeltaType + Object interface{} +} + +// Deltas is a list of one or more 'Delta's to an individual object. +// The oldest delta is at index 0, the newest delta is the last one. +type Deltas []Delta + +// NewDeltaFIFO returns a Queue which can be used to process changes to items. +// +// keyFunc is used to figure out what key an object should have. (It is +// exposed in the returned DeltaFIFO's KeyOf() method, with additional handling +// around deleted objects and queue state). +// +// 'knownObjects' may be supplied to modify the behavior of Delete, +// Replace, and Resync. It may be nil if you do not need those +// modifications. +// +// TODO: consider merging keyLister with this object, tracking a list of +// "known" keys when Pop() is called. Have to think about how that +// affects error retrying. +// +// NOTE: It is possible to misuse this and cause a race when using an +// external known object source. +// Whether there is a potential race depends on how the consumer +// modifies knownObjects. In Pop(), process function is called under +// lock, so it is safe to update data structures in it that need to be +// in sync with the queue (e.g. knownObjects). 
+// +// Example: +// In case of sharedIndexInformer being a consumer +// (https://github.com/kubernetes/kubernetes/blob/0cdd940f/staging/src/k8s.io/client-go/tools/cache/shared_informer.go#L192), +// there is no race as knownObjects (s.indexer) is modified safely +// under DeltaFIFO's lock. The only exceptions are GetStore() and +// GetIndexer() methods, which expose ways to modify the underlying +// storage. Currently these two methods are used for creating Lister +// and internal tests. +// +// Also see the comment on DeltaFIFO. +// +// Warning: This constructs a DeltaFIFO that does not differentiate between +// events caused by a call to Replace (e.g., from a relist, which may +// contain object updates), and synthetic events caused by a periodic resync +// (which just emit the existing object). See https://issue.k8s.io/86015 for details. +// +// Use `NewDeltaFIFOWithOptions(DeltaFIFOOptions{..., EmitDeltaTypeReplaced: true})` +// instead to receive a `Replaced` event depending on the type. +// +// Deprecated: Equivalent to NewDeltaFIFOWithOptions(DeltaFIFOOptions{KeyFunction: keyFunc, KnownObjects: knownObjects}) +func NewDeltaFIFO(keyFunc KeyFunc, knownObjects KeyListerGetter) *DeltaFIFO { + return NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KeyFunction: keyFunc, + KnownObjects: knownObjects, + }) +} + +// NewDeltaFIFOWithOptions returns a Queue which can be used to process changes to +// items. See also the comment on DeltaFIFO. +func NewDeltaFIFOWithOptions(opts DeltaFIFOOptions) *DeltaFIFO { + if opts.KeyFunction == nil { + opts.KeyFunction = MetaNamespaceKeyFunc + } + + f := &DeltaFIFO{ + items: map[string]Deltas{}, + queue: []string{}, + keyFunc: opts.KeyFunction, + knownObjects: opts.KnownObjects, + + emitDeltaTypeReplaced: opts.EmitDeltaTypeReplaced, + } + f.cond.L = &f.lock + return f +} + +var ( + _ = Queue(&DeltaFIFO{}) // DeltaFIFO is a Queue +) + +var ( + // ErrZeroLengthDeltasObject is returned in a KeyError if a Deltas + // object with zero length is encountered (should be impossible, + // but included for completeness). + ErrZeroLengthDeltasObject = errors.New("0 length Deltas object; can't get key") +) + +// Close the queue. +func (f *DeltaFIFO) Close() { + f.lock.Lock() + defer f.lock.Unlock() + f.closed = true + f.cond.Broadcast() +} + +// KeyOf exposes f's keyFunc, but also detects the key of a Deltas object or +// DeletedFinalStateUnknown objects. +func (f *DeltaFIFO) KeyOf(obj interface{}) (string, error) { + if d, ok := obj.(Deltas); ok { + if len(d) == 0 { + return "", KeyError{obj, ErrZeroLengthDeltasObject} + } + obj = d.Newest().Object + } + if d, ok := obj.(DeletedFinalStateUnknown); ok { + return d.Key, nil + } + return f.keyFunc(obj) +} + +// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent are called first, +// or the first batch of items inserted by Replace() has been popped. +func (f *DeltaFIFO) HasSynced() bool { + f.lock.Lock() + defer f.lock.Unlock() + return f.populated && f.initialPopulationCount == 0 +} + +// Add inserts an item, and puts it in the queue. The item is only enqueued +// if it doesn't already exist in the set. +func (f *DeltaFIFO) Add(obj interface{}) error { + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + return f.queueActionLocked(Added, obj) +} + +// Update is just like Add, but makes an Updated Delta. 
+func (f *DeltaFIFO) Update(obj interface{}) error { + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + return f.queueActionLocked(Updated, obj) +} + +// Delete is just like Add, but makes a Deleted Delta. If the given +// object does not already exist, it will be ignored. (It may have +// already been deleted by a Replace (re-list), for example.) In this +// method `f.knownObjects`, if not nil, provides (via GetByKey) +// _additional_ objects that are considered to already exist. +func (f *DeltaFIFO) Delete(obj interface{}) error { + id, err := f.KeyOf(obj) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.populated = true + if f.knownObjects == nil { + if _, exists := f.items[id]; !exists { + // Presumably, this was deleted when a relist happened. + // Don't provide a second report of the same deletion. + return nil + } + } else { + // We only want to skip the "deletion" action if the object doesn't + // exist in knownObjects and it doesn't have corresponding item in items. + // Note that even if there is a "deletion" action in items, we can ignore it, + // because it will be deduped automatically in "queueActionLocked" + _, exists, err := f.knownObjects.GetByKey(id) + _, itemsExist := f.items[id] + if err == nil && !exists && !itemsExist { + // Presumably, this was deleted when a relist happened. + // Don't provide a second report of the same deletion. + return nil + } + } + + // exist in items and/or KnownObjects + return f.queueActionLocked(Deleted, obj) +} + +// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already +// present in the set, it is neither enqueued nor added to the set. +// +// This is useful in a single producer/consumer scenario so that the consumer can +// safely retry items without contending with the producer and potentially enqueueing +// stale items. +// +// Important: obj must be a Deltas (the output of the Pop() function). Yes, this is +// different from the Add/Update/Delete functions. +func (f *DeltaFIFO) AddIfNotPresent(obj interface{}) error { + deltas, ok := obj.(Deltas) + if !ok { + return fmt.Errorf("object must be of type deltas, but got: %#v", obj) + } + id, err := f.KeyOf(deltas) + if err != nil { + return KeyError{obj, err} + } + f.lock.Lock() + defer f.lock.Unlock() + f.addIfNotPresent(id, deltas) + return nil +} + +// addIfNotPresent inserts deltas under id if it does not exist, and assumes the caller +// already holds the fifo lock. +func (f *DeltaFIFO) addIfNotPresent(id string, deltas Deltas) { + f.populated = true + if _, exists := f.items[id]; exists { + return + } + + f.queue = append(f.queue, id) + f.items[id] = deltas + f.cond.Broadcast() +} + +// re-listing and watching can deliver the same update multiple times in any +// order. This will combine the most recent two deltas if they are the same. +func dedupDeltas(deltas Deltas) Deltas { + n := len(deltas) + if n < 2 { + return deltas + } + a := &deltas[n-1] + b := &deltas[n-2] + if out := isDup(a, b); out != nil { + deltas[n-2] = *out + return deltas[:n-1] + } + return deltas +} + +// If a & b represent the same event, returns the delta that ought to be kept. +// Otherwise, returns nil. +// TODO: is there anything other than deletions that need deduping? +func isDup(a, b *Delta) *Delta { + if out := isDeletionDup(a, b); out != nil { + return out + } + // TODO: Detect other duplicate situations? Are there any? + return nil +} + +// keep the one with the most information if both are deletions. 
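+// For example, when a real Deleted delta is followed by a synthetic
+// DeletedFinalStateUnknown tombstone, the non-tombstone delta is kept.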
+func isDeletionDup(a, b *Delta) *Delta { + if b.Type != Deleted || a.Type != Deleted { + return nil + } + // Do more sophisticated checks, or is this sufficient? + if _, ok := b.Object.(DeletedFinalStateUnknown); ok { + return a + } + return b +} + +// queueActionLocked appends to the delta list for the object. +// Caller must lock first. +func (f *DeltaFIFO) queueActionLocked(actionType DeltaType, obj interface{}) error { + id, err := f.KeyOf(obj) + if err != nil { + return KeyError{obj, err} + } + oldDeltas := f.items[id] + newDeltas := append(oldDeltas, Delta{actionType, obj}) + newDeltas = dedupDeltas(newDeltas) + + if len(newDeltas) > 0 { + if _, exists := f.items[id]; !exists { + f.queue = append(f.queue, id) + } + f.items[id] = newDeltas + f.cond.Broadcast() + } else { + // This never happens, because dedupDeltas never returns an empty list + // when given a non-empty list (as it is here). + // If somehow it happens anyway, deal with it but complain. + if oldDeltas == nil { + klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; ignoring", id, oldDeltas, obj) + return nil + } + klog.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; breaking invariant by storing empty Deltas", id, oldDeltas, obj) + f.items[id] = newDeltas + return fmt.Errorf("Impossible dedupDeltas for id=%q: oldDeltas=%#+v, obj=%#+v; broke DeltaFIFO invariant by storing empty Deltas", id, oldDeltas, obj) + } + return nil +} + +// List returns a list of all the items; it returns the object +// from the most recent Delta. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) List() []interface{} { + f.lock.RLock() + defer f.lock.RUnlock() + return f.listLocked() +} + +func (f *DeltaFIFO) listLocked() []interface{} { + list := make([]interface{}, 0, len(f.items)) + for _, item := range f.items { + list = append(list, item.Newest().Object) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently +// in the FIFO. +func (f *DeltaFIFO) ListKeys() []string { + f.lock.RLock() + defer f.lock.RUnlock() + list := make([]string, 0, len(f.queue)) + for _, key := range f.queue { + list = append(list, key) + } + return list +} + +// Get returns the complete list of deltas for the requested item, +// or sets exists=false. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) Get(obj interface{}) (item interface{}, exists bool, err error) { + key, err := f.KeyOf(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return f.GetByKey(key) +} + +// GetByKey returns the complete list of deltas for the requested item, +// setting exists=false if that list is empty. +// You should treat the items returned inside the deltas as immutable. +func (f *DeltaFIFO) GetByKey(key string) (item interface{}, exists bool, err error) { + f.lock.RLock() + defer f.lock.RUnlock() + d, exists := f.items[key] + if exists { + // Copy item's slice so operations on this slice + // won't interfere with the object we return. + d = copyDeltas(d) + } + return d, exists, nil +} + +// IsClosed checks if the queue is closed +func (f *DeltaFIFO) IsClosed() bool { + f.lock.Lock() + defer f.lock.Unlock() + return f.closed +} + +// Pop blocks until the queue has some items, and then returns one. If +// multiple items are ready, they are returned in the order in which they were +// added/updated. 
The item is removed from the queue (and the store) before it
+// is returned, so if you don't successfully process it, you need to add it back
+// with AddIfNotPresent().
+// process function is called under lock, so it is safe to update data structures
+// in it that need to be in sync with the queue (e.g. knownKeys). The PopProcessFunc
+// may return an instance of ErrRequeue with a nested error to indicate the current
+// item should be requeued (equivalent to calling AddIfNotPresent under the lock).
+// process should avoid expensive I/O operations so that other queue operations, i.e.
+// Add() and Get(), won't be blocked for too long.
+//
+// Pop returns a 'Deltas', which has a complete list of all the things
+// that happened to the object (deltas) while it was sitting in the queue.
+func (f *DeltaFIFO) Pop(process PopProcessFunc) (interface{}, error) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ for {
+ for len(f.queue) == 0 {
+ // When the queue is empty, invocation of Pop() is blocked until new item is enqueued.
+ // When Close() is called, f.closed is set and the condition is broadcast,
+ // which causes this loop to continue and return from Pop().
+ if f.closed {
+ return nil, ErrFIFOClosed
+ }
+
+ f.cond.Wait()
+ }
+ id := f.queue[0]
+ f.queue = f.queue[1:]
+ depth := len(f.queue)
+ if f.initialPopulationCount > 0 {
+ f.initialPopulationCount--
+ }
+ item, ok := f.items[id]
+ if !ok {
+ // This should never happen
+ klog.Errorf("Inconceivable! %q was in f.queue but not f.items; ignoring.", id)
+ continue
+ }
+ delete(f.items, id)
+ // Only log traces if the queue depth is greater than 10 and it takes more than
+ // 100 milliseconds to process one item from the queue.
+ // Queue depth never goes high because processing an item is locking the queue,
+ // and new items can't be added until processing finishes.
+ // https://github.com/kubernetes/kubernetes/issues/103789
+ if depth > 10 {
+ trace := utiltrace.New("DeltaFIFO Pop Process",
+ utiltrace.Field{Key: "ID", Value: id},
+ utiltrace.Field{Key: "Depth", Value: depth},
+ utiltrace.Field{Key: "Reason", Value: "slow event handlers blocking the queue"})
+ defer trace.LogIfLong(100 * time.Millisecond)
+ }
+ err := process(item)
+ if e, ok := err.(ErrRequeue); ok {
+ f.addIfNotPresent(id, item)
+ err = e.Err
+ }
+ // Don't need to copyDeltas here, because we're transferring
+ // ownership to the caller.
+ return item, err
+ }
+}
+
+// Replace atomically does two things: (1) it adds the given objects
+// using the Sync or Replaced DeltaType and then (2) it does some deletions.
+// In particular: for every pre-existing key K that is not the key of
+// an object in `list` there is the effect of
+// `Delete(DeletedFinalStateUnknown{K, O})` where O is current object
+// of K. If `f.knownObjects == nil` then the pre-existing keys are
+// those in `f.items` and the current object of K is the `.Newest()`
+// of the Deltas associated with K. Otherwise the pre-existing keys
+// are those listed by `f.knownObjects` and the current object of K is
+// what `f.knownObjects.GetByKey(K)` returns.
+func (f *DeltaFIFO) Replace(list []interface{}, _ string) error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ keys := make(sets.String, len(list))
+
+ // keep backwards compat for old clients
+ action := Sync
+ if f.emitDeltaTypeReplaced {
+ action = Replaced
+ }
+
+ // Add Sync/Replaced action for each new item.
+ for _, item := range list {
+ key, err := f.KeyOf(item)
+ if err != nil {
+ return KeyError{item, err}
+ }
+ keys.Insert(key)
+ if err := f.queueActionLocked(action, item); err != nil {
+ return fmt.Errorf("couldn't enqueue object: %v", err)
+ }
+ }
+
+ if f.knownObjects == nil {
+ // Do deletion detection against our own list.
+ queuedDeletions := 0
+ for k, oldItem := range f.items {
+ if keys.Has(k) {
+ continue
+ }
+ // Delete pre-existing items not in the new list.
+ // This could happen if watch deletion event was missed while
+ // disconnected from apiserver.
+ var deletedObj interface{}
+ if n := oldItem.Newest(); n != nil {
+ deletedObj = n.Object
+ }
+ queuedDeletions++
+ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+ return err
+ }
+ }
+
+ if !f.populated {
+ f.populated = true
+ // While there shouldn't be any queued deletions in the initial
+ // population of the queue, it's better to be on the safe side.
+ f.initialPopulationCount = keys.Len() + queuedDeletions
+ }
+
+ return nil
+ }
+
+ // Detect deletions not already in the queue.
+ knownKeys := f.knownObjects.ListKeys()
+ queuedDeletions := 0
+ for _, k := range knownKeys {
+ if keys.Has(k) {
+ continue
+ }
+
+ deletedObj, exists, err := f.knownObjects.GetByKey(k)
+ if err != nil {
+ deletedObj = nil
+ klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
+ } else if !exists {
+ deletedObj = nil
+ klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
+ }
+ queuedDeletions++
+ if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
+ return err
+ }
+ }
+
+ if !f.populated {
+ f.populated = true
+ f.initialPopulationCount = keys.Len() + queuedDeletions
+ }
+
+ return nil
+}
+
+// Resync adds, with a Sync type of Delta, every object listed by
+// `f.knownObjects` whose key is not already queued for processing.
+// If `f.knownObjects` is `nil` then Resync does nothing.
+func (f *DeltaFIFO) Resync() error {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.knownObjects == nil {
+ return nil
+ }
+
+ keys := f.knownObjects.ListKeys()
+ for _, k := range keys {
+ if err := f.syncKeyLocked(k); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (f *DeltaFIFO) syncKeyLocked(key string) error {
+ obj, exists, err := f.knownObjects.GetByKey(key)
+ if err != nil {
+ klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
+ return nil
+ } else if !exists {
+ klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
+ return nil
+ }
+
+ // If we are doing Resync() and there is already an event queued for that object,
+ // we ignore the Resync for it. This is to avoid the race in which the resync
+ // comes with the previous value of the object (since queueing an event for the
+ // object doesn't trigger changing the underlying store).
+ id, err := f.KeyOf(obj)
+ if err != nil {
+ return KeyError{obj, err}
+ }
+ if len(f.items[id]) > 0 {
+ return nil
+ }
+
+ if err := f.queueActionLocked(Sync, obj); err != nil {
+ return fmt.Errorf("couldn't queue object: %v", err)
+ }
+ return nil
+}
+
+// A KeyListerGetter is anything that knows how to list its keys and look up by key.
+type KeyListerGetter interface {
+ KeyLister
+ KeyGetter
+}
+
+// A KeyLister is anything that knows how to list its keys.
+type KeyLister interface { + ListKeys() []string +} + +// A KeyGetter is anything that knows how to get the value stored under a given key. +type KeyGetter interface { + // GetByKey returns the value associated with the key, or sets exists=false. + GetByKey(key string) (value interface{}, exists bool, err error) +} + +// Oldest is a convenience function that returns the oldest delta, or +// nil if there are no deltas. +func (d Deltas) Oldest() *Delta { + if len(d) > 0 { + return &d[0] + } + return nil +} + +// Newest is a convenience function that returns the newest delta, or +// nil if there are no deltas. +func (d Deltas) Newest() *Delta { + if n := len(d); n > 0 { + return &d[n-1] + } + return nil +} + +// copyDeltas returns a shallow copy of d; that is, it copies the slice but not +// the objects in the slice. This allows Get/List to return an object that we +// know won't be clobbered by a subsequent modifications. +func copyDeltas(d Deltas) Deltas { + d2 := make(Deltas, len(d)) + copy(d2, d) + return d2 +} + +// DeletedFinalStateUnknown is placed into a DeltaFIFO in the case where an object +// was deleted but the watch deletion event was missed while disconnected from +// apiserver. In this case we don't know the final "resting" state of the object, so +// there's a chance the included `Obj` is stale. +type DeletedFinalStateUnknown struct { + Key string + Obj interface{} +} diff --git a/vendor/k8s.io/client-go/tools/cache/doc.go b/vendor/k8s.io/client-go/tools/cache/doc.go new file mode 100644 index 00000000..56b61d30 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache is a client-side caching mechanism. It is useful for +// reducing the number of server calls you'd otherwise need to make. +// Reflector watches a server and updates a Store. Two stores are provided; +// one that simply caches objects (for example, to allow a scheduler to +// list currently available nodes), and one that additionally acts as +// a FIFO queue (for example, to allow a scheduler to process incoming +// pods). +package cache // import "k8s.io/client-go/tools/cache" diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go new file mode 100644 index 00000000..813916eb --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache.go @@ -0,0 +1,214 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "sync" + "time" + + "k8s.io/utils/clock" +) + +// ExpirationCache implements the store interface +// 1. All entries are automatically time stamped on insert +// a. The key is computed based off the original item/keyFunc +// b. The value inserted under that key is the timestamped item +// 2. Expiration happens lazily on read based on the expiration policy +// a. No item can be inserted into the store while we're expiring +// *any* item in the cache. +// 3. Time-stamps are stripped off unexpired entries before return +// +// Note that the ExpirationCache is inherently slower than a normal +// threadSafeStore because it takes a write lock every time it checks if +// an item has expired. +type ExpirationCache struct { + cacheStorage ThreadSafeStore + keyFunc KeyFunc + clock clock.Clock + expirationPolicy ExpirationPolicy + // expirationLock is a write lock used to guarantee that we don't clobber + // newly inserted objects because of a stale expiration timestamp comparison + expirationLock sync.Mutex +} + +// ExpirationPolicy dictates when an object expires. Currently only abstracted out +// so unittests don't rely on the system clock. +type ExpirationPolicy interface { + IsExpired(obj *TimestampedEntry) bool +} + +// TTLPolicy implements a ttl based ExpirationPolicy. +type TTLPolicy struct { + // >0: Expire entries with an age > ttl + // <=0: Don't expire any entry + TTL time.Duration + + // Clock used to calculate ttl expiration + Clock clock.Clock +} + +// IsExpired returns true if the given object is older than the ttl, or it can't +// determine its age. +func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool { + return p.TTL > 0 && p.Clock.Since(obj.Timestamp) > p.TTL +} + +// TimestampedEntry is the only type allowed in a ExpirationCache. +// Keep in mind that it is not safe to share timestamps between computers. +// Behavior may be inconsistent if you get a timestamp from the API Server and +// use it on the client machine as part of your ExpirationCache. +type TimestampedEntry struct { + Obj interface{} + Timestamp time.Time + key string +} + +// getTimestampedEntry returns the TimestampedEntry stored under the given key. +func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) { + item, _ := c.cacheStorage.Get(key) + if tsEntry, ok := item.(*TimestampedEntry); ok { + return tsEntry, true + } + return nil, false +} + +// getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't +// already expired. It holds a write lock across deletion. +func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) { + // Prevent all inserts from the time we deem an item as "expired" to when we + // delete it, so an un-expired item doesn't sneak in under the same key, just + // before the Delete. + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + timestampedItem, exists := c.getTimestampedEntry(key) + if !exists { + return nil, false + } + if c.expirationPolicy.IsExpired(timestampedItem) { + c.cacheStorage.Delete(key) + return nil, false + } + return timestampedItem.Obj, true +} + +// GetByKey returns the item stored under the key, or sets exists=false. +func (c *ExpirationCache) GetByKey(key string) (interface{}, bool, error) { + obj, exists := c.getOrExpire(key) + return obj, exists, nil +} + +// Get returns unexpired items. It purges the cache of expired items in the +// process. 
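+// For example (illustrative): with a store from NewTTLStore(keyFunc, time.Minute),
+// Get stops returning an object one minute after it was last Added or Updated.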
+func (c *ExpirationCache) Get(obj interface{}) (interface{}, bool, error) { + key, err := c.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + obj, exists := c.getOrExpire(key) + return obj, exists, nil +} + +// List retrieves a list of unexpired items. It purges the cache of expired +// items in the process. +func (c *ExpirationCache) List() []interface{} { + items := c.cacheStorage.List() + + list := make([]interface{}, 0, len(items)) + for _, item := range items { + key := item.(*TimestampedEntry).key + if obj, exists := c.getOrExpire(key); exists { + list = append(list, obj) + } + } + return list +} + +// ListKeys returns a list of all keys in the expiration cache. +func (c *ExpirationCache) ListKeys() []string { + return c.cacheStorage.ListKeys() +} + +// Add timestamps an item and inserts it into the cache, overwriting entries +// that might exist under the same key. +func (c *ExpirationCache) Add(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + + c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now(), key}) + return nil +} + +// Update has not been implemented yet for lack of a use case, so this method +// simply calls `Add`. This effectively refreshes the timestamp. +func (c *ExpirationCache) Update(obj interface{}) error { + return c.Add(obj) +} + +// Delete removes an item from the cache. +func (c *ExpirationCache) Delete(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + c.cacheStorage.Delete(key) + return nil +} + +// Replace will convert all items in the given list to TimestampedEntries +// before attempting the replace operation. The replace operation will +// delete the contents of the ExpirationCache `c`. +func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) error { + items := make(map[string]interface{}, len(list)) + ts := c.clock.Now() + for _, item := range list { + key, err := c.keyFunc(item) + if err != nil { + return KeyError{item, err} + } + items[key] = &TimestampedEntry{item, ts, key} + } + c.expirationLock.Lock() + defer c.expirationLock.Unlock() + c.cacheStorage.Replace(items, resourceVersion) + return nil +} + +// Resync is a no-op for one of these +func (c *ExpirationCache) Resync() error { + return nil +} + +// NewTTLStore creates and returns a ExpirationCache with a TTLPolicy +func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store { + return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}}) +} + +// NewExpirationStore creates and returns a ExpirationCache for a given policy +func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store { + return &ExpirationCache{ + cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}), + keyFunc: keyFunc, + clock: clock.RealClock{}, + expirationPolicy: expirationPolicy, + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go new file mode 100644 index 00000000..a16f4735 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/expiration_cache_fakes.go @@ -0,0 +1,57 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/utils/clock"
+)
+
+type fakeThreadSafeMap struct {
+	ThreadSafeStore
+	deletedKeys chan<- string
+}
+
+func (c *fakeThreadSafeMap) Delete(key string) {
+	if c.deletedKeys != nil {
+		c.ThreadSafeStore.Delete(key)
+		c.deletedKeys <- key
+	}
+}
+
+// FakeExpirationPolicy keeps a list of keys that never expire.
+type FakeExpirationPolicy struct {
+	NeverExpire     sets.String
+	RetrieveKeyFunc KeyFunc
+}
+
+// IsExpired reports whether the given object has expired.
+func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool {
+	key, _ := p.RetrieveKeyFunc(obj)
+	return !p.NeverExpire.Has(key)
+}
+
+// NewFakeExpirationStore creates a new fake ExpirationCache for testing.
+func NewFakeExpirationStore(keyFunc KeyFunc, deletedKeys chan<- string, expirationPolicy ExpirationPolicy, cacheClock clock.Clock) Store {
+	cacheStorage := NewThreadSafeStore(Indexers{}, Indices{})
+	return &ExpirationCache{
+		cacheStorage:     &fakeThreadSafeMap{cacheStorage, deletedKeys},
+		keyFunc:          keyFunc,
+		clock:            cacheClock,
+		expirationPolicy: expirationPolicy,
+	}
+}
diff --git a/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go
new file mode 100644
index 00000000..462d2266
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/cache/fake_custom_store.go
@@ -0,0 +1,102 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+// FakeCustomStore lets you define custom functions for store operations.
+type FakeCustomStore struct { + AddFunc func(obj interface{}) error + UpdateFunc func(obj interface{}) error + DeleteFunc func(obj interface{}) error + ListFunc func() []interface{} + ListKeysFunc func() []string + GetFunc func(obj interface{}) (item interface{}, exists bool, err error) + GetByKeyFunc func(key string) (item interface{}, exists bool, err error) + ReplaceFunc func(list []interface{}, resourceVersion string) error + ResyncFunc func() error +} + +// Add calls the custom Add function if defined +func (f *FakeCustomStore) Add(obj interface{}) error { + if f.AddFunc != nil { + return f.AddFunc(obj) + } + return nil +} + +// Update calls the custom Update function if defined +func (f *FakeCustomStore) Update(obj interface{}) error { + if f.UpdateFunc != nil { + return f.UpdateFunc(obj) + } + return nil +} + +// Delete calls the custom Delete function if defined +func (f *FakeCustomStore) Delete(obj interface{}) error { + if f.DeleteFunc != nil { + return f.DeleteFunc(obj) + } + return nil +} + +// List calls the custom List function if defined +func (f *FakeCustomStore) List() []interface{} { + if f.ListFunc != nil { + return f.ListFunc() + } + return nil +} + +// ListKeys calls the custom ListKeys function if defined +func (f *FakeCustomStore) ListKeys() []string { + if f.ListKeysFunc != nil { + return f.ListKeysFunc() + } + return nil +} + +// Get calls the custom Get function if defined +func (f *FakeCustomStore) Get(obj interface{}) (item interface{}, exists bool, err error) { + if f.GetFunc != nil { + return f.GetFunc(obj) + } + return nil, false, nil +} + +// GetByKey calls the custom GetByKey function if defined +func (f *FakeCustomStore) GetByKey(key string) (item interface{}, exists bool, err error) { + if f.GetByKeyFunc != nil { + return f.GetByKeyFunc(key) + } + return nil, false, nil +} + +// Replace calls the custom Replace function if defined +func (f *FakeCustomStore) Replace(list []interface{}, resourceVersion string) error { + if f.ReplaceFunc != nil { + return f.ReplaceFunc(list, resourceVersion) + } + return nil +} + +// Resync calls the custom Resync function if defined +func (f *FakeCustomStore) Resync() error { + if f.ResyncFunc != nil { + return f.ResyncFunc() + } + return nil +} diff --git a/vendor/k8s.io/client-go/tools/cache/fifo.go b/vendor/k8s.io/client-go/tools/cache/fifo.go new file mode 100644 index 00000000..8f331378 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/fifo.go @@ -0,0 +1,374 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "sync" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// PopProcessFunc is passed to Pop() method of Queue interface. +// It is supposed to process the accumulator popped from the queue. +type PopProcessFunc func(interface{}) error + +// ErrRequeue may be returned by a PopProcessFunc to safely requeue +// the current item. The value of Err will be returned from Pop. 
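+//
+// Sketch of a PopProcessFunc that requeues on a transient failure (not part
+// of the upstream file; "process" is a hypothetical handler):
+//
+//	item, err := queue.Pop(func(obj interface{}) error {
+//		if err := process(obj); err != nil {
+//			return ErrRequeue{Err: err} // obj is requeued atomically
+//		}
+//		return nil
+//	})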
+type ErrRequeue struct {
+	// Err is returned by the Pop function
+	Err error
+}
+
+// ErrFIFOClosed is returned when the FIFO is closed.
+var ErrFIFOClosed = errors.New("DeltaFIFO: manipulating with closed queue")
+
+func (e ErrRequeue) Error() string {
+	if e.Err == nil {
+		return "the popped item should be requeued without returning an error"
+	}
+	return e.Err.Error()
+}
+
+// Queue extends Store with a collection of Store keys to "process".
+// Every Add, Update, or Delete may put the object's key in that collection.
+// A Queue has a way to derive the corresponding key given an accumulator.
+// A Queue can be accessed concurrently from multiple goroutines.
+// A Queue can be "closed", after which Pop operations return an error.
+type Queue interface {
+	Store
+
+	// Pop blocks until there is at least one key to process or the
+	// Queue is closed. In the latter case Pop returns with an error.
+	// In the former case Pop atomically picks one key to process,
+	// removes that (key, accumulator) association from the Store, and
+	// processes the accumulator. Pop returns the accumulator that
+	// was processed and the result of processing. The PopProcessFunc
+	// may return an ErrRequeue{inner} and in this case Pop will (a)
+	// return that (key, accumulator) association to the Queue as part
+	// of the atomic processing and (b) return the inner error from
+	// Pop.
+	Pop(PopProcessFunc) (interface{}, error)
+
+	// AddIfNotPresent puts the given accumulator into the Queue (in
+	// association with the accumulator's key) if and only if that key
+	// is not already associated with a non-empty accumulator.
+	AddIfNotPresent(interface{}) error
+
+	// HasSynced returns true if the first batch of keys have all been
+	// popped. The first batch of keys are those of the first Replace
+	// operation if that happened before any Add, AddIfNotPresent,
+	// Update, or Delete; otherwise the first batch is empty.
+	HasSynced() bool
+
+	// Close the queue
+	Close()
+}
+
+// Pop is a helper function for popping from a Queue.
+// WARNING: Do NOT use this function in non-test code to avoid races
+// unless you really really really really know what you are doing.
+func Pop(queue Queue) interface{} {
+	var result interface{}
+	queue.Pop(func(obj interface{}) error {
+		result = obj
+		return nil
+	})
+	return result
+}
+
+// FIFO is a Queue in which (a) each accumulator is simply the most
+// recently provided object and (b) the collection of keys to process
+// is a FIFO. The accumulators all start out empty, and deleting an
+// object from its accumulator empties the accumulator. The Resync
+// operation is a no-op.
+//
+// Thus: if multiple adds/updates of a single object happen while that
+// object's key is in the queue before it has been processed then it
+// will only be processed once, and when it is processed the most
+// recent version will be processed. This can't be done with a channel.
+//
+// FIFO solves this use case:
+//   - You want to process every object (exactly) once.
+//   - You want to process the most recent version of the object when you process it.
+//   - You do not want to process deleted objects, they should be removed from the queue.
+//   - You do not want to periodically reprocess objects.
+//
+// Compare with DeltaFIFO for other use cases.
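+//
+// A minimal consumer-loop sketch (not part of the upstream file; "handle" is
+// a hypothetical PopProcessFunc and "obj" a hypothetical object):
+//
+//	f := NewFIFO(MetaNamespaceKeyFunc)
+//	_ = f.Add(obj)
+//	for {
+//		if _, err := f.Pop(handle); err == ErrFIFOClosed {
+//			return
+//		}
+//	}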
+type FIFO struct {
+	lock sync.RWMutex
+	cond sync.Cond
+	// We depend on the property that every key in `items` is also in `queue`
+	items map[string]interface{}
+	queue []string
+
+	// populated is true if the first batch of items inserted by Replace() has been inserted,
+	// or if Delete/Add/Update was called first.
+	populated bool
+	// initialPopulationCount is the number of items inserted by the first call of Replace()
+	initialPopulationCount int
+
+	// keyFunc is used to make the key used for queued item insertion and retrieval, and
+	// should be deterministic.
+	keyFunc KeyFunc
+
+	// closed indicates that the queue is closed.
+	// It is used so a control loop can exit when the queue is empty.
+	// It does not currently gate any CRUD operations.
+	closed bool
+}
+
+var (
+	_ = Queue(&FIFO{}) // FIFO is a Queue
+)
+
+// Close closes the queue.
+func (f *FIFO) Close() {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.closed = true
+	f.cond.Broadcast()
+}
+
+// HasSynced returns true if an Add/Update/Delete/AddIfNotPresent was called first,
+// or the first batch of items inserted by Replace() has been popped.
+func (f *FIFO) HasSynced() bool {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	return f.populated && f.initialPopulationCount == 0
+}
+
+// Add inserts an item, and puts it in the queue. The item is only enqueued
+// if it doesn't already exist in the set.
+func (f *FIFO) Add(obj interface{}) error {
+	id, err := f.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.populated = true
+	if _, exists := f.items[id]; !exists {
+		f.queue = append(f.queue, id)
+	}
+	f.items[id] = obj
+	f.cond.Broadcast()
+	return nil
+}
+
+// AddIfNotPresent inserts an item, and puts it in the queue. If the item is already
+// present in the set, it is neither enqueued nor added to the set.
+//
+// This is useful in a single producer/consumer scenario so that the consumer can
+// safely retry items without contending with the producer and potentially enqueueing
+// stale items.
+func (f *FIFO) AddIfNotPresent(obj interface{}) error {
+	id, err := f.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.addIfNotPresent(id, obj)
+	return nil
+}
+
+// addIfNotPresent assumes the fifo lock is already held and adds the provided
+// item to the queue under id if it does not already exist.
+func (f *FIFO) addIfNotPresent(id string, obj interface{}) {
+	f.populated = true
+	if _, exists := f.items[id]; exists {
+		return
+	}
+
+	f.queue = append(f.queue, id)
+	f.items[id] = obj
+	f.cond.Broadcast()
+}
+
+// Update is the same as Add in this implementation.
+func (f *FIFO) Update(obj interface{}) error {
+	return f.Add(obj)
+}
+
+// Delete removes an item. It doesn't add it to the queue, because
+// this implementation assumes the consumer only cares about the objects,
+// not the order in which they were created/added.
+func (f *FIFO) Delete(obj interface{}) error {
+	id, err := f.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	f.populated = true
+	delete(f.items, id)
+	return err
+}
+
+// List returns a list of all the items.
+func (f *FIFO) List() []interface{} {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	list := make([]interface{}, 0, len(f.items))
+	for _, item := range f.items {
+		list = append(list, item)
+	}
+	return list
+}
+
+// ListKeys returns a list of all the keys of the objects currently
+// in the FIFO.
+func (f *FIFO) ListKeys() []string {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	list := make([]string, 0, len(f.items))
+	for key := range f.items {
+		list = append(list, key)
+	}
+	return list
+}
+
+// Get returns the requested item, or sets exists=false.
+func (f *FIFO) Get(obj interface{}) (item interface{}, exists bool, err error) {
+	key, err := f.keyFunc(obj)
+	if err != nil {
+		return nil, false, KeyError{obj, err}
+	}
+	return f.GetByKey(key)
+}
+
+// GetByKey returns the requested item, or sets exists=false.
+func (f *FIFO) GetByKey(key string) (item interface{}, exists bool, err error) {
+	f.lock.RLock()
+	defer f.lock.RUnlock()
+	item, exists = f.items[key]
+	return item, exists, nil
+}
+
+// IsClosed checks if the queue is closed.
+func (f *FIFO) IsClosed() bool {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	return f.closed
+}
+
+// Pop waits until an item is ready and processes it. If multiple items are
+// ready, they are returned in the order in which they were added/updated.
+// The item is removed from the queue (and the store) before it is processed,
+// so if you don't successfully process it, it should be added back with
+// AddIfNotPresent(). The process function is called under lock, so it is safe
+// to update data structures in it that need to be in sync with the queue.
+func (f *FIFO) Pop(process PopProcessFunc) (interface{}, error) {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+	for {
+		for len(f.queue) == 0 {
+			// When the queue is empty, invocation of Pop() is blocked until a new item is enqueued.
+			// When Close() is called, f.closed is set and the condition is broadcast,
+			// which causes this loop to continue and return from the Pop().
+			if f.closed {
+				return nil, ErrFIFOClosed
+			}
+
+			f.cond.Wait()
+		}
+		id := f.queue[0]
+		f.queue = f.queue[1:]
+		if f.initialPopulationCount > 0 {
+			f.initialPopulationCount--
+		}
+		item, ok := f.items[id]
+		if !ok {
+			// Item may have been deleted subsequently.
+			continue
+		}
+		delete(f.items, id)
+		err := process(item)
+		if e, ok := err.(ErrRequeue); ok {
+			f.addIfNotPresent(id, item)
+			err = e.Err
+		}
+		return item, err
+	}
+}
+
+// Replace will delete the contents of 'f', using instead the given map.
+// 'f' takes ownership of the map; you should not reference the map again
+// after calling this function. f's queue is reset, too; upon return, it
+// will contain the items in the map, in no particular order.
+func (f *FIFO) Replace(list []interface{}, resourceVersion string) error {
+	items := make(map[string]interface{}, len(list))
+	for _, item := range list {
+		key, err := f.keyFunc(item)
+		if err != nil {
+			return KeyError{item, err}
+		}
+		items[key] = item
+	}
+
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	if !f.populated {
+		f.populated = true
+		f.initialPopulationCount = len(items)
+	}
+
+	f.items = items
+	f.queue = f.queue[:0]
+	for id := range items {
+		f.queue = append(f.queue, id)
+	}
+	if len(f.queue) > 0 {
+		f.cond.Broadcast()
+	}
+	return nil
+}
+
+// Resync will ensure that every object in the Store has its key in the queue.
+// This should be a no-op, because that property is maintained by all operations.
+func (f *FIFO) Resync() error {
+	f.lock.Lock()
+	defer f.lock.Unlock()
+
+	inQueue := sets.NewString()
+	for _, id := range f.queue {
+		inQueue.Insert(id)
+	}
+	for id := range f.items {
+		if !inQueue.Has(id) {
+			f.queue = append(f.queue, id)
+		}
+	}
+	if len(f.queue) > 0 {
+		f.cond.Broadcast()
+	}
+	return nil
+}
+
+// NewFIFO returns a Store which can be used to queue up items to
+// process.
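+//
+// Sketch of the initial-batch semantics (not part of the upstream file;
+// objA/objB are hypothetical objects):
+//
+//	f := NewFIFO(MetaNamespaceKeyFunc)
+//	_ = f.Replace([]interface{}{objA, objB}, "rv1")
+//	f.HasSynced() // false: the first batch has not been popped yet
+//	Pop(f)
+//	Pop(f)
+//	f.HasSynced() // true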
+func NewFIFO(keyFunc KeyFunc) *FIFO { + f := &FIFO{ + items: map[string]interface{}{}, + queue: []string{}, + keyFunc: keyFunc, + } + f.cond.L = &f.lock + return f +} diff --git a/vendor/k8s.io/client-go/tools/cache/heap.go b/vendor/k8s.io/client-go/tools/cache/heap.go new file mode 100644 index 00000000..819325e9 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/heap.go @@ -0,0 +1,322 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file implements a heap data structure. + +package cache + +import ( + "container/heap" + "fmt" + "sync" +) + +const ( + closedMsg = "heap is closed" +) + +// LessFunc is used to compare two objects in the heap. +type LessFunc func(interface{}, interface{}) bool + +type heapItem struct { + obj interface{} // The object which is stored in the heap. + index int // The index of the object's key in the Heap.queue. +} + +type itemKeyValue struct { + key string + obj interface{} +} + +// heapData is an internal struct that implements the standard heap interface +// and keeps the data stored in the heap. +type heapData struct { + // items is a map from key of the objects to the objects and their index. + // We depend on the property that items in the map are in the queue and vice versa. + items map[string]*heapItem + // queue implements a heap data structure and keeps the order of elements + // according to the heap invariant. The queue keeps the keys of objects stored + // in "items". + queue []string + + // keyFunc is used to make the key used for queued item insertion and retrieval, and + // should be deterministic. + keyFunc KeyFunc + // lessFunc is used to compare two objects in the heap. + lessFunc LessFunc +} + +var ( + _ = heap.Interface(&heapData{}) // heapData is a standard heap +) + +// Less compares two objects and returns true if the first one should go +// in front of the second one in the heap. +func (h *heapData) Less(i, j int) bool { + if i > len(h.queue) || j > len(h.queue) { + return false + } + itemi, ok := h.items[h.queue[i]] + if !ok { + return false + } + itemj, ok := h.items[h.queue[j]] + if !ok { + return false + } + return h.lessFunc(itemi.obj, itemj.obj) +} + +// Len returns the number of items in the Heap. +func (h *heapData) Len() int { return len(h.queue) } + +// Swap implements swapping of two elements in the heap. This is a part of standard +// heap interface and should never be called directly. +func (h *heapData) Swap(i, j int) { + h.queue[i], h.queue[j] = h.queue[j], h.queue[i] + item := h.items[h.queue[i]] + item.index = i + item = h.items[h.queue[j]] + item.index = j +} + +// Push is supposed to be called by heap.Push only. +func (h *heapData) Push(kv interface{}) { + keyValue := kv.(*itemKeyValue) + n := len(h.queue) + h.items[keyValue.key] = &heapItem{keyValue.obj, n} + h.queue = append(h.queue, keyValue.key) +} + +// Pop is supposed to be called by heap.Pop only. 
+func (h *heapData) Pop() interface{} {
+	key := h.queue[len(h.queue)-1]
+	h.queue = h.queue[0 : len(h.queue)-1]
+	item, ok := h.items[key]
+	if !ok {
+		// This is an error
+		return nil
+	}
+	delete(h.items, key)
+	return item.obj
+}
+
+// Heap is a thread-safe producer/consumer queue that implements a heap data structure.
+// It can be used to implement priority queues and similar data structures.
+type Heap struct {
+	lock sync.RWMutex
+	cond sync.Cond
+
+	// data stores objects and has a queue that keeps their ordering according
+	// to the heap invariant.
+	data *heapData
+
+	// closed indicates that the queue is closed.
+	// It is mainly used to let Pop() exit its control loop while waiting for an item.
+	closed bool
+}
+
+// Close closes the Heap and signals condition variables that may be waiting to pop
+// items from the heap.
+func (h *Heap) Close() {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	h.closed = true
+	h.cond.Broadcast()
+}
+
+// Add inserts an item, and puts it in the queue. The item is updated if it
+// already exists.
+func (h *Heap) Add(obj interface{}) error {
+	key, err := h.data.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	if _, exists := h.data.items[key]; exists {
+		h.data.items[key].obj = obj
+		heap.Fix(h.data, h.data.items[key].index)
+	} else {
+		h.addIfNotPresentLocked(key, obj)
+	}
+	h.cond.Broadcast()
+	return nil
+}
+
+// BulkAdd adds all the items in the list to the queue and then signals the condition
+// variable. It is useful when the caller would like to add all of the items
+// to the queue before the consumer starts processing them.
+func (h *Heap) BulkAdd(list []interface{}) error {
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	for _, obj := range list {
+		key, err := h.data.keyFunc(obj)
+		if err != nil {
+			return KeyError{obj, err}
+		}
+		if _, exists := h.data.items[key]; exists {
+			h.data.items[key].obj = obj
+			heap.Fix(h.data, h.data.items[key].index)
+		} else {
+			h.addIfNotPresentLocked(key, obj)
+		}
+	}
+	h.cond.Broadcast()
+	return nil
+}
+
+// AddIfNotPresent inserts an item, and puts it in the queue. If an item with
+// the key is present in the map, no change is made to the item.
+//
+// This is useful in a single producer/consumer scenario so that the consumer can
+// safely retry items without contending with the producer and potentially enqueueing
+// stale items.
+func (h *Heap) AddIfNotPresent(obj interface{}) error {
+	id, err := h.data.keyFunc(obj)
+	if err != nil {
+		return KeyError{obj, err}
+	}
+	h.lock.Lock()
+	defer h.lock.Unlock()
+	if h.closed {
+		return fmt.Errorf(closedMsg)
+	}
+	h.addIfNotPresentLocked(id, obj)
+	h.cond.Broadcast()
+	return nil
+}
+
+// addIfNotPresentLocked assumes the lock is already held and adds the provided
+// item to the queue if it does not already exist.
+func (h *Heap) addIfNotPresentLocked(key string, obj interface{}) {
+	if _, exists := h.data.items[key]; exists {
+		return
+	}
+	heap.Push(h.data, &itemKeyValue{key, obj})
+}
+
+// Update is the same as Add in this implementation. When the item does not
+// exist, it is added.
+func (h *Heap) Update(obj interface{}) error {
+	return h.Add(obj)
+}
+
+// Delete removes an item.
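+//
+// For context, a short priority-queue sketch over this Heap API (not part of
+// the upstream file; byCreationTime is a hypothetical LessFunc and objA/objB
+// hypothetical objects):
+//
+//	h := NewHeap(MetaNamespaceKeyFunc, byCreationTime)
+//	_ = h.Add(objA)
+//	_ = h.Add(objB)
+//	next, _ := h.Pop() // smallest element according to byCreationTime
+//	_ = h.Delete(objB) // removes objB if it is still queued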
+func (h *Heap) Delete(obj interface{}) error { + key, err := h.data.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + h.lock.Lock() + defer h.lock.Unlock() + if item, ok := h.data.items[key]; ok { + heap.Remove(h.data, item.index) + return nil + } + return fmt.Errorf("object not found") +} + +// Pop waits until an item is ready. If multiple items are +// ready, they are returned in the order given by Heap.data.lessFunc. +func (h *Heap) Pop() (interface{}, error) { + h.lock.Lock() + defer h.lock.Unlock() + for len(h.data.queue) == 0 { + // When the queue is empty, invocation of Pop() is blocked until new item is enqueued. + // When Close() is called, the h.closed is set and the condition is broadcast, + // which causes this loop to continue and return from the Pop(). + if h.closed { + return nil, fmt.Errorf("heap is closed") + } + h.cond.Wait() + } + obj := heap.Pop(h.data) + if obj == nil { + return nil, fmt.Errorf("object was removed from heap data") + } + + return obj, nil +} + +// List returns a list of all the items. +func (h *Heap) List() []interface{} { + h.lock.RLock() + defer h.lock.RUnlock() + list := make([]interface{}, 0, len(h.data.items)) + for _, item := range h.data.items { + list = append(list, item.obj) + } + return list +} + +// ListKeys returns a list of all the keys of the objects currently in the Heap. +func (h *Heap) ListKeys() []string { + h.lock.RLock() + defer h.lock.RUnlock() + list := make([]string, 0, len(h.data.items)) + for key := range h.data.items { + list = append(list, key) + } + return list +} + +// Get returns the requested item, or sets exists=false. +func (h *Heap) Get(obj interface{}) (interface{}, bool, error) { + key, err := h.data.keyFunc(obj) + if err != nil { + return nil, false, KeyError{obj, err} + } + return h.GetByKey(key) +} + +// GetByKey returns the requested item, or sets exists=false. +func (h *Heap) GetByKey(key string) (interface{}, bool, error) { + h.lock.RLock() + defer h.lock.RUnlock() + item, exists := h.data.items[key] + if !exists { + return nil, false, nil + } + return item.obj, true, nil +} + +// IsClosed returns true if the queue is closed. +func (h *Heap) IsClosed() bool { + h.lock.RLock() + defer h.lock.RUnlock() + return h.closed +} + +// NewHeap returns a Heap which can be used to queue up items to process. +func NewHeap(keyFn KeyFunc, lessFn LessFunc) *Heap { + h := &Heap{ + data: &heapData{ + items: map[string]*heapItem{}, + queue: []string{}, + keyFunc: keyFn, + lessFunc: lessFn, + }, + } + h.cond.L = &h.lock + return h +} diff --git a/vendor/k8s.io/client-go/tools/cache/index.go b/vendor/k8s.io/client-go/tools/cache/index.go new file mode 100644 index 00000000..b78d3086 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/index.go @@ -0,0 +1,101 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Indexer extends Store with multiple indices and restricts each +// accumulator to simply hold the current object (and be empty after +// Delete). +// +// There are three kinds of strings here: +// 1. a storage key, as defined in the Store interface, +// 2. a name of an index, and +// 3. an "indexed value", which is produced by an IndexFunc and +// can be a field value or any other string computed from the object. +type Indexer interface { + Store + // Index returns the stored objects whose set of indexed values + // intersects the set of indexed values of the given object, for + // the named index + Index(indexName string, obj interface{}) ([]interface{}, error) + // IndexKeys returns the storage keys of the stored objects whose + // set of indexed values for the named index includes the given + // indexed value + IndexKeys(indexName, indexedValue string) ([]string, error) + // ListIndexFuncValues returns all the indexed values of the given index + ListIndexFuncValues(indexName string) []string + // ByIndex returns the stored objects whose set of indexed values + // for the named index includes the given indexed value + ByIndex(indexName, indexedValue string) ([]interface{}, error) + // GetIndexers return the indexers + GetIndexers() Indexers + + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. + AddIndexers(newIndexers Indexers) error +} + +// IndexFunc knows how to compute the set of indexed values for an object. +type IndexFunc func(obj interface{}) ([]string, error) + +// IndexFuncToKeyFuncAdapter adapts an indexFunc to a keyFunc. This is only useful if your index function returns +// unique values for every object. This conversion can create errors when more than one key is found. You +// should prefer to make proper key and index functions. +func IndexFuncToKeyFuncAdapter(indexFunc IndexFunc) KeyFunc { + return func(obj interface{}) (string, error) { + indexKeys, err := indexFunc(obj) + if err != nil { + return "", err + } + if len(indexKeys) > 1 { + return "", fmt.Errorf("too many keys: %v", indexKeys) + } + if len(indexKeys) == 0 { + return "", fmt.Errorf("unexpected empty indexKeys") + } + return indexKeys[0], nil + } +} + +const ( + // NamespaceIndex is the lookup name for the most common index function, which is to index by the namespace field. + NamespaceIndex string = "namespace" +) + +// MetaNamespaceIndexFunc is a default index function that indexes based on an object's namespace +func MetaNamespaceIndexFunc(obj interface{}) ([]string, error) { + meta, err := meta.Accessor(obj) + if err != nil { + return []string{""}, fmt.Errorf("object has no meta: %v", err) + } + return []string{meta.GetNamespace()}, nil +} + +// Index maps the indexed value to a set of keys in the store that match on that value +type Index map[string]sets.String + +// Indexers maps a name to an IndexFunc +type Indexers map[string]IndexFunc + +// Indices maps a name to an Index +type Indices map[string]Index diff --git a/vendor/k8s.io/client-go/tools/cache/listers.go b/vendor/k8s.io/client-go/tools/cache/listers.go new file mode 100644 index 00000000..420ca7b2 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/listers.go @@ -0,0 +1,169 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"k8s.io/klog/v2"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// AppendFunc is used to add a matching item to whatever list the caller is using
+type AppendFunc func(interface{})
+
+// ListAll calls appendFn with each value retrieved from store which matches the selector.
+func ListAll(store Store, selector labels.Selector, appendFn AppendFunc) error {
+	selectAll := selector.Empty()
+	for _, m := range store.List() {
+		if selectAll {
+			// Avoid computing labels of the objects to speed up common flows
+			// of listing all objects.
+			appendFn(m)
+			continue
+		}
+		metadata, err := meta.Accessor(m)
+		if err != nil {
+			return err
+		}
+		if selector.Matches(labels.Set(metadata.GetLabels())) {
+			appendFn(m)
+		}
+	}
+	return nil
+}
+
+// ListAllByNamespace calls appendFn with each value from the Indexer that is in
+// the given namespace and matches the selector.
+func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selector, appendFn AppendFunc) error {
+	if namespace == metav1.NamespaceAll {
+		return ListAll(indexer, selector, appendFn)
+	}
+
+	items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
+	if err != nil {
+		// Ignore error; do slow search without index.
+		klog.Warningf("can not retrieve list of objects using index : %v", err)
+		for _, m := range indexer.List() {
+			metadata, err := meta.Accessor(m)
+			if err != nil {
+				return err
+			}
+			if metadata.GetNamespace() == namespace && selector.Matches(labels.Set(metadata.GetLabels())) {
+				appendFn(m)
+			}
+
+		}
+		return nil
+	}
+
+	selectAll := selector.Empty()
+	for _, m := range items {
+		if selectAll {
+			// Avoid computing labels of the objects to speed up common flows
+			// of listing all objects.
+			appendFn(m)
+			continue
+		}
+		metadata, err := meta.Accessor(m)
+		if err != nil {
+			return err
+		}
+		if selector.Matches(labels.Set(metadata.GetLabels())) {
+			appendFn(m)
+		}
+	}
+
+	return nil
+}
+
+// GenericLister is a lister skin on a generic Indexer
+type GenericLister interface {
+	// List will return all objects across namespaces
+	List(selector labels.Selector) (ret []runtime.Object, err error)
+	// Get will attempt to retrieve assuming that name==key
+	Get(name string) (runtime.Object, error)
+	// ByNamespace will give you a GenericNamespaceLister for one namespace
+	ByNamespace(namespace string) GenericNamespaceLister
+}
+
+// GenericNamespaceLister is a lister skin on a generic Indexer
+type GenericNamespaceLister interface {
+	// List will return all objects in this namespace
+	List(selector labels.Selector) (ret []runtime.Object, err error)
+	// Get will attempt to retrieve by namespace and name
+	Get(name string) (runtime.Object, error)
+}
+
+// NewGenericLister creates a new instance of the genericLister.
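+//
+// Usage sketch (not part of the upstream file; "indexer" is assumed to be an
+// Indexer populated elsewhere, and "pods" is the resource being listed):
+//
+//	lister := NewGenericLister(indexer, schema.GroupResource{Resource: "pods"})
+//	objs, _ := lister.ByNamespace("default").List(labels.Everything())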
+func NewGenericLister(indexer Indexer, resource schema.GroupResource) GenericLister { + return &genericLister{indexer: indexer, resource: resource} +} + +type genericLister struct { + indexer Indexer + resource schema.GroupResource +} + +func (s *genericLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(runtime.Object)) + }) + return ret, err +} + +func (s *genericLister) ByNamespace(namespace string) GenericNamespaceLister { + return &genericNamespaceLister{indexer: s.indexer, namespace: namespace, resource: s.resource} +} + +func (s *genericLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} + +type genericNamespaceLister struct { + indexer Indexer + namespace string + resource schema.GroupResource +} + +func (s *genericNamespaceLister) List(selector labels.Selector) (ret []runtime.Object, err error) { + err = ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(runtime.Object)) + }) + return ret, err +} + +func (s *genericNamespaceLister) Get(name string) (runtime.Object, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(s.resource, name) + } + return obj.(runtime.Object), nil +} diff --git a/vendor/k8s.io/client-go/tools/cache/listwatch.go b/vendor/k8s.io/client-go/tools/cache/listwatch.go new file mode 100644 index 00000000..10b7e651 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/listwatch.go @@ -0,0 +1,112 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + restclient "k8s.io/client-go/rest" +) + +// Lister is any object that knows how to perform an initial list. +type Lister interface { + // List should return a list type object; the Items field will be extracted, and the + // ResourceVersion field will be used to start the watch in the right place. + List(options metav1.ListOptions) (runtime.Object, error) +} + +// Watcher is any object that knows how to start a watch on a resource. +type Watcher interface { + // Watch should begin a watch at the specified version. + Watch(options metav1.ListOptions) (watch.Interface, error) +} + +// ListerWatcher is any object that knows how to perform an initial list and start a watch on a resource. 
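+//
+// A typical implementation is the ListWatch type below; construction sketch
+// for pods (not part of the upstream file; "clientset" is a hypothetical
+// kubernetes.Interface):
+//
+//	lw := NewListWatchFromClient(
+//		clientset.CoreV1().RESTClient(), "pods", metav1.NamespaceAll, fields.Everything())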
+type ListerWatcher interface {
+	Lister
+	Watcher
+}
+
+// ListFunc knows how to list resources
+type ListFunc func(options metav1.ListOptions) (runtime.Object, error)
+
+// WatchFunc knows how to watch resources
+type WatchFunc func(options metav1.ListOptions) (watch.Interface, error)
+
+// ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface.
+// It is a convenience type for users of NewReflector, etc.
+// ListFunc and WatchFunc must not be nil.
+type ListWatch struct {
+	ListFunc  ListFunc
+	WatchFunc WatchFunc
+	// DisableChunking requests no chunking for this list watcher.
+	DisableChunking bool
+}
+
+// The Getter interface knows how to access the Get method of a RESTClient.
+type Getter interface {
+	Get() *restclient.Request
+}
+
+// NewListWatchFromClient creates a new ListWatch from the specified client, resource, namespace and field selector.
+func NewListWatchFromClient(c Getter, resource string, namespace string, fieldSelector fields.Selector) *ListWatch {
+	optionsModifier := func(options *metav1.ListOptions) {
+		options.FieldSelector = fieldSelector.String()
+	}
+	return NewFilteredListWatchFromClient(c, resource, namespace, optionsModifier)
+}
+
+// NewFilteredListWatchFromClient creates a new ListWatch from the specified client, resource, namespace, and option modifier.
+// The option modifier is a function that takes a ListOptions and modifies the consumed ListOptions. Provide a customized
+// modifier function to apply modification to ListOptions with a field selector, a label selector, or any other desired options.
+func NewFilteredListWatchFromClient(c Getter, resource string, namespace string, optionsModifier func(options *metav1.ListOptions)) *ListWatch {
+	listFunc := func(options metav1.ListOptions) (runtime.Object, error) {
+		optionsModifier(&options)
+		return c.Get().
+			Namespace(namespace).
+			Resource(resource).
+			VersionedParams(&options, metav1.ParameterCodec).
+			Do(context.TODO()).
+			Get()
+	}
+	watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
+		options.Watch = true
+		optionsModifier(&options)
+		return c.Get().
+			Namespace(namespace).
+			Resource(resource).
+			VersionedParams(&options, metav1.ParameterCodec).
+			Watch(context.TODO())
+	}
+	return &ListWatch{ListFunc: listFunc, WatchFunc: watchFunc}
+}
+
+// List lists a set of apiserver resources
+func (lw *ListWatch) List(options metav1.ListOptions) (runtime.Object, error) {
+	// ListWatch is used in Reflector, which already supports pagination.
+	// Don't paginate here to avoid duplication.
+	return lw.ListFunc(options)
+}
+
+// Watch watches a set of apiserver resources
+func (lw *ListWatch) Watch(options metav1.ListOptions) (watch.Interface, error) {
+	return lw.WatchFunc(options)
+}
diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_cache.go b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go
new file mode 100644
index 00000000..c6f953d8
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/cache/mutation_cache.go
@@ -0,0 +1,262 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+	"strconv"
+	"sync"
+	"time"
+
+	"k8s.io/klog/v2"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilcache "k8s.io/apimachinery/pkg/util/cache"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// MutationCache is able to take the result of update operations and store them in an LRU
+// that can be used to provide a more current view of a requested object. It requires interpreting
+// resourceVersions for comparisons.
+// Implementations must be thread-safe.
+// TODO find a way to layer this into an informer/lister
+type MutationCache interface {
+	GetByKey(key string) (interface{}, bool, error)
+	ByIndex(indexName, indexKey string) ([]interface{}, error)
+	Mutation(interface{})
+}
+
+// ResourceVersionComparator is able to compare object versions.
+type ResourceVersionComparator interface {
+	CompareResourceVersion(lhs, rhs runtime.Object) int
+}
+
+// NewIntegerResourceVersionMutationCache returns a MutationCache that understands how to
+// deal with objects that have a resource version that:
+//
+//   - is an integer
+//   - increases when updated
+//   - is comparable across the same resource in a namespace
+//
+// Most backends will have these semantics. Indexer may be nil. ttl controls how long an item
+// remains in the mutation cache before it is removed.
+//
+// If includeAdds is true, objects in the mutation cache will be returned even if they don't exist
+// in the underlying store. This is only safe if your use of the cache can handle mutation entries
+// remaining in the cache for up to ttl when mutations and deletes occur very closely in time.
+func NewIntegerResourceVersionMutationCache(backingCache Store, indexer Indexer, ttl time.Duration, includeAdds bool) MutationCache {
+	return &mutationCache{
+		backingCache:  backingCache,
+		indexer:       indexer,
+		mutationCache: utilcache.NewLRUExpireCache(100),
+		comparator:    etcdObjectVersioner{},
+		ttl:           ttl,
+		includeAdds:   includeAdds,
+	}
+}
+
+// mutationCache doesn't guarantee that it returns values added via Mutation, since they can page out.
+// And since you can't distinguish between "didn't observe create" and "was deleted after create",
+// if the key is missing from the backing cache, we always return it as missing.
+type mutationCache struct {
+	lock          sync.Mutex
+	backingCache  Store
+	indexer       Indexer
+	mutationCache *utilcache.LRUExpireCache
+	includeAdds   bool
+	ttl           time.Duration
+
+	comparator ResourceVersionComparator
+}
+
+// GetByKey is never guaranteed to return back the value set in Mutation. It could be paged out, it could
+// be older than another copy, the backingCache may be more recent, or you might have written twice into the same key.
+// You get a value that was valid at some snapshot of time and will always return the newer of backingCache and mutationCache.
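+//
+// Construction sketch (not part of the upstream file; "store", "indexer",
+// "updatedObj", and "key" are hypothetical):
+//
+//	mc := NewIntegerResourceVersionMutationCache(store, indexer, time.Minute, false)
+//	mc.Mutation(updatedObj)            // record the result of an update
+//	obj, exists, _ := mc.GetByKey(key) // newer of backing store and mutation cache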
+func (c *mutationCache) GetByKey(key string) (interface{}, bool, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	obj, exists, err := c.backingCache.GetByKey(key)
+	if err != nil {
+		return nil, false, err
+	}
+	if !exists {
+		if !c.includeAdds {
+			// we can't distinguish between "didn't observe create" and "was deleted after create", so
+			// if the key is missing, we always return it as missing
+			return nil, false, nil
+		}
+		obj, exists = c.mutationCache.Get(key)
+		if !exists {
+			return nil, false, nil
+		}
+	}
+	objRuntime, ok := obj.(runtime.Object)
+	if !ok {
+		return obj, true, nil
+	}
+	return c.newerObject(key, objRuntime), true, nil
+}
+
+// ByIndex returns the newer objects that match the provided index and indexer key.
+// Will return an error if no indexer was provided.
+func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, error) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if c.indexer == nil {
+		return nil, fmt.Errorf("no indexer has been provided to the mutation cache")
+	}
+	keys, err := c.indexer.IndexKeys(name, indexKey)
+	if err != nil {
+		return nil, err
+	}
+	var items []interface{}
+	keySet := sets.NewString()
+	for _, key := range keys {
+		keySet.Insert(key)
+		obj, exists, err := c.indexer.GetByKey(key)
+		if err != nil {
+			return nil, err
+		}
+		if !exists {
+			continue
+		}
+		if objRuntime, ok := obj.(runtime.Object); ok {
+			items = append(items, c.newerObject(key, objRuntime))
+		} else {
+			items = append(items, obj)
+		}
+	}
+
+	if c.includeAdds {
+		fn := c.indexer.GetIndexers()[name]
+		// Keys() is returned oldest to newest, so full traversal does not alter the LRU behavior
+		for _, key := range c.mutationCache.Keys() {
+			updated, ok := c.mutationCache.Get(key)
+			if !ok {
+				continue
+			}
+			if keySet.Has(key.(string)) {
+				continue
+			}
+			elements, err := fn(updated)
+			if err != nil {
+				klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
+				continue
+			}
+			for _, inIndex := range elements {
+				if inIndex != indexKey {
+					continue
+				}
+				items = append(items, updated)
+				break
+			}
+		}
+	}
+
+	return items, nil
+}
+
+// newerObject checks the mutation cache for a newer object and returns one if found. If the
+// mutated object is older than the backing object, it is removed from the mutation cache.
+// Must be called while the lock is held.
+func (c *mutationCache) newerObject(key string, backing runtime.Object) runtime.Object {
+	mutatedObj, exists := c.mutationCache.Get(key)
+	if !exists {
+		return backing
+	}
+	mutatedObjRuntime, ok := mutatedObj.(runtime.Object)
+	if !ok {
+		return backing
+	}
+	if c.comparator.CompareResourceVersion(backing, mutatedObjRuntime) >= 0 {
+		c.mutationCache.Remove(key)
+		return backing
+	}
+	return mutatedObjRuntime
+}
+
+// Mutation adds a change to the cache that can be returned in GetByKey if it is newer than the backingCache
+// copy. If you call Mutation twice with the same object on different threads, one will win, but it's not defined
+// which one. This doesn't affect correctness, since the GetByKey guarantee of "later of these two caches" is
+// preserved, but you may not get the version of the object you want. The object you get is only guaranteed to
+// be "one that was valid at some point in time", not "the one that I want".
+func (c *mutationCache) Mutation(obj interface{}) { + c.lock.Lock() + defer c.lock.Unlock() + + key, err := DeletionHandlingMetaNamespaceKeyFunc(obj) + if err != nil { + // this is a "nice to have", so failures shouldn't do anything weird + utilruntime.HandleError(err) + return + } + + if objRuntime, ok := obj.(runtime.Object); ok { + if mutatedObj, exists := c.mutationCache.Get(key); exists { + if mutatedObjRuntime, ok := mutatedObj.(runtime.Object); ok { + if c.comparator.CompareResourceVersion(objRuntime, mutatedObjRuntime) < 0 { + return + } + } + } + } + c.mutationCache.Add(key, obj, c.ttl) +} + +// etcdObjectVersioner implements versioning and extracting etcd node information +// for objects that have an embedded ObjectMeta or ListMeta field. +type etcdObjectVersioner struct{} + +// ObjectResourceVersion implements Versioner +func (a etcdObjectVersioner) ObjectResourceVersion(obj runtime.Object) (uint64, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return 0, err + } + version := accessor.GetResourceVersion() + if len(version) == 0 { + return 0, nil + } + return strconv.ParseUint(version, 10, 64) +} + +// CompareResourceVersion compares etcd resource versions. Outside this API they are all strings, +// but etcd resource versions are special, they're actually ints, so we can easily compare them. +func (a etcdObjectVersioner) CompareResourceVersion(lhs, rhs runtime.Object) int { + lhsVersion, err := a.ObjectResourceVersion(lhs) + if err != nil { + // coder error + panic(err) + } + rhsVersion, err := a.ObjectResourceVersion(rhs) + if err != nil { + // coder error + panic(err) + } + + if lhsVersion == rhsVersion { + return 0 + } + if lhsVersion < rhsVersion { + return -1 + } + + return 1 +} diff --git a/vendor/k8s.io/client-go/tools/cache/mutation_detector.go b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go new file mode 100644 index 00000000..b37537cb --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/mutation_detector.go @@ -0,0 +1,166 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "fmt" + "os" + "reflect" + "strconv" + "sync" + "time" + + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/diff" +) + +var mutationDetectionEnabled = false + +func init() { + mutationDetectionEnabled, _ = strconv.ParseBool(os.Getenv("KUBE_CACHE_MUTATION_DETECTOR")) +} + +// MutationDetector is able to monitor objects for mutation within a limited window of time +type MutationDetector interface { + // AddObject adds the given object to the set being monitored for a while from now + AddObject(obj interface{}) + + // Run starts the monitoring and does not return until the monitoring is stopped. + Run(stopCh <-chan struct{}) +} + +// NewCacheMutationDetector creates a new instance for the defaultCacheMutationDetector. 
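+//
+// The returned detector is active only when the KUBE_CACHE_MUTATION_DETECTOR
+// environment variable is set; usage sketch (not part of the upstream file;
+// "stopCh" and "obj" are hypothetical):
+//
+//	d := NewCacheMutationDetector("my-informer")
+//	go d.Run(stopCh)
+//	d.AddObject(obj) // obj is deep-copied and periodically compared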
+func NewCacheMutationDetector(name string) MutationDetector { + if !mutationDetectionEnabled { + return dummyMutationDetector{} + } + klog.Warningln("Mutation detector is enabled, this will result in memory leakage.") + return &defaultCacheMutationDetector{name: name, period: 1 * time.Second, retainDuration: 2 * time.Minute} +} + +type dummyMutationDetector struct{} + +func (dummyMutationDetector) Run(stopCh <-chan struct{}) { +} +func (dummyMutationDetector) AddObject(obj interface{}) { +} + +// defaultCacheMutationDetector gives a way to detect if a cached object has been mutated +// It has a list of cached objects and their copies. I haven't thought of a way +// to see WHO is mutating it, just that it's getting mutated. +type defaultCacheMutationDetector struct { + name string + period time.Duration + + // compareLock ensures only a single call to CompareObjects runs at a time + compareObjectsLock sync.Mutex + + // addLock guards addedObjs between AddObject and CompareObjects + addedObjsLock sync.Mutex + addedObjs []cacheObj + + cachedObjs []cacheObj + + retainDuration time.Duration + lastRotated time.Time + retainedCachedObjs []cacheObj + + // failureFunc is injectable for unit testing. If you don't have it, the process will panic. + // This panic is intentional, since turning on this detection indicates you want a strong + // failure signal. This failure is effectively a p0 bug and you can't trust process results + // after a mutation anyway. + failureFunc func(message string) +} + +// cacheObj holds the actual object and a copy +type cacheObj struct { + cached interface{} + copied interface{} +} + +func (d *defaultCacheMutationDetector) Run(stopCh <-chan struct{}) { + // we DON'T want protection from panics. If we're running this code, we want to die + for { + if d.lastRotated.IsZero() { + d.lastRotated = time.Now() + } else if time.Since(d.lastRotated) > d.retainDuration { + d.retainedCachedObjs = d.cachedObjs + d.cachedObjs = nil + d.lastRotated = time.Now() + } + + d.CompareObjects() + + select { + case <-stopCh: + return + case <-time.After(d.period): + } + } +} + +// AddObject makes a deep copy of the object for later comparison. It only works on runtime.Object +// but that covers the vast majority of our cached objects +func (d *defaultCacheMutationDetector) AddObject(obj interface{}) { + if _, ok := obj.(DeletedFinalStateUnknown); ok { + return + } + if obj, ok := obj.(runtime.Object); ok { + copiedObj := obj.DeepCopyObject() + + d.addedObjsLock.Lock() + defer d.addedObjsLock.Unlock() + d.addedObjs = append(d.addedObjs, cacheObj{cached: obj, copied: copiedObj}) + } +} + +func (d *defaultCacheMutationDetector) CompareObjects() { + d.compareObjectsLock.Lock() + defer d.compareObjectsLock.Unlock() + + // move addedObjs into cachedObjs under lock + // this keeps the critical section small to avoid blocking AddObject while we compare cachedObjs + d.addedObjsLock.Lock() + d.cachedObjs = append(d.cachedObjs, d.addedObjs...) 
+ d.addedObjs = nil + d.addedObjsLock.Unlock() + + altered := false + for i, obj := range d.cachedObjs { + if !reflect.DeepEqual(obj.cached, obj.copied) { + fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied)) + altered = true + } + } + for i, obj := range d.retainedCachedObjs { + if !reflect.DeepEqual(obj.cached, obj.copied) { + fmt.Printf("CACHE %s[%d] ALTERED!\n%v\n", d.name, i, diff.ObjectGoPrintSideBySide(obj.cached, obj.copied)) + altered = true + } + } + + if altered { + msg := fmt.Sprintf("cache %s modified", d.name) + if d.failureFunc != nil { + d.failureFunc(msg) + return + } + panic(msg) + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go new file mode 100644 index 00000000..9cd476be --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -0,0 +1,639 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "errors" + "fmt" + "io" + "math/rand" + "reflect" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/naming" + utilnet "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/pager" + "k8s.io/klog/v2" + "k8s.io/utils/clock" + "k8s.io/utils/trace" +) + +const defaultExpectedTypeName = "" + +// Reflector watches a specified resource and causes all changes to be reflected in the given store. +type Reflector struct { + // name identifies this reflector. By default it will be a file:line if possible. + name string + + // The name of the type we expect to place in the store. The name + // will be the stringification of expectedGVK if provided, and the + // stringification of expectedType otherwise. It is for display + // only, and should not be used for parsing or comparison. + expectedTypeName string + // An example object of the type we expect to place in the store. + // Only the type needs to be right, except that when that is + // `unstructured.Unstructured` the object's `"apiVersion"` and + // `"kind"` must also be right. + expectedType reflect.Type + // The GVK of the object we expect to place in the store if unstructured. + expectedGVK *schema.GroupVersionKind + // The destination to sync up with the watch source + store Store + // listerWatcher is used to perform lists and watches. + listerWatcher ListerWatcher + + // backoff manages backoff of ListWatch + backoffManager wait.BackoffManager + // initConnBackoffManager manages backoff the initial connection with the Watch call of ListAndWatch. 
+ initConnBackoffManager wait.BackoffManager + // MaxInternalErrorRetryDuration defines how long we should retry internal errors returned by watch. + MaxInternalErrorRetryDuration time.Duration + + resyncPeriod time.Duration + // ShouldResync is invoked periodically and whenever it returns `true` the Store's Resync operation is invoked + ShouldResync func() bool + // clock allows tests to manipulate time + clock clock.Clock + // paginatedResult defines whether pagination should be forced for list calls. + // It is set based on the result of the initial list call. + paginatedResult bool + // lastSyncResourceVersion is the resource version token last + // observed when doing a sync with the underlying store + // it is thread safe, but not synchronized with the underlying store + lastSyncResourceVersion string + // isLastSyncResourceVersionUnavailable is true if the previous list or watch request with + // lastSyncResourceVersion failed with an "expired" or "too large resource version" error. + isLastSyncResourceVersionUnavailable bool + // lastSyncResourceVersionMutex guards read/write access to lastSyncResourceVersion + lastSyncResourceVersionMutex sync.RWMutex + // WatchListPageSize is the requested chunk size of initial and resync watch lists. + // If unset, for consistent reads (RV="") or reads that opt-into arbitrarily old data + // (RV="0") it will default to pager.PageSize, for the rest (RV != "" && RV != "0") + // it will turn off pagination to allow serving them from watch cache. + // NOTE: It should be used carefully as paginated lists are always served directly from + // etcd, which is significantly less efficient and may lead to serious performance and + // scalability problems. + WatchListPageSize int64 + // Called whenever the ListAndWatch drops the connection with an error. + watchErrorHandler WatchErrorHandler +} + +// ResourceVersionUpdater is an interface that allows store implementation to +// track the current resource version of the reflector. This is especially +// important if storage bookmarks are enabled. +type ResourceVersionUpdater interface { + // UpdateResourceVersion is called each time current resource version of the reflector + // is updated. + UpdateResourceVersion(resourceVersion string) +} + +// The WatchErrorHandler is called whenever ListAndWatch drops the +// connection with an error. After calling this handler, the informer +// will backoff and retry. +// +// The default implementation looks at the error type and tries to log +// the error message at an appropriate level. +// +// Implementations of this handler may display the error message in other +// ways. Implementations should return quickly - any expensive processing +// should be offloaded. +type WatchErrorHandler func(r *Reflector, err error) + +// DefaultWatchErrorHandler is the default implementation of WatchErrorHandler +func DefaultWatchErrorHandler(r *Reflector, err error) { + switch { + case isExpiredError(err): + // Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already + // has a semantic that it returns data at least as fresh as provided RV. + // So first try to LIST with setting RV to resource version of last observed object. 
+ klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err) + case err == io.EOF: + // watch closed normally + case err == io.ErrUnexpectedEOF: + klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedTypeName, err) + default: + utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedTypeName, err)) + } +} + +var ( + // We try to spread the load on apiserver by setting timeouts for + // watch requests - it is random in [minWatchTimeout, 2*minWatchTimeout]. + minWatchTimeout = 5 * time.Minute +) + +// NewNamespaceKeyedIndexerAndReflector creates an Indexer and a Reflector +// The indexer is configured to key on namespace +func NewNamespaceKeyedIndexerAndReflector(lw ListerWatcher, expectedType interface{}, resyncPeriod time.Duration) (indexer Indexer, reflector *Reflector) { + indexer = NewIndexer(MetaNamespaceKeyFunc, Indexers{NamespaceIndex: MetaNamespaceIndexFunc}) + reflector = NewReflector(lw, expectedType, indexer, resyncPeriod) + return indexer, reflector +} + +// NewReflector creates a new Reflector object which will keep the +// given store up to date with the server's contents for the given +// resource. Reflector promises to only put things in the store that +// have the type of expectedType, unless expectedType is nil. If +// resyncPeriod is non-zero, then the reflector will periodically +// consult its ShouldResync function to determine whether to invoke +// the Store's Resync operation; `ShouldResync==nil` means always +// "yes". This enables you to use reflectors to periodically process +// everything as well as incrementally processing the things that +// change. +func NewReflector(lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + return NewNamedReflector(naming.GetNameFromCallsite(internalPackages...), lw, expectedType, store, resyncPeriod) +} + +// NewNamedReflector same as NewReflector, but with a specified name for logging +func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, store Store, resyncPeriod time.Duration) *Reflector { + realClock := &clock.RealClock{} + r := &Reflector{ + name: name, + listerWatcher: lw, + store: store, + // We used to make the call every 1sec (1 QPS), the goal here is to achieve ~98% traffic reduction when + // API server is not healthy. With these parameters, backoff will stop at [30,60) sec interval which is + // 0.22 QPS. If we don't backoff for 2min, assume API server is healthy and we reset the backoff. + backoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), + initConnBackoffManager: wait.NewExponentialBackoffManager(800*time.Millisecond, 30*time.Second, 2*time.Minute, 2.0, 1.0, realClock), + resyncPeriod: resyncPeriod, + clock: realClock, + watchErrorHandler: WatchErrorHandler(DefaultWatchErrorHandler), + } + r.setExpectedType(expectedType) + return r +} + +func (r *Reflector) setExpectedType(expectedType interface{}) { + r.expectedType = reflect.TypeOf(expectedType) + if r.expectedType == nil { + r.expectedTypeName = defaultExpectedTypeName + return + } + + r.expectedTypeName = r.expectedType.String() + + if obj, ok := expectedType.(*unstructured.Unstructured); ok { + // Use gvk to check that watch event objects are of the desired type. 
+		gvk := obj.GroupVersionKind()
+		if gvk.Empty() {
+			klog.V(4).Infof("Reflector from %s configured with expectedType of *unstructured.Unstructured with empty GroupVersionKind.", r.name)
+			return
+		}
+		r.expectedGVK = &gvk
+		r.expectedTypeName = gvk.String()
+	}
+}
+
+// internalPackages are packages that are ignored when creating a default reflector name. These packages are in the common
+// call chains to NewReflector, so they'd be low entropy names for reflectors
+var internalPackages = []string{"client-go/tools/cache/"}
+
+// Run repeatedly uses the reflector's ListAndWatch to fetch all the
+// objects and subsequent deltas.
+// Run will exit when stopCh is closed.
+func (r *Reflector) Run(stopCh <-chan struct{}) {
+	klog.V(3).Infof("Starting reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
+	wait.BackoffUntil(func() {
+		if err := r.ListAndWatch(stopCh); err != nil {
+			r.watchErrorHandler(r, err)
+		}
+	}, r.backoffManager, true, stopCh)
+	klog.V(3).Infof("Stopping reflector %s (%s) from %s", r.expectedTypeName, r.resyncPeriod, r.name)
+}
+
+var (
+	// nothing will ever be sent down this channel
+	neverExitWatch <-chan time.Time = make(chan time.Time)
+
+	// Used to indicate that watching stopped because of a signal from the stop
+	// channel passed in from a client of the reflector.
+	errorStopRequested = errors.New("stop requested")
+)
+
+// resyncChan returns a channel which will receive something when a resync is
+// required, and a cleanup function.
+func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
+	if r.resyncPeriod == 0 {
+		return neverExitWatch, func() bool { return false }
+	}
+	// The cleanup function is required: imagine the scenario where watches
+	// always fail so we end up listing frequently. Then, if we don't
+	// manually stop the timer, we could end up with many timers active
+	// concurrently.
+	t := r.clock.NewTimer(r.resyncPeriod)
+	return t.C(), t.Stop
+}
+
+// ListAndWatch first lists all items and gets the resource version at the moment of the call,
+// and then uses that resource version to watch.
+// It returns an error if ListAndWatch didn't even try to initialize the watch.
+func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
+	klog.V(3).Infof("Listing and watching %v from %s", r.expectedTypeName, r.name)
+
+	err := r.list(stopCh)
+	if err != nil {
+		return err
+	}
+
+	resyncerrc := make(chan error, 1)
+	cancelCh := make(chan struct{})
+	defer close(cancelCh)
+	go func() {
+		resyncCh, cleanup := r.resyncChan()
+		defer func() {
+			cleanup() // Call the last one written into cleanup
+		}()
+		for {
+			select {
+			case <-resyncCh:
+			case <-stopCh:
+				return
+			case <-cancelCh:
+				return
+			}
+			if r.ShouldResync == nil || r.ShouldResync() {
+				klog.V(4).Infof("%s: forcing resync", r.name)
+				if err := r.store.Resync(); err != nil {
+					resyncerrc <- err
+					return
+				}
+			}
+			cleanup()
+			resyncCh, cleanup = r.resyncChan()
+		}
+	}()
+
+	retry := NewRetryWithDeadline(r.MaxInternalErrorRetryDuration, time.Minute, apierrors.IsInternalError, r.clock)
+	for {
+		// give the stopCh a chance to stop the loop, even in case of continue statements further down on errors
+		select {
+		case <-stopCh:
+			return nil
+		default:
+		}
+
+		timeoutSeconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0))
+		options := metav1.ListOptions{
+			ResourceVersion: r.LastSyncResourceVersion(),
+			// We want to avoid situations of hanging watchers. Stop any watchers that do not
+			// receive any events within the timeout window.
+			TimeoutSeconds: &timeoutSeconds,
+			// To reduce load on kube-apiserver on watch restarts, you may enable watch bookmarks.
+			// Reflector doesn't assume bookmarks are returned at all (if the server does not support
+			// watch bookmarks, it will ignore this field).
+			AllowWatchBookmarks: true,
+		}
+
+		// start the clock before sending the request, since some proxies won't flush headers until after the first watch event is sent
+		start := r.clock.Now()
+		w, err := r.listerWatcher.Watch(options)
+		if err != nil {
+			// If this is a "connection refused" error, it means that most likely apiserver is not responsive.
+			// It doesn't make sense to re-list all objects because most likely we will be able to restart
+			// watch where we ended.
+			// If that's the case begin exponentially backing off and resend watch request.
+			// Do the same for "429" errors.
+			if utilnet.IsConnectionRefused(err) || apierrors.IsTooManyRequests(err) {
+				<-r.initConnBackoffManager.Backoff().C()
+				continue
+			}
+			return err
+		}
+
+		err = watchHandler(start, w, r.store, r.expectedType, r.expectedGVK, r.name, r.expectedTypeName, r.setLastSyncResourceVersion, r.clock, resyncerrc, stopCh)
+		retry.After(err)
+		if err != nil {
+			if err != errorStopRequested {
+				switch {
+				case isExpiredError(err):
+					// Don't set LastSyncResourceVersionUnavailable - LIST call with ResourceVersion=RV already
+					// has a semantic that it returns data at least as fresh as provided RV.
+					// So first try to LIST with setting RV to resource version of last observed object.
+					klog.V(4).Infof("%s: watch of %v closed with: %v", r.name, r.expectedTypeName, err)
+				case apierrors.IsTooManyRequests(err):
+					klog.V(2).Infof("%s: watch of %v returned 429 - backing off", r.name, r.expectedTypeName)
+					<-r.initConnBackoffManager.Backoff().C()
+					continue
+				case apierrors.IsInternalError(err) && retry.ShouldRetry():
+					klog.V(2).Infof("%s: retrying watch of %v internal error: %v", r.name, r.expectedTypeName, err)
+					continue
+				default:
+					klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedTypeName, err)
+				}
+			}
+			return nil
+		}
+	}
+}
+
+// list simply lists all items and records a resource version obtained from the server at the moment of the call.
+// The resource version can be used for further progress notification (aka. watch).
+func (r *Reflector) list(stopCh <-chan struct{}) error {
+	var resourceVersion string
+	options := metav1.ListOptions{ResourceVersion: r.relistResourceVersion()}
+
+	initTrace := trace.New("Reflector ListAndWatch", trace.Field{Key: "name", Value: r.name})
+	defer initTrace.LogIfLong(10 * time.Second)
+	var list runtime.Object
+	var paginatedResult bool
+	var err error
+	listCh := make(chan struct{}, 1)
+	panicCh := make(chan interface{}, 1)
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				panicCh <- r
+			}
+		}()
+		// Attempt to gather list in chunks, if supported by listerWatcher, if not, the first
+		// list request will return the full response.
+		pager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
+			return r.listerWatcher.List(opts)
+		}))
+		switch {
+		case r.WatchListPageSize != 0:
+			pager.PageSize = r.WatchListPageSize
+		case r.paginatedResult:
+			// We got a paginated result initially. Assume this resource and server honor
+			// paging requests (i.e. watch cache is probably disabled) and leave the default
+			// pager size set.
+		case options.ResourceVersion != "" && options.ResourceVersion != "0":
+			// User didn't explicitly request pagination.
+			//
+			// With ResourceVersion != "", we have a possibility to list from watch cache,
+			// but we do that (for ResourceVersion != "0") only if Limit is unset.
+			// To avoid thundering herd on etcd (e.g. on master upgrades), we explicitly
+			// switch off pagination to force listing from watch cache (if enabled).
+			// With the existing semantic of RV (result is at least as fresh as provided RV),
+			// this is correct and doesn't lead to going back in time.
+			//
+			// We also don't turn off pagination for ResourceVersion="0", since watch cache
+			// is ignoring Limit in that case anyway, and if watch cache is not enabled
+			// we don't introduce regression.
+			pager.PageSize = 0
+		}
+
+		list, paginatedResult, err = pager.List(context.Background(), options)
+		if isExpiredError(err) || isTooLargeResourceVersionError(err) {
+			r.setIsLastSyncResourceVersionUnavailable(true)
+			// Retry immediately if the resource version used to list is unavailable.
+			// The pager already falls back to full list if paginated list calls fail due to an "Expired" error on
+			// continuation pages, but the pager might not be enabled, the full list might fail because the
+			// resource version it is listing at is expired, or the cache may not yet be synced to the provided
+			// resource version. So we need to fall back to resourceVersion="" in all cases to recover and ensure
+			// the reflector makes forward progress.
+			list, paginatedResult, err = pager.List(context.Background(), metav1.ListOptions{ResourceVersion: r.relistResourceVersion()})
+		}
+		close(listCh)
+	}()
+	select {
+	case <-stopCh:
+		return nil
+	case r := <-panicCh:
+		panic(r)
+	case <-listCh:
+	}
+	initTrace.Step("Objects listed", trace.Field{Key: "error", Value: err})
+	if err != nil {
+		klog.Warningf("%s: failed to list %v: %v", r.name, r.expectedTypeName, err)
+		return fmt.Errorf("failed to list %v: %w", r.expectedTypeName, err)
+	}
+
+	// We check if the list was paginated and if so set the paginatedResult based on that.
+	// However, we want to do that only for the initial list (which is the only case
+	// when we set ResourceVersion="0"). The reasoning behind it is that later, in some
+	// situations we may force listing directly from etcd (by setting ResourceVersion="")
+	// which will return paginated result, even if watch cache is enabled. However, in
+	// that case, we still want to prefer sending requests to watch cache if possible.
+	//
+	// A paginated result returned for a request with ResourceVersion="0" means that watch
+	// cache is disabled and there are a lot of objects of a given type. In such a case,
+	// there is no need to prefer listing from watch cache.
+ if options.ResourceVersion == "0" && paginatedResult { + r.paginatedResult = true + } + + r.setIsLastSyncResourceVersionUnavailable(false) // list was successful + listMetaInterface, err := meta.ListAccessor(list) + if err != nil { + return fmt.Errorf("unable to understand list result %#v: %v", list, err) + } + resourceVersion = listMetaInterface.GetResourceVersion() + initTrace.Step("Resource version extracted") + items, err := meta.ExtractList(list) + if err != nil { + return fmt.Errorf("unable to understand list result %#v (%v)", list, err) + } + initTrace.Step("Objects extracted") + if err := r.syncWith(items, resourceVersion); err != nil { + return fmt.Errorf("unable to sync list result: %v", err) + } + initTrace.Step("SyncWith done") + r.setLastSyncResourceVersion(resourceVersion) + initTrace.Step("Resource version updated") + return nil +} + +// syncWith replaces the store's items with the given list. +func (r *Reflector) syncWith(items []runtime.Object, resourceVersion string) error { + found := make([]interface{}, 0, len(items)) + for _, item := range items { + found = append(found, item) + } + return r.store.Replace(found, resourceVersion) +} + +// watchHandler watches w and sets setLastSyncResourceVersion +func watchHandler(start time.Time, + w watch.Interface, + store Store, + expectedType reflect.Type, + expectedGVK *schema.GroupVersionKind, + name string, + expectedTypeName string, + setLastSyncResourceVersion func(string), + clock clock.Clock, + errc chan error, + stopCh <-chan struct{}, +) error { + eventCount := 0 + + // Stopping the watcher should be idempotent and if we return from this function there's no way + // we're coming back in with the same watch interface. + defer w.Stop() + +loop: + for { + select { + case <-stopCh: + return errorStopRequested + case err := <-errc: + return err + case event, ok := <-w.ResultChan(): + if !ok { + break loop + } + if event.Type == watch.Error { + return apierrors.FromObject(event.Object) + } + if expectedType != nil { + if e, a := expectedType, reflect.TypeOf(event.Object); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected type %v, but watch event object had type %v", name, e, a)) + continue + } + } + if expectedGVK != nil { + if e, a := *expectedGVK, event.Object.GetObjectKind().GroupVersionKind(); e != a { + utilruntime.HandleError(fmt.Errorf("%s: expected gvk %v, but watch event object had gvk %v", name, e, a)) + continue + } + } + meta, err := meta.Accessor(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event)) + continue + } + resourceVersion := meta.GetResourceVersion() + switch event.Type { + case watch.Added: + err := store.Add(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to add watch event object (%#v) to store: %v", name, event.Object, err)) + } + case watch.Modified: + err := store.Update(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to update watch event object (%#v) to store: %v", name, event.Object, err)) + } + case watch.Deleted: + // TODO: Will any consumers need access to the "last known + // state", which is passed in event.Object? If so, may need + // to change this. 
+ err := store.Delete(event.Object) + if err != nil { + utilruntime.HandleError(fmt.Errorf("%s: unable to delete watch event object (%#v) from store: %v", name, event.Object, err)) + } + case watch.Bookmark: + // A `Bookmark` means watch has synced here, just update the resourceVersion + default: + utilruntime.HandleError(fmt.Errorf("%s: unable to understand watch event %#v", name, event)) + } + setLastSyncResourceVersion(resourceVersion) + if rvu, ok := store.(ResourceVersionUpdater); ok { + rvu.UpdateResourceVersion(resourceVersion) + } + eventCount++ + } + } + + watchDuration := clock.Since(start) + if watchDuration < 1*time.Second && eventCount == 0 { + return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", name) + } + klog.V(4).Infof("%s: Watch close - %v total %v items received", name, expectedTypeName, eventCount) + return nil +} + +// LastSyncResourceVersion is the resource version observed when last sync with the underlying store +// The value returned is not synchronized with access to the underlying store and is not thread-safe +func (r *Reflector) LastSyncResourceVersion() string { + r.lastSyncResourceVersionMutex.RLock() + defer r.lastSyncResourceVersionMutex.RUnlock() + return r.lastSyncResourceVersion +} + +func (r *Reflector) setLastSyncResourceVersion(v string) { + r.lastSyncResourceVersionMutex.Lock() + defer r.lastSyncResourceVersionMutex.Unlock() + r.lastSyncResourceVersion = v +} + +// relistResourceVersion determines the resource version the reflector should list or relist from. +// Returns either the lastSyncResourceVersion so that this reflector will relist with a resource +// versions no older than has already been observed in relist results or watch events, or, if the last relist resulted +// in an HTTP 410 (Gone) status code, returns "" so that the relist will use the latest resource version available in +// etcd via a quorum read. +func (r *Reflector) relistResourceVersion() string { + r.lastSyncResourceVersionMutex.RLock() + defer r.lastSyncResourceVersionMutex.RUnlock() + + if r.isLastSyncResourceVersionUnavailable { + // Since this reflector makes paginated list requests, and all paginated list requests skip the watch cache + // if the lastSyncResourceVersion is unavailable, we set ResourceVersion="" and list again to re-establish reflector + // to the latest available ResourceVersion, using a consistent read from etcd. + return "" + } + if r.lastSyncResourceVersion == "" { + // For performance reasons, initial list performed by reflector uses "0" as resource version to allow it to + // be served from the watch cache if it is enabled. + return "0" + } + return r.lastSyncResourceVersion +} + +// setIsLastSyncResourceVersionUnavailable sets if the last list or watch request with lastSyncResourceVersion returned +// "expired" or "too large resource version" error. +func (r *Reflector) setIsLastSyncResourceVersionUnavailable(isUnavailable bool) { + r.lastSyncResourceVersionMutex.Lock() + defer r.lastSyncResourceVersionMutex.Unlock() + r.isLastSyncResourceVersionUnavailable = isUnavailable +} + +func isExpiredError(err error) bool { + // In Kubernetes 1.17 and earlier, the api server returns both apierrors.StatusReasonExpired and + // apierrors.StatusReasonGone for HTTP 410 (Gone) status code responses. In 1.18 the kube server is more consistent + // and always returns apierrors.StatusReasonExpired. 
For backward compatibility we can only remove the apierrors.IsGone + // check when we fully drop support for Kubernetes 1.17 servers from reflectors. + return apierrors.IsResourceExpired(err) || apierrors.IsGone(err) +} + +func isTooLargeResourceVersionError(err error) bool { + if apierrors.HasStatusCause(err, metav1.CauseTypeResourceVersionTooLarge) { + return true + } + // In Kubernetes 1.17.0-1.18.5, the api server doesn't set the error status cause to + // metav1.CauseTypeResourceVersionTooLarge to indicate that the requested minimum resource + // version is larger than the largest currently available resource version. To ensure backward + // compatibility with these server versions we also need to detect the error based on the content + // of the error message field. + if !apierrors.IsTimeout(err) { + return false + } + apierr, ok := err.(apierrors.APIStatus) + if !ok || apierr == nil || apierr.Status().Details == nil { + return false + } + for _, cause := range apierr.Status().Details.Causes { + // Matches the message returned by api server 1.17.0-1.18.5 for this error condition + if cause.Message == "Too large resource version" { + return true + } + } + return false +} diff --git a/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go new file mode 100644 index 00000000..5c00115f --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/reflector_metrics.go @@ -0,0 +1,89 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file provides abstractions for setting the provider (e.g., prometheus) +// of metrics. + +package cache + +import ( + "sync" +) + +// GaugeMetric represents a single numerical value that can arbitrarily go up +// and down. +type GaugeMetric interface { + Set(float64) +} + +// CounterMetric represents a single numerical value that only ever +// goes up. +type CounterMetric interface { + Inc() +} + +// SummaryMetric captures individual observations. +type SummaryMetric interface { + Observe(float64) +} + +type noopMetric struct{} + +func (noopMetric) Inc() {} +func (noopMetric) Dec() {} +func (noopMetric) Observe(float64) {} +func (noopMetric) Set(float64) {} + +// MetricsProvider generates various metrics used by the reflector. 
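+//
+// Editor's note: an illustrative sketch (not part of upstream client-go) of
+// plugging in a custom provider via SetReflectorMetricsProvider, defined
+// below. The promCounter adapter type is a hypothetical wrapper around a real
+// metrics library:
+//
+//	type promProvider struct{}
+//
+//	func (promProvider) NewListsMetric(name string) CounterMetric   { return promCounter(name + "_lists_total") }
+//	func (promProvider) NewWatchesMetric(name string) CounterMetric { return promCounter(name + "_watches_total") }
+//	// ...the remaining MetricsProvider methods are implemented analogously...
+//
+//	func init() { SetReflectorMetricsProvider(promProvider{}) }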
+type MetricsProvider interface { + NewListsMetric(name string) CounterMetric + NewListDurationMetric(name string) SummaryMetric + NewItemsInListMetric(name string) SummaryMetric + + NewWatchesMetric(name string) CounterMetric + NewShortWatchesMetric(name string) CounterMetric + NewWatchDurationMetric(name string) SummaryMetric + NewItemsInWatchMetric(name string) SummaryMetric + + NewLastResourceVersionMetric(name string) GaugeMetric +} + +type noopMetricsProvider struct{} + +func (noopMetricsProvider) NewListsMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewListDurationMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewItemsInListMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewWatchesMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewShortWatchesMetric(name string) CounterMetric { return noopMetric{} } +func (noopMetricsProvider) NewWatchDurationMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewItemsInWatchMetric(name string) SummaryMetric { return noopMetric{} } +func (noopMetricsProvider) NewLastResourceVersionMetric(name string) GaugeMetric { + return noopMetric{} +} + +var metricsFactory = struct { + metricsProvider MetricsProvider + setProviders sync.Once +}{ + metricsProvider: noopMetricsProvider{}, +} + +// SetReflectorMetricsProvider sets the metrics provider +func SetReflectorMetricsProvider(metricsProvider MetricsProvider) { + metricsFactory.setProviders.Do(func() { + metricsFactory.metricsProvider = metricsProvider + }) +} diff --git a/vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go b/vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go new file mode 100644 index 00000000..8201fb15 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/retry_with_deadline.go @@ -0,0 +1,78 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "k8s.io/utils/clock" + "time" +) + +type RetryWithDeadline interface { + After(error) + ShouldRetry() bool +} + +type retryWithDeadlineImpl struct { + firstErrorTime time.Time + lastErrorTime time.Time + maxRetryDuration time.Duration + minResetPeriod time.Duration + isRetryable func(error) bool + clock clock.Clock +} + +func NewRetryWithDeadline(maxRetryDuration, minResetPeriod time.Duration, isRetryable func(error) bool, clock clock.Clock) RetryWithDeadline { + return &retryWithDeadlineImpl{ + firstErrorTime: time.Time{}, + lastErrorTime: time.Time{}, + maxRetryDuration: maxRetryDuration, + minResetPeriod: minResetPeriod, + isRetryable: isRetryable, + clock: clock, + } +} + +func (r *retryWithDeadlineImpl) reset() { + r.firstErrorTime = time.Time{} + r.lastErrorTime = time.Time{} +} + +func (r *retryWithDeadlineImpl) After(err error) { + if r.isRetryable(err) { + if r.clock.Now().Sub(r.lastErrorTime) >= r.minResetPeriod { + r.reset() + } + + if r.firstErrorTime.IsZero() { + r.firstErrorTime = r.clock.Now() + } + r.lastErrorTime = r.clock.Now() + } +} + +func (r *retryWithDeadlineImpl) ShouldRetry() bool { + if r.maxRetryDuration <= time.Duration(0) { + return false + } + + if r.clock.Now().Sub(r.firstErrorTime) <= r.maxRetryDuration { + return true + } + + r.reset() + return false +} diff --git a/vendor/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/client-go/tools/cache/shared_informer.go new file mode 100644 index 00000000..f5c7316a --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/shared_informer.go @@ -0,0 +1,948 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "errors" + "fmt" + "sync" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/utils/buffer" + "k8s.io/utils/clock" + + "k8s.io/klog/v2" +) + +// SharedInformer provides eventually consistent linkage of its +// clients to the authoritative state of a given collection of +// objects. An object is identified by its API group, kind/resource, +// namespace (if any), and name; the `ObjectMeta.UID` is not part of +// an object's ID as far as this contract is concerned. One +// SharedInformer provides linkage to objects of a particular API +// group and kind/resource. The linked object collection of a +// SharedInformer may be further restricted to one namespace (if +// applicable) and/or by label selector and/or field selector. +// +// The authoritative state of an object is what apiservers provide +// access to, and an object goes through a strict sequence of states. +// An object state is either (1) present with a ResourceVersion and +// other appropriate content or (2) "absent". 
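+//
+// Editor's note --- a minimal usage sketch (not part of the upstream comment),
+// assuming a ListerWatcher `lw`, a Pod example object, and a stop channel:
+//
+//	informer := NewSharedIndexInformer(lw, &v1.Pod{}, 30*time.Second, Indexers{})
+//	informer.AddEventHandler(ResourceEventHandlerFuncs{
+//		AddFunc:    func(obj interface{}) { /* enqueue obj */ },
+//		UpdateFunc: func(old, new interface{}) { /* enqueue new */ },
+//		DeleteFunc: func(obj interface{}) { /* handle deletion */ },
+//	})
+//	go informer.Run(stopCh)
+//	if !WaitForCacheSync(stopCh, informer.HasSynced) {
+//		return
+//	}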
+// +// A SharedInformer maintains a local cache --- exposed by GetStore(), +// by GetIndexer() in the case of an indexed informer, and possibly by +// machinery involved in creating and/or accessing the informer --- of +// the state of each relevant object. This cache is eventually +// consistent with the authoritative state. This means that, unless +// prevented by persistent communication problems, if ever a +// particular object ID X is authoritatively associated with a state S +// then for every SharedInformer I whose collection includes (X, S) +// eventually either (1) I's cache associates X with S or a later +// state of X, (2) I is stopped, or (3) the authoritative state +// service for X terminates. To be formally complete, we say that the +// absent state meets any restriction by label selector or field +// selector. +// +// For a given informer and relevant object ID X, the sequence of +// states that appears in the informer's cache is a subsequence of the +// states authoritatively associated with X. That is, some states +// might never appear in the cache but ordering among the appearing +// states is correct. Note, however, that there is no promise about +// ordering between states seen for different objects. +// +// The local cache starts out empty, and gets populated and updated +// during `Run()`. +// +// As a simple example, if a collection of objects is henceforth +// unchanging, a SharedInformer is created that links to that +// collection, and that SharedInformer is `Run()` then that +// SharedInformer's cache eventually holds an exact copy of that +// collection (unless it is stopped too soon, the authoritative state +// service ends, or communication problems between the two +// persistently thwart achievement). +// +// As another simple example, if the local cache ever holds a +// non-absent state for some object ID and the object is eventually +// removed from the authoritative state then eventually the object is +// removed from the local cache (unless the SharedInformer is stopped +// too soon, the authoritative state service ends, or communication +// problems persistently thwart the desired result). +// +// The keys in the Store are of the form namespace/name for namespaced +// objects, and are simply the name for non-namespaced objects. +// Clients can use `MetaNamespaceKeyFunc(obj)` to extract the key for +// a given object, and `SplitMetaNamespaceKey(key)` to split a key +// into its constituent parts. +// +// Every query against the local cache is answered entirely from one +// snapshot of the cache's state. Thus, the result of a `List` call +// will not contain two entries with the same namespace and name. +// +// A client is identified here by a ResourceEventHandler. For every +// update to the SharedInformer's local cache and for every client +// added before `Run()`, eventually either the SharedInformer is +// stopped or the client is notified of the update. A client added +// after `Run()` starts gets a startup batch of notifications of +// additions of the objects existing in the cache at the time that +// client was added; also, for every update to the SharedInformer's +// local cache after that client was added, eventually either the +// SharedInformer is stopped or that client is notified of that +// update. Client notifications happen after the corresponding cache +// update and, in the case of a SharedIndexInformer, after the +// corresponding index updates. 
It is possible that additional cache +// and index updates happen before such a prescribed notification. +// For a given SharedInformer and client, the notifications are +// delivered sequentially. For a given SharedInformer, client, and +// object ID, the notifications are delivered in order. Because +// `ObjectMeta.UID` has no role in identifying objects, it is possible +// that when (1) object O1 with ID (e.g. namespace and name) X and +// `ObjectMeta.UID` U1 in the SharedInformer's local cache is deleted +// and later (2) another object O2 with ID X and ObjectMeta.UID U2 is +// created the informer's clients are not notified of (1) and (2) but +// rather are notified only of an update from O1 to O2. Clients that +// need to detect such cases might do so by comparing the `ObjectMeta.UID` +// field of the old and the new object in the code that handles update +// notifications (i.e. `OnUpdate` method of ResourceEventHandler). +// +// A client must process each notification promptly; a SharedInformer +// is not engineered to deal well with a large backlog of +// notifications to deliver. Lengthy processing should be passed off +// to something else, for example through a +// `client-go/util/workqueue`. +// +// A delete notification exposes the last locally known non-absent +// state, except that its ResourceVersion is replaced with a +// ResourceVersion in which the object is actually absent. +type SharedInformer interface { + // AddEventHandler adds an event handler to the shared informer using the shared informer's resync + // period. Events to a single handler are delivered sequentially, but there is no coordination + // between different handlers. + // It returns a registration handle for the handler that can be used to remove + // the handler again. + AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) + // AddEventHandlerWithResyncPeriod adds an event handler to the + // shared informer with the requested resync period; zero means + // this handler does not care about resyncs. The resync operation + // consists of delivering to the handler an update notification + // for every object in the informer's local cache; it does not add + // any interactions with the authoritative storage. Some + // informers do no resyncs at all, not even for handlers added + // with a non-zero resyncPeriod. For an informer that does + // resyncs, and for each handler that requests resyncs, that + // informer develops a nominal resync period that is no shorter + // than the requested period but may be longer. The actual time + // between any two resyncs may be longer than the nominal period + // because the implementation takes time to do work and there may + // be competing load and scheduling noise. + // It returns a registration handle for the handler that can be used to remove + // the handler again and an error if the handler cannot be added. + AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) + // RemoveEventHandler removes a formerly added event handler given by + // its registration handle. + // This function is guaranteed to be idempotent, and thread-safe. + RemoveEventHandler(handle ResourceEventHandlerRegistration) error + // GetStore returns the informer's local cache as a Store. + GetStore() Store + // GetController is deprecated, it does nothing useful + GetController() Controller + // Run starts and runs the shared informer, returning after it stops. 
+	// The informer will be stopped when stopCh is closed.
+	Run(stopCh <-chan struct{})
+	// HasSynced returns true if the shared informer's store has been
+	// informed by at least one full LIST of the authoritative state
+	// of the informer's object collection. This is unrelated to "resync".
+	HasSynced() bool
+	// LastSyncResourceVersion is the resource version observed when last synced with the underlying
+	// store. The value returned is not synchronized with access to the underlying store and is not
+	// thread-safe.
+	LastSyncResourceVersion() string
+
+	// The WatchErrorHandler is called whenever ListAndWatch drops the
+	// connection with an error. After calling this handler, the informer
+	// will backoff and retry.
+	//
+	// The default implementation looks at the error type and tries to log
+	// the error message at an appropriate level.
+	//
+	// There's only one handler, so if you call this multiple times, the last one
+	// wins; calling after the informer has been started returns an error.
+	//
+	// The handler is intended for visibility, not to e.g. pause the consumers.
+	// The handler should return quickly - any expensive processing should be
+	// offloaded.
+	SetWatchErrorHandler(handler WatchErrorHandler) error
+
+	// The TransformFunc is called for each object which is about to be stored.
+	//
+	// This function is intended for you to take the opportunity to
+	// remove, transform, or normalize fields. One use case is to strip unused
+	// metadata fields out of objects to save on RAM cost.
+	//
+	// Must be set before starting the informer.
+	//
+	// Note: Since the object given to the handler may be already shared with
+	// other goroutines, it is advisable to copy the object being
+	// transformed before mutating it at all and returning the copy to prevent
+	// data races.
+	SetTransform(handler TransformFunc) error
+
+	// IsStopped reports whether the informer has already been stopped.
+	// Adding event handlers to already stopped informers is not possible.
+	// An informer already stopped will never be started again.
+	IsStopped() bool
+}
+
+// ResourceEventHandlerRegistration is an opaque interface representing the
+// registration of a ResourceEventHandler for a SharedInformer. It must be
+// supplied back to the same SharedInformer's `RemoveEventHandler` to
+// unregister the handler.
+type ResourceEventHandlerRegistration interface{}
+
+// SharedIndexInformer provides add and get Indexers ability based on SharedInformer.
+type SharedIndexInformer interface {
+	SharedInformer
+	// AddIndexers adds indexers to the informer before it starts.
+	AddIndexers(indexers Indexers) error
+	GetIndexer() Indexer
+}
+
+// NewSharedInformer creates a new instance for the listwatcher.
+func NewSharedInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration) SharedInformer {
+	return NewSharedIndexInformer(lw, exampleObject, defaultEventHandlerResyncPeriod, Indexers{})
+}
+
+// NewSharedIndexInformer creates a new instance for the listwatcher.
+// The created informer will not do resyncs if the given
+// defaultEventHandlerResyncPeriod is zero. Otherwise: for each
+// handler with a non-zero requested resync period, whether added
+// before or after the informer starts, the nominal resync period is
+// the requested resync period rounded up to a multiple of the
+// informer's resync checking period.
+// Such an informer's resync checking period is established when the informer
+// starts running, and is the maximum of (a) the minimum of the resync periods
+// requested before the informer starts and the
+// defaultEventHandlerResyncPeriod given here and (b) the constant
+// `minimumResyncPeriod` defined in this file.
+func NewSharedIndexInformer(lw ListerWatcher, exampleObject runtime.Object, defaultEventHandlerResyncPeriod time.Duration, indexers Indexers) SharedIndexInformer {
+	realClock := &clock.RealClock{}
+	sharedIndexInformer := &sharedIndexInformer{
+		processor:                       &sharedProcessor{clock: realClock},
+		indexer:                         NewIndexer(DeletionHandlingMetaNamespaceKeyFunc, indexers),
+		listerWatcher:                   lw,
+		objectType:                      exampleObject,
+		resyncCheckPeriod:               defaultEventHandlerResyncPeriod,
+		defaultEventHandlerResyncPeriod: defaultEventHandlerResyncPeriod,
+		cacheMutationDetector:           NewCacheMutationDetector(fmt.Sprintf("%T", exampleObject)),
+		clock:                           realClock,
+	}
+	return sharedIndexInformer
+}
+
+// InformerSynced is a function that can be used to determine if an informer has synced. This is useful for determining if caches have synced.
+type InformerSynced func() bool
+
+const (
+	// syncedPollPeriod controls how often you look at the status of your sync funcs
+	syncedPollPeriod = 100 * time.Millisecond
+
+	// initialBufferSize is the initial number of event notifications that can be buffered.
+	initialBufferSize = 1024
+)
+
+// WaitForNamedCacheSync is a wrapper around WaitForCacheSync that generates log messages
+// indicating that the caller identified by name is waiting for syncs, followed by
+// either a successful or failed sync.
+func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
+	klog.Infof("Waiting for caches to sync for %s", controllerName)
+
+	if !WaitForCacheSync(stopCh, cacheSyncs...) {
+		utilruntime.HandleError(fmt.Errorf("unable to sync caches for %s", controllerName))
+		return false
+	}
+
+	klog.Infof("Caches are synced for %s", controllerName)
+	return true
+}
+
+// WaitForCacheSync waits for caches to populate. It returns true if it was successful, false
+// if the controller should shut down.
+// Callers should prefer WaitForNamedCacheSync().
+func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool {
+	err := wait.PollImmediateUntil(syncedPollPeriod,
+		func() (bool, error) {
+			for _, syncFunc := range cacheSyncs {
+				if !syncFunc() {
+					return false, nil
+				}
+			}
+			return true, nil
+		},
+		stopCh)
+	if err != nil {
+		klog.V(2).Infof("stop requested")
+		return false
+	}
+
+	klog.V(4).Infof("caches populated")
+	return true
+}
+
+// `*sharedIndexInformer` implements SharedIndexInformer and has three
+// main components. One is an indexed local cache, `indexer Indexer`.
+// The second main component is a Controller that pulls
+// objects/notifications using the ListerWatcher and pushes them into
+// a DeltaFIFO --- whose knownObjects is the informer's local cache
+// --- while concurrently Popping Deltas values from that fifo and
+// processing them with `sharedIndexInformer::HandleDeltas`. Each
+// invocation of HandleDeltas, which is done with the fifo's lock
+// held, processes each Delta in turn. For each Delta this both
+// updates the local cache and stuffs the relevant notification into
+// the sharedProcessor. The third main component is that
+// sharedProcessor, which is responsible for relaying those
+// notifications to each of the informer's clients.
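+//
+// Editor's note: schematically, the flow described above (illustrative, not
+// upstream text):
+//
+//	Reflector (ListAndWatch) -> DeltaFIFO -> HandleDeltas -> indexer (local cache)
+//	                                                      -> sharedProcessor -> processorListener -> ResourceEventHandler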
+type sharedIndexInformer struct { + indexer Indexer + controller Controller + + processor *sharedProcessor + cacheMutationDetector MutationDetector + + listerWatcher ListerWatcher + + // objectType is an example object of the type this informer is + // expected to handle. Only the type needs to be right, except + // that when that is `unstructured.Unstructured` the object's + // `"apiVersion"` and `"kind"` must also be right. + objectType runtime.Object + + // resyncCheckPeriod is how often we want the reflector's resync timer to fire so it can call + // shouldResync to check if any of our listeners need a resync. + resyncCheckPeriod time.Duration + // defaultEventHandlerResyncPeriod is the default resync period for any handlers added via + // AddEventHandler (i.e. they don't specify one and just want to use the shared informer's default + // value). + defaultEventHandlerResyncPeriod time.Duration + // clock allows for testability + clock clock.Clock + + started, stopped bool + startedLock sync.Mutex + + // blockDeltas gives a way to stop all event distribution so that a late event handler + // can safely join the shared informer. + blockDeltas sync.Mutex + + // Called whenever the ListAndWatch drops the connection with an error. + watchErrorHandler WatchErrorHandler + + transform TransformFunc +} + +// dummyController hides the fact that a SharedInformer is different from a dedicated one +// where a caller can `Run`. The run method is disconnected in this case, because higher +// level logic will decide when to start the SharedInformer and related controller. +// Because returning information back is always asynchronous, the legacy callers shouldn't +// notice any change in behavior. +type dummyController struct { + informer *sharedIndexInformer +} + +func (v *dummyController) Run(stopCh <-chan struct{}) { +} + +func (v *dummyController) HasSynced() bool { + return v.informer.HasSynced() +} + +func (v *dummyController) LastSyncResourceVersion() string { + return "" +} + +type updateNotification struct { + oldObj interface{} + newObj interface{} +} + +type addNotification struct { + newObj interface{} +} + +type deleteNotification struct { + oldObj interface{} +} + +func (s *sharedIndexInformer) SetWatchErrorHandler(handler WatchErrorHandler) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + s.watchErrorHandler = handler + return nil +} + +func (s *sharedIndexInformer) SetTransform(handler TransformFunc) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + s.transform = handler + return nil +} + +func (s *sharedIndexInformer) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + if s.HasStarted() { + klog.Warningf("The sharedIndexInformer has started, run more than once is not allowed") + return + } + fifo := NewDeltaFIFOWithOptions(DeltaFIFOOptions{ + KnownObjects: s.indexer, + EmitDeltaTypeReplaced: true, + }) + + cfg := &Config{ + Queue: fifo, + ListerWatcher: s.listerWatcher, + ObjectType: s.objectType, + FullResyncPeriod: s.resyncCheckPeriod, + RetryOnError: false, + ShouldResync: s.processor.shouldResync, + + Process: s.HandleDeltas, + WatchErrorHandler: s.watchErrorHandler, + } + + func() { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + s.controller = New(cfg) + s.controller.(*controller).clock = s.clock + s.started = true + }() + + // Separate stop channel because Processor should 
be stopped strictly after controller + processorStopCh := make(chan struct{}) + var wg wait.Group + defer wg.Wait() // Wait for Processor to stop + defer close(processorStopCh) // Tell Processor to stop + wg.StartWithChannel(processorStopCh, s.cacheMutationDetector.Run) + wg.StartWithChannel(processorStopCh, s.processor.run) + + defer func() { + s.startedLock.Lock() + defer s.startedLock.Unlock() + s.stopped = true // Don't want any new listeners + }() + s.controller.Run(stopCh) +} + +func (s *sharedIndexInformer) HasStarted() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + return s.started +} + +func (s *sharedIndexInformer) HasSynced() bool { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return false + } + return s.controller.HasSynced() +} + +func (s *sharedIndexInformer) LastSyncResourceVersion() string { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.controller == nil { + return "" + } + return s.controller.LastSyncResourceVersion() +} + +func (s *sharedIndexInformer) GetStore() Store { + return s.indexer +} + +func (s *sharedIndexInformer) GetIndexer() Indexer { + return s.indexer +} + +func (s *sharedIndexInformer) AddIndexers(indexers Indexers) error { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.started { + return fmt.Errorf("informer has already started") + } + + return s.indexer.AddIndexers(indexers) +} + +func (s *sharedIndexInformer) GetController() Controller { + return &dummyController{informer: s} +} + +func (s *sharedIndexInformer) AddEventHandler(handler ResourceEventHandler) (ResourceEventHandlerRegistration, error) { + return s.AddEventHandlerWithResyncPeriod(handler, s.defaultEventHandlerResyncPeriod) +} + +func determineResyncPeriod(desired, check time.Duration) time.Duration { + if desired == 0 { + return desired + } + if check == 0 { + klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired) + return 0 + } + if desired < check { + klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check) + return check + } + return desired +} + +const minimumResyncPeriod = 1 * time.Second + +func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEventHandler, resyncPeriod time.Duration) (ResourceEventHandlerRegistration, error) { + s.startedLock.Lock() + defer s.startedLock.Unlock() + + if s.stopped { + return nil, fmt.Errorf("handler %v was not added to shared informer because it has stopped already", handler) + } + + if resyncPeriod > 0 { + if resyncPeriod < minimumResyncPeriod { + klog.Warningf("resyncPeriod %v is too small. Changing it to the minimum allowed value of %v", resyncPeriod, minimumResyncPeriod) + resyncPeriod = minimumResyncPeriod + } + + if resyncPeriod < s.resyncCheckPeriod { + if s.started { + klog.Warningf("resyncPeriod %v is smaller than resyncCheckPeriod %v and the informer has already started. 
Changing it to %v", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
+				resyncPeriod = s.resyncCheckPeriod
+			} else {
+				// if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update
+				// resyncCheckPeriod to match resyncPeriod and adjust the resync periods of all the listeners
+				// accordingly
+				s.resyncCheckPeriod = resyncPeriod
+				s.processor.resyncCheckPeriodChanged(resyncPeriod)
+			}
+		}
+	}
+
+	listener := newProcessListener(handler, resyncPeriod, determineResyncPeriod(resyncPeriod, s.resyncCheckPeriod), s.clock.Now(), initialBufferSize)
+
+	if !s.started {
+		return s.processor.addListener(listener), nil
+	}
+
+	// in order to safely join, we have to
+	// 1. stop sending add/update/delete notifications
+	// 2. do a list against the store
+	// 3. send synthetic "Add" events to the new handler
+	// 4. unblock
+	s.blockDeltas.Lock()
+	defer s.blockDeltas.Unlock()
+
+	handle := s.processor.addListener(listener)
+	for _, item := range s.indexer.List() {
+		listener.add(addNotification{newObj: item})
+	}
+	return handle, nil
+}
+
+func (s *sharedIndexInformer) HandleDeltas(obj interface{}) error {
+	s.blockDeltas.Lock()
+	defer s.blockDeltas.Unlock()
+
+	if deltas, ok := obj.(Deltas); ok {
+		return processDeltas(s, s.indexer, s.transform, deltas)
+	}
+	return errors.New("object given as Process argument is not Deltas")
+}
+
+// Conforms to ResourceEventHandler
+func (s *sharedIndexInformer) OnAdd(obj interface{}) {
+	// Invocation of this function is locked under s.blockDeltas, so it is
+	// safe to distribute the notification
+	s.cacheMutationDetector.AddObject(obj)
+	s.processor.distribute(addNotification{newObj: obj}, false)
+}
+
+// Conforms to ResourceEventHandler
+func (s *sharedIndexInformer) OnUpdate(old, new interface{}) {
+	isSync := false
+
+	// If this is a Sync event, isSync should be true.
+	// If this is a Replaced event, isSync is true if the resource version is unchanged.
+	// If the RV is unchanged: this is a Sync/Replaced event, so isSync is true.
+
+	if accessor, err := meta.Accessor(new); err == nil {
+		if oldAccessor, err := meta.Accessor(old); err == nil {
+			// Events that didn't change resourceVersion are treated as resync events
+			// and only propagated to listeners that requested resync
+			isSync = accessor.GetResourceVersion() == oldAccessor.GetResourceVersion()
+		}
+	}
+
+	// Invocation of this function is locked under s.blockDeltas, so it is
+	// safe to distribute the notification
+	s.cacheMutationDetector.AddObject(new)
+	s.processor.distribute(updateNotification{oldObj: old, newObj: new}, isSync)
+}
+
+// Conforms to ResourceEventHandler
+func (s *sharedIndexInformer) OnDelete(old interface{}) {
+	// Invocation of this function is locked under s.blockDeltas, so it is
+	// safe to distribute the notification
+	s.processor.distribute(deleteNotification{oldObj: old}, false)
+}
+
+// IsStopped reports whether the informer has already been stopped
+func (s *sharedIndexInformer) IsStopped() bool {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+	return s.stopped
+}
+
+func (s *sharedIndexInformer) RemoveEventHandler(handle ResourceEventHandlerRegistration) error {
+	s.startedLock.Lock()
+	defer s.startedLock.Unlock()
+
+	// in order to safely remove, we have to
+	// 1. stop sending add/update/delete notifications
+	// 2. remove and stop listener
+	// 3. unblock
+	s.blockDeltas.Lock()
+	defer s.blockDeltas.Unlock()
+	return s.processor.removeListener(handle)
+}
+
+// sharedProcessor has a collection of processorListener and can
+// distribute a notification object to its listeners. There are two
+// kinds of distribute operations. The sync distributions go to a
+// subset of the listeners that (a) is recomputed in the occasional
+// calls to shouldResync and (b) every listener is initially put in.
+// The non-sync distributions go to every listener.
+type sharedProcessor struct {
+	listenersStarted bool
+	listenersLock    sync.RWMutex
+	// Map from listeners to whether or not they are currently syncing
+	listeners map[*processorListener]bool
+	clock     clock.Clock
+	wg        wait.Group
+}
+
+func (p *sharedProcessor) getListener(registration ResourceEventHandlerRegistration) *processorListener {
+	p.listenersLock.RLock()
+	defer p.listenersLock.RUnlock()
+
+	if p.listeners == nil {
+		return nil
+	}
+
+	if result, ok := registration.(*processorListener); ok {
+		if _, exists := p.listeners[result]; exists {
+			return result
+		}
+	}
+
+	return nil
+}
+
+func (p *sharedProcessor) addListener(listener *processorListener) ResourceEventHandlerRegistration {
+	p.listenersLock.Lock()
+	defer p.listenersLock.Unlock()
+
+	if p.listeners == nil {
+		p.listeners = make(map[*processorListener]bool)
+	}
+
+	p.listeners[listener] = true
+
+	if p.listenersStarted {
+		p.wg.Start(listener.run)
+		p.wg.Start(listener.pop)
+	}
+
+	return listener
+}
+
+func (p *sharedProcessor) removeListener(handle ResourceEventHandlerRegistration) error {
+	p.listenersLock.Lock()
+	defer p.listenersLock.Unlock()
+
+	listener, ok := handle.(*processorListener)
+	if !ok {
+		return fmt.Errorf("invalid key type %T", handle)
+	} else if p.listeners == nil {
+		// No listeners are registered, do nothing
+		return nil
+	} else if _, exists := p.listeners[listener]; !exists {
+		// Listener is not registered, just do nothing
+		return nil
+	}
+
+	delete(p.listeners, listener)
+
+	if p.listenersStarted {
+		close(listener.addCh)
+	}
+
+	return nil
+}
+
+func (p *sharedProcessor) distribute(obj interface{}, sync bool) {
+	p.listenersLock.RLock()
+	defer p.listenersLock.RUnlock()
+
+	for listener, isSyncing := range p.listeners {
+		switch {
+		case !sync:
+			// non-sync messages are delivered to every listener
+			listener.add(obj)
+		case isSyncing:
+			// sync messages are delivered to every syncing listener
+			listener.add(obj)
+		default:
+			// skipping a sync obj for a non-syncing listener
+		}
+	}
+}
+
+func (p *sharedProcessor) run(stopCh <-chan struct{}) {
+	func() {
+		p.listenersLock.RLock()
+		defer p.listenersLock.RUnlock()
+		for listener := range p.listeners {
+			p.wg.Start(listener.run)
+			p.wg.Start(listener.pop)
+		}
+		p.listenersStarted = true
+	}()
+	<-stopCh
+
+	p.listenersLock.Lock()
+	defer p.listenersLock.Unlock()
+	for listener := range p.listeners {
+		close(listener.addCh) // Tell .pop() to stop. .pop() will tell .run() to stop
+	}
+
+	// Wipe out list of listeners since they are now closed
+	// (processorListener cannot be re-used)
+	p.listeners = nil
+
+	// Reset to false since no listeners are running
+	p.listenersStarted = false
+
+	p.wg.Wait() // Wait for all .pop() and .run() to stop
+}
+
+// shouldResync queries every listener to determine if any of them need a resync, based on each
+// listener's resyncPeriod.
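+// (Editor's note: besides computing the return value, this also records, per
+// listener, whether it is due for the upcoming resync --- the boolean stored in
+// p.listeners --- which distribute() consults when delivering sync notifications.)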
+func (p *sharedProcessor) shouldResync() bool { + p.listenersLock.Lock() + defer p.listenersLock.Unlock() + + resyncNeeded := false + now := p.clock.Now() + for listener := range p.listeners { + // need to loop through all the listeners to see if they need to resync so we can prepare any + // listeners that are going to be resyncing. + shouldResync := listener.shouldResync(now) + p.listeners[listener] = shouldResync + + if shouldResync { + resyncNeeded = true + listener.determineNextResync(now) + } + } + return resyncNeeded +} + +func (p *sharedProcessor) resyncCheckPeriodChanged(resyncCheckPeriod time.Duration) { + p.listenersLock.RLock() + defer p.listenersLock.RUnlock() + + for listener := range p.listeners { + resyncPeriod := determineResyncPeriod( + listener.requestedResyncPeriod, resyncCheckPeriod) + listener.setResyncPeriod(resyncPeriod) + } +} + +// processorListener relays notifications from a sharedProcessor to +// one ResourceEventHandler --- using two goroutines, two unbuffered +// channels, and an unbounded ring buffer. The `add(notification)` +// function sends the given notification to `addCh`. One goroutine +// runs `pop()`, which pumps notifications from `addCh` to `nextCh` +// using storage in the ring buffer while `nextCh` is not keeping up. +// Another goroutine runs `run()`, which receives notifications from +// `nextCh` and synchronously invokes the appropriate handler method. +// +// processorListener also keeps track of the adjusted requested resync +// period of the listener. +type processorListener struct { + nextCh chan interface{} + addCh chan interface{} + + handler ResourceEventHandler + + // pendingNotifications is an unbounded ring buffer that holds all notifications not yet distributed. + // There is one per listener, but a failing/stalled listener will have infinite pendingNotifications + // added until we OOM. + // TODO: This is no worse than before, since reflectors were backed by unbounded DeltaFIFOs, but + // we should try to do something better. + pendingNotifications buffer.RingGrowing + + // requestedResyncPeriod is how frequently the listener wants a + // full resync from the shared informer, but modified by two + // adjustments. One is imposing a lower bound, + // `minimumResyncPeriod`. The other is another lower bound, the + // sharedIndexInformer's `resyncCheckPeriod`, that is imposed (a) only + // in AddEventHandlerWithResyncPeriod invocations made after the + // sharedIndexInformer starts and (b) only if the informer does + // resyncs at all. + requestedResyncPeriod time.Duration + // resyncPeriod is the threshold that will be used in the logic + // for this listener. This value differs from + // requestedResyncPeriod only when the sharedIndexInformer does + // not do resyncs, in which case the value here is zero. The + // actual time between resyncs depends on when the + // sharedProcessor's `shouldResync` function is invoked and when + // the sharedIndexInformer processes `Sync` type Delta objects. 
+	resyncPeriod time.Duration
+	// nextResync is the earliest time the listener should get a full resync
+	nextResync time.Time
+	// resyncLock guards access to resyncPeriod and nextResync
+	resyncLock sync.Mutex
+}
+
+func newProcessListener(handler ResourceEventHandler, requestedResyncPeriod, resyncPeriod time.Duration, now time.Time, bufferSize int) *processorListener {
+	ret := &processorListener{
+		nextCh:                make(chan interface{}),
+		addCh:                 make(chan interface{}),
+		handler:               handler,
+		pendingNotifications:  *buffer.NewRingGrowing(bufferSize),
+		requestedResyncPeriod: requestedResyncPeriod,
+		resyncPeriod:          resyncPeriod,
+	}
+
+	ret.determineNextResync(now)
+
+	return ret
+}
+
+func (p *processorListener) add(notification interface{}) {
+	p.addCh <- notification
+}
+
+func (p *processorListener) pop() {
+	defer utilruntime.HandleCrash()
+	defer close(p.nextCh) // Tell .run() to stop
+
+	var nextCh chan<- interface{}
+	var notification interface{}
+	for {
+		select {
+		case nextCh <- notification:
+			// Notification dispatched
+			var ok bool
+			notification, ok = p.pendingNotifications.ReadOne()
+			if !ok { // Nothing to pop
+				nextCh = nil // Disable this select case
+			}
+		case notificationToAdd, ok := <-p.addCh:
+			if !ok {
+				return
+			}
+			if notification == nil { // No notification to pop (and pendingNotifications is empty)
+				// Optimize the case - skip adding to pendingNotifications
+				notification = notificationToAdd
+				nextCh = p.nextCh
+			} else { // There is already a notification waiting to be dispatched
+				p.pendingNotifications.WriteOne(notificationToAdd)
+			}
+		}
+	}
+}
+
+func (p *processorListener) run() {
+	// this call blocks until the channel is closed. When a panic happens during the notification
+	// we will catch it, **the offending item will be skipped!**, and after a short delay (one second)
+	// the next notification will be attempted. This is usually better than the alternative of never
+	// delivering again.
+	stopCh := make(chan struct{})
+	wait.Until(func() {
+		for next := range p.nextCh {
+			switch notification := next.(type) {
+			case updateNotification:
+				p.handler.OnUpdate(notification.oldObj, notification.newObj)
+			case addNotification:
+				p.handler.OnAdd(notification.newObj)
+			case deleteNotification:
+				p.handler.OnDelete(notification.oldObj)
+			default:
+				utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next))
+			}
+		}
+		// the only way to get here is if the p.nextCh is empty and closed
+		close(stopCh)
+	}, 1*time.Second, stopCh)
+}
+
+// shouldResync determines if the listener needs a resync. If the listener's resyncPeriod is 0,
+// this always returns false.
+func (p *processorListener) shouldResync(now time.Time) bool {
+	p.resyncLock.Lock()
+	defer p.resyncLock.Unlock()
+
+	if p.resyncPeriod == 0 {
+		return false
+	}
+
+	return now.After(p.nextResync) || now.Equal(p.nextResync)
+}
+
+func (p *processorListener) determineNextResync(now time.Time) {
+	p.resyncLock.Lock()
+	defer p.resyncLock.Unlock()
+
+	p.nextResync = now.Add(p.resyncPeriod)
+}
+
+func (p *processorListener) setResyncPeriod(resyncPeriod time.Duration) {
+	p.resyncLock.Lock()
+	defer p.resyncLock.Unlock()
+
+	p.resyncPeriod = resyncPeriod
+}
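For orientation, here is a minimal sketch of how the listener plumbing above is driven by consumers of this vendored client-go: AddEventHandler hands the handler to sharedProcessor.addListener, and RemoveEventHandler routes through removeListener. The registration-handle signatures match the v0.26-era API this vendor drop corresponds to; the kubeconfig and clientset wiring is illustrative only and assumes a reachable cluster.

```go
package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumes a kubeconfig at the default location.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	factory := informers.NewSharedInformerFactory(kubernetes.NewForConfigOrDie(config), 10*time.Minute)
	podInformer := factory.Core().V1().Pods().Informer()

	// AddEventHandler ends up in sharedProcessor.addListener above.
	reg, err := podInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("added:", obj.(*v1.Pod).Name) },
	})
	if err != nil {
		panic(err)
	}

	stop := make(chan struct{})
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// RemoveEventHandler routes to sharedProcessor.removeListener above.
	if err := podInformer.RemoveEventHandler(reg); err != nil {
		panic(err)
	}
	close(stop)
}
```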
diff --git a/vendor/k8s.io/client-go/tools/cache/store.go b/vendor/k8s.io/client-go/tools/cache/store.go
new file mode 100644
index 00000000..5308ea74
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/cache/store.go
@@ -0,0 +1,276 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+)
+
+// Store is a generic object storage and processing interface. A
+// Store holds a map from string keys to accumulators, and has
+// operations to add, update, and delete a given object to/from the
+// accumulator currently associated with a given key. A Store also
+// knows how to extract the key from a given object, so many operations
+// are given only the object.
+//
+// In the simplest Store implementations each accumulator is simply
+// the last given object, or empty after Delete, and thus the Store's
+// behavior is simple storage.
+//
+// Reflector knows how to watch a server and update a Store. This
+// package provides a variety of implementations of Store.
+type Store interface {
+
+	// Add adds the given object to the accumulator associated with the given object's key
+	Add(obj interface{}) error
+
+	// Update updates the given object in the accumulator associated with the given object's key
+	Update(obj interface{}) error
+
+	// Delete deletes the given object from the accumulator associated with the given object's key
+	Delete(obj interface{}) error
+
+	// List returns a list of all the currently non-empty accumulators
+	List() []interface{}
+
+	// ListKeys returns a list of all the keys currently associated with non-empty accumulators
+	ListKeys() []string
+
+	// Get returns the accumulator associated with the given object's key
+	Get(obj interface{}) (item interface{}, exists bool, err error)
+
+	// GetByKey returns the accumulator associated with the given key
+	GetByKey(key string) (item interface{}, exists bool, err error)
+
+	// Replace will delete the contents of the store, using instead the
+	// given list. Store takes ownership of the list, you should not reference
+	// it after calling this function.
+	Replace([]interface{}, string) error
+
+	// Resync is meaningless in the terms appearing here but has
+	// meaning in some implementations that have non-trivial
+	// additional behavior (e.g., DeltaFIFO).
+	Resync() error
+}
+
+// KeyFunc knows how to make a key from an object. Implementations should be deterministic.
+type KeyFunc func(obj interface{}) (string, error)
+
+// KeyError will be returned any time a KeyFunc gives an error; it includes the object
+// at fault.
+type KeyError struct {
+	Obj interface{}
+	Err error
+}
+
+// Error gives a human-readable description of the error.
+func (k KeyError) Error() string {
+	return fmt.Sprintf("couldn't create key for object %+v: %v", k.Obj, k.Err)
+}
+
+// Unwrap implements errors.Unwrap
+func (k KeyError) Unwrap() error {
+	return k.Err
+}
+
+// ExplicitKey can be passed to MetaNamespaceKeyFunc if you have the key for
+// the object but not the object itself.
+type ExplicitKey string
+
+// MetaNamespaceKeyFunc is a convenient default KeyFunc which knows how to make
+// keys for API objects which implement meta.Interface.
+// The key uses the format <namespace>/<name> unless <namespace> is empty, then
+// it's just <name>.
+// +// TODO: replace key-as-string with a key-as-struct so that this +// packing/unpacking won't be necessary. +func MetaNamespaceKeyFunc(obj interface{}) (string, error) { + if key, ok := obj.(ExplicitKey); ok { + return string(key), nil + } + meta, err := meta.Accessor(obj) + if err != nil { + return "", fmt.Errorf("object has no meta: %v", err) + } + if len(meta.GetNamespace()) > 0 { + return meta.GetNamespace() + "/" + meta.GetName(), nil + } + return meta.GetName(), nil +} + +// SplitMetaNamespaceKey returns the namespace and name that +// MetaNamespaceKeyFunc encoded into key. +// +// TODO: replace key-as-string with a key-as-struct so that this +// packing/unpacking won't be necessary. +func SplitMetaNamespaceKey(key string) (namespace, name string, err error) { + parts := strings.Split(key, "/") + switch len(parts) { + case 1: + // name only, no namespace + return "", parts[0], nil + case 2: + // namespace and name + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected key format: %q", key) +} + +// `*cache` implements Indexer in terms of a ThreadSafeStore and an +// associated KeyFunc. +type cache struct { + // cacheStorage bears the burden of thread safety for the cache + cacheStorage ThreadSafeStore + // keyFunc is used to make the key for objects stored in and retrieved from items, and + // should be deterministic. + keyFunc KeyFunc +} + +var _ Store = &cache{} + +// Add inserts an item into the cache. +func (c *cache) Add(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Add(key, obj) + return nil +} + +// Update sets an item in the cache to its updated state. +func (c *cache) Update(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Update(key, obj) + return nil +} + +// Delete removes an item from the cache. +func (c *cache) Delete(obj interface{}) error { + key, err := c.keyFunc(obj) + if err != nil { + return KeyError{obj, err} + } + c.cacheStorage.Delete(key) + return nil +} + +// List returns a list of all the items. +// List is completely threadsafe as long as you treat all items as immutable. +func (c *cache) List() []interface{} { + return c.cacheStorage.List() +} + +// ListKeys returns a list of all the keys of the objects currently +// in the cache. +func (c *cache) ListKeys() []string { + return c.cacheStorage.ListKeys() +} + +// GetIndexers returns the indexers of cache +func (c *cache) GetIndexers() Indexers { + return c.cacheStorage.GetIndexers() +} + +// Index returns a list of items that match on the index function +// Index is thread-safe so long as you treat all items as immutable +func (c *cache) Index(indexName string, obj interface{}) ([]interface{}, error) { + return c.cacheStorage.Index(indexName, obj) +} + +// IndexKeys returns the storage keys of the stored objects whose set of +// indexed values for the named index includes the given indexed value. +// The returned keys are suitable to pass to GetByKey(). +func (c *cache) IndexKeys(indexName, indexedValue string) ([]string, error) { + return c.cacheStorage.IndexKeys(indexName, indexedValue) +} + +// ListIndexFuncValues returns the list of generated values of an Index func +func (c *cache) ListIndexFuncValues(indexName string) []string { + return c.cacheStorage.ListIndexFuncValues(indexName) +} + +// ByIndex returns the stored objects whose set of indexed values +// for the named index includes the given indexed value. 
+func (c *cache) ByIndex(indexName, indexedValue string) ([]interface{}, error) {
+	return c.cacheStorage.ByIndex(indexName, indexedValue)
+}
+
+func (c *cache) AddIndexers(newIndexers Indexers) error {
+	return c.cacheStorage.AddIndexers(newIndexers)
+}
+
+// Get returns the requested item, or sets exists=false.
+// Get is completely threadsafe as long as you treat all items as immutable.
+func (c *cache) Get(obj interface{}) (item interface{}, exists bool, err error) {
+	key, err := c.keyFunc(obj)
+	if err != nil {
+		return nil, false, KeyError{obj, err}
+	}
+	return c.GetByKey(key)
+}
+
+// GetByKey returns the requested item, or exists=false.
+// GetByKey is completely threadsafe as long as you treat all items as immutable.
+func (c *cache) GetByKey(key string) (item interface{}, exists bool, err error) {
+	item, exists = c.cacheStorage.Get(key)
+	return item, exists, nil
+}
+
+// Replace will delete the contents of 'c', using instead the given list.
+// 'c' takes ownership of the list, you should not reference the list again
+// after calling this function.
+func (c *cache) Replace(list []interface{}, resourceVersion string) error {
+	items := make(map[string]interface{}, len(list))
+	for _, item := range list {
+		key, err := c.keyFunc(item)
+		if err != nil {
+			return KeyError{item, err}
+		}
+		items[key] = item
+	}
+	c.cacheStorage.Replace(items, resourceVersion)
+	return nil
+}
+
+// Resync is meaningless for one of these
+func (c *cache) Resync() error {
+	return nil
+}
+
+// NewStore returns a Store implemented simply with a map and a lock.
+func NewStore(keyFunc KeyFunc) Store {
+	return &cache{
+		cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}),
+		keyFunc:      keyFunc,
+	}
+}
+
+// NewIndexer returns an Indexer implemented simply with a map and a lock.
+func NewIndexer(keyFunc KeyFunc, indexers Indexers) Indexer {
+	return &cache{
+		cacheStorage: NewThreadSafeStore(indexers, Indices{}),
+		keyFunc:      keyFunc,
+	}
+}
diff --git a/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
new file mode 100644
index 00000000..145e93ee
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/cache/thread_safe_store.go
@@ -0,0 +1,363 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+import (
+	"fmt"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/util/sets"
+)
+
+// ThreadSafeStore is an interface that allows concurrent indexed
+// access to a storage backend. It is like Indexer but does not
+// (necessarily) know how to extract the Store key from a given
+// object.
+//
+// TL;DR caveats: you must not modify anything returned by Get or List as it will break
+// the indexing feature in addition to not being thread safe.
+//
+// The guarantees of thread safety provided by List/Get are only valid if the caller
+// treats returned items as read-only. For example, a pointer inserted in the store
+// through `Add` will be returned as is by `Get`.
Multiple clients might invoke `Get` +// on the same key and modify the pointer in a non-thread-safe way. Also note that +// modifying objects stored by the indexers (if any) will *not* automatically lead +// to a re-index. So it's not a good idea to directly modify the objects returned by +// Get/List, in general. +type ThreadSafeStore interface { + Add(key string, obj interface{}) + Update(key string, obj interface{}) + Delete(key string) + Get(key string) (item interface{}, exists bool) + List() []interface{} + ListKeys() []string + Replace(map[string]interface{}, string) + Index(indexName string, obj interface{}) ([]interface{}, error) + IndexKeys(indexName, indexedValue string) ([]string, error) + ListIndexFuncValues(name string) []string + ByIndex(indexName, indexedValue string) ([]interface{}, error) + GetIndexers() Indexers + + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. + AddIndexers(newIndexers Indexers) error + // Resync is a no-op and is deprecated + Resync() error +} + +// storeIndex implements the indexing functionality for Store interface +type storeIndex struct { + // indexers maps a name to an IndexFunc + indexers Indexers + // indices maps a name to an Index + indices Indices +} + +func (i *storeIndex) reset() { + i.indices = Indices{} +} + +func (i *storeIndex) getKeysFromIndex(indexName string, obj interface{}) (sets.String, error) { + indexFunc := i.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + indexedValues, err := indexFunc(obj) + if err != nil { + return nil, err + } + index := i.indices[indexName] + + var storeKeySet sets.String + if len(indexedValues) == 1 { + // In majority of cases, there is exactly one value matching. + // Optimize the most common path - deduping is not needed here. + storeKeySet = index[indexedValues[0]] + } else { + // Need to de-dupe the return list. + // Since multiple keys are allowed, this can happen. + storeKeySet = sets.String{} + for _, indexedValue := range indexedValues { + for key := range index[indexedValue] { + storeKeySet.Insert(key) + } + } + } + + return storeKeySet, nil +} + +func (i *storeIndex) getKeysByIndex(indexName, indexedValue string) (sets.String, error) { + indexFunc := i.indexers[indexName] + if indexFunc == nil { + return nil, fmt.Errorf("Index with name %s does not exist", indexName) + } + + index := i.indices[indexName] + return index[indexedValue], nil +} + +func (i *storeIndex) getIndexValues(indexName string) []string { + index := i.indices[indexName] + names := make([]string, 0, len(index)) + for key := range index { + names = append(names, key) + } + return names +} + +func (i *storeIndex) addIndexers(newIndexers Indexers) error { + oldKeys := sets.StringKeySet(i.indexers) + newKeys := sets.StringKeySet(newIndexers) + + if oldKeys.HasAny(newKeys.List()...) 
{
+		return fmt.Errorf("indexer conflict: %v", oldKeys.Intersection(newKeys))
+	}
+
+	for k, v := range newIndexers {
+		i.indexers[k] = v
+	}
+	return nil
+}
+
+// updateIndices modifies the object's location in the managed indexes:
+// - for create you must provide only the newObj
+// - for update you must provide both the oldObj and the newObj
+// - for delete you must provide only the oldObj
+// updateIndices must be called from a function that already has a lock on the cache
+func (i *storeIndex) updateIndices(oldObj interface{}, newObj interface{}, key string) {
+	var oldIndexValues, indexValues []string
+	var err error
+	for name, indexFunc := range i.indexers {
+		if oldObj != nil {
+			oldIndexValues, err = indexFunc(oldObj)
+		} else {
+			oldIndexValues = oldIndexValues[:0]
+		}
+		if err != nil {
+			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
+		}
+
+		if newObj != nil {
+			indexValues, err = indexFunc(newObj)
+		} else {
+			indexValues = indexValues[:0]
+		}
+		if err != nil {
+			panic(fmt.Errorf("unable to calculate an index entry for key %q on index %q: %v", key, name, err))
+		}
+
+		index := i.indices[name]
+		if index == nil {
+			index = Index{}
+			i.indices[name] = index
+		}
+
+		if len(indexValues) == 1 && len(oldIndexValues) == 1 && indexValues[0] == oldIndexValues[0] {
+			// We optimize for the most common case where indexFunc returns a single value which has not been changed
+			continue
+		}
+
+		for _, value := range oldIndexValues {
+			i.deleteKeyFromIndex(key, value, index)
+		}
+		for _, value := range indexValues {
+			i.addKeyToIndex(key, value, index)
+		}
+	}
+}
+
+func (i *storeIndex) addKeyToIndex(key, indexValue string, index Index) {
+	set := index[indexValue]
+	if set == nil {
+		set = sets.String{}
+		index[indexValue] = set
+	}
+	set.Insert(key)
+}
+
+func (i *storeIndex) deleteKeyFromIndex(key, indexValue string, index Index) {
+	set := index[indexValue]
+	if set == nil {
+		return
+	}
+	set.Delete(key)
+	// If we don't delete the set when zero, indices with high cardinality
+	// short lived resources can cause memory to increase over time from
+	// unused empty sets. See `kubernetes/kubernetes/issues/84959`.
+	if len(set) == 0 {
+		delete(index, indexValue)
+	}
+}
+
+// threadSafeMap implements ThreadSafeStore
+type threadSafeMap struct {
+	lock  sync.RWMutex
+	items map[string]interface{}
+
+	// index implements the indexing functionality
+	index *storeIndex
+}
+
+func (c *threadSafeMap) Add(key string, obj interface{}) {
+	c.Update(key, obj)
+}
+
+func (c *threadSafeMap) Update(key string, obj interface{}) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	oldObject := c.items[key]
+	c.items[key] = obj
+	c.index.updateIndices(oldObject, obj, key)
+}
+
+func (c *threadSafeMap) Delete(key string) {
+	c.lock.Lock()
+	defer c.lock.Unlock()
+	if obj, exists := c.items[key]; exists {
+		c.index.updateIndices(obj, nil, key)
+		delete(c.items, key)
+	}
+}
+
+func (c *threadSafeMap) Get(key string) (item interface{}, exists bool) {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	item, exists = c.items[key]
+	return item, exists
+}
+
+func (c *threadSafeMap) List() []interface{} {
+	c.lock.RLock()
+	defer c.lock.RUnlock()
+	list := make([]interface{}, 0, len(c.items))
+	for _, item := range c.items {
+		list = append(list, item)
+	}
+	return list
+}
+
+// ListKeys returns a list of all the keys of the objects currently
+// in the threadSafeMap.
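To see updateIndices and the per-value key sets above in action, a sketch using an Indexer; cache.NamespaceIndex and cache.MetaNamespaceIndexFunc are the companion helpers from this package's index.go, which is not part of this hunk:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		cache.NamespaceIndex: cache.MetaNamespaceIndexFunc,
	})

	// Each Add runs through threadSafeMap.Update, which calls updateIndices.
	_ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "demo", Name: "a"}})
	_ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "demo", Name: "b"}})
	_ = indexer.Add(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "other", Name: "c"}})

	// ByIndex reads back the per-namespace key sets maintained above.
	objs, _ := indexer.ByIndex(cache.NamespaceIndex, "demo")
	fmt.Println(len(objs)) // 2
}
```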
+func (c *threadSafeMap) ListKeys() []string { + c.lock.RLock() + defer c.lock.RUnlock() + list := make([]string, 0, len(c.items)) + for key := range c.items { + list = append(list, key) + } + return list +} + +func (c *threadSafeMap) Replace(items map[string]interface{}, resourceVersion string) { + c.lock.Lock() + defer c.lock.Unlock() + c.items = items + + // rebuild any index + c.index.reset() + for key, item := range c.items { + c.index.updateIndices(nil, item, key) + } +} + +// Index returns a list of items that match the given object on the index function. +// Index is thread-safe so long as you treat all items as immutable. +func (c *threadSafeMap) Index(indexName string, obj interface{}) ([]interface{}, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + storeKeySet, err := c.index.getKeysFromIndex(indexName, obj) + if err != nil { + return nil, err + } + + list := make([]interface{}, 0, storeKeySet.Len()) + for storeKey := range storeKeySet { + list = append(list, c.items[storeKey]) + } + return list, nil +} + +// ByIndex returns a list of the items whose indexed values in the given index include the given indexed value +func (c *threadSafeMap) ByIndex(indexName, indexedValue string) ([]interface{}, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + set, err := c.index.getKeysByIndex(indexName, indexedValue) + if err != nil { + return nil, err + } + list := make([]interface{}, 0, set.Len()) + for key := range set { + list = append(list, c.items[key]) + } + + return list, nil +} + +// IndexKeys returns a list of the Store keys of the objects whose indexed values in the given index include the given indexed value. +// IndexKeys is thread-safe so long as you treat all items as immutable. +func (c *threadSafeMap) IndexKeys(indexName, indexedValue string) ([]string, error) { + c.lock.RLock() + defer c.lock.RUnlock() + + set, err := c.index.getKeysByIndex(indexName, indexedValue) + if err != nil { + return nil, err + } + return set.List(), nil +} + +func (c *threadSafeMap) ListIndexFuncValues(indexName string) []string { + c.lock.RLock() + defer c.lock.RUnlock() + + return c.index.getIndexValues(indexName) +} + +func (c *threadSafeMap) GetIndexers() Indexers { + return c.index.indexers +} + +func (c *threadSafeMap) AddIndexers(newIndexers Indexers) error { + c.lock.Lock() + defer c.lock.Unlock() + + if len(c.items) > 0 { + return fmt.Errorf("cannot add indexers to running index") + } + + return c.index.addIndexers(newIndexers) +} + +func (c *threadSafeMap) Resync() error { + // Nothing to do + return nil +} + +// NewThreadSafeStore creates a new instance of ThreadSafeStore. +func NewThreadSafeStore(indexers Indexers, indices Indices) ThreadSafeStore { + return &threadSafeMap{ + items: map[string]interface{}{}, + index: &storeIndex{ + indexers: indexers, + indices: indices, + }, + } +} diff --git a/vendor/k8s.io/client-go/tools/cache/undelta_store.go b/vendor/k8s.io/client-go/tools/cache/undelta_store.go new file mode 100644 index 00000000..220845dd --- /dev/null +++ b/vendor/k8s.io/client-go/tools/cache/undelta_store.go @@ -0,0 +1,89 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cache
+
+// UndeltaStore listens to incremental updates and sends complete state on every change.
+// It implements the Store interface so that it can receive a stream of mirrored objects
+// from Reflector. Whenever it receives any complete (Store.Replace) or incremental change
+// (Store.Add, Store.Update, Store.Delete), it sends the complete state by calling PushFunc.
+// It is thread-safe. It guarantees that every change (Add, Update, Replace, Delete) results
+// in one call to PushFunc, but sometimes PushFunc may be called twice with the same values.
+// PushFunc should be thread safe.
type UndeltaStore struct {
+	Store
+	PushFunc func([]interface{})
+}
+
+// Assert that it implements the Store interface.
+var _ Store = &UndeltaStore{}
+
+// Add inserts an object into the store and sends complete state by calling PushFunc.
+// Note about thread safety. The Store implementation (cache.cache) uses a lock for all methods.
+// In the functions below, the lock gets released and reacquired between the {Add,Delete,etc}
+// and the List. So, the following can happen, resulting in two identical calls to PushFunc.
+//	time   thread 1                thread 2
+//	0      UndeltaStore.Add(a)
+//	1                              UndeltaStore.Add(b)
+//	2      Store.Add(a)
+//	3                              Store.Add(b)
+//	4      Store.List() -> [a,b]
+//	5                              Store.List() -> [a,b]
+func (u *UndeltaStore) Add(obj interface{}) error {
+	if err := u.Store.Add(obj); err != nil {
+		return err
+	}
+	u.PushFunc(u.Store.List())
+	return nil
+}
+
+// Update sets an item in the cache to its updated state and sends complete state by calling PushFunc.
+func (u *UndeltaStore) Update(obj interface{}) error {
+	if err := u.Store.Update(obj); err != nil {
+		return err
+	}
+	u.PushFunc(u.Store.List())
+	return nil
+}
+
+// Delete removes an item from the cache and sends complete state by calling PushFunc.
+func (u *UndeltaStore) Delete(obj interface{}) error {
+	if err := u.Store.Delete(obj); err != nil {
+		return err
+	}
+	u.PushFunc(u.Store.List())
+	return nil
+}
+
+// Replace will delete the contents of current store, using instead the given list.
+// 'u' takes ownership of the list, you should not reference the list again
+// after calling this function.
+// The new contents' complete state will be sent by calling PushFunc after replacement.
+func (u *UndeltaStore) Replace(list []interface{}, resourceVersion string) error {
+	if err := u.Store.Replace(list, resourceVersion); err != nil {
+		return err
+	}
+	u.PushFunc(u.Store.List())
+	return nil
+}
+
+// NewUndeltaStore returns an UndeltaStore implemented with a Store.
+func NewUndeltaStore(pushFunc func([]interface{}), keyFunc KeyFunc) *UndeltaStore {
+	return &UndeltaStore{
+		Store:    NewStore(keyFunc),
+		PushFunc: pushFunc,
+	}
+}
diff --git a/vendor/k8s.io/client-go/tools/pager/pager.go b/vendor/k8s.io/client-go/tools/pager/pager.go
new file mode 100644
index 00000000..9ba988f6
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/pager/pager.go
@@ -0,0 +1,255 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pager
+
+import (
+	"context"
+	"fmt"
+
+	"k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metainternalversion "k8s.io/apimachinery/pkg/apis/meta/internalversion"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+const defaultPageSize = 500
+const defaultPageBufferSize = 10
+
+// ListPageFunc returns a list object for the given list options.
+type ListPageFunc func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error)
+
+// SimplePageFunc adapts a context-less list function into one that accepts a context.
+func SimplePageFunc(fn func(opts metav1.ListOptions) (runtime.Object, error)) ListPageFunc {
+	return func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
+		return fn(opts)
+	}
+}
+
+// ListPager assists client code in breaking large list queries into multiple
+// smaller chunks of PageSize or smaller. PageFn is expected to accept a
+// metav1.ListOptions that supports paging and return a list. The pager does
+// not alter the field or label selectors on the initial options list.
+type ListPager struct {
+	PageSize int64
+	PageFn   ListPageFunc
+
+	FullListIfExpired bool
+
+	// Number of pages to buffer
+	PageBufferSize int32
+}
+
+// New creates a new pager from the provided pager function using the default
+// options. It will fall back to a full list if an expiration error is encountered
+// as a last resort.
+func New(fn ListPageFunc) *ListPager {
+	return &ListPager{
+		PageSize:          defaultPageSize,
+		PageFn:            fn,
+		FullListIfExpired: true,
+		PageBufferSize:    defaultPageBufferSize,
+	}
+}
+
+// TODO: introduce other types of paging functions - such as those that retrieve from a list
+// of namespaces.
+
+// List returns a single list object, but attempts to retrieve smaller chunks from the
+// server to reduce the impact on the server. If the chunk attempt fails, it will load
+// the full list instead. The Limit field on options, if unset, will default to the page size.
+func (p *ListPager) List(ctx context.Context, options metav1.ListOptions) (runtime.Object, bool, error) {
+	if options.Limit == 0 {
+		options.Limit = p.PageSize
+	}
+	requestedResourceVersion := options.ResourceVersion
+	requestedResourceVersionMatch := options.ResourceVersionMatch
+	var list *metainternalversion.List
+	paginatedResult := false
+
+	for {
+		select {
+		case <-ctx.Done():
+			return nil, paginatedResult, ctx.Err()
+		default:
+		}
+
+		obj, err := p.PageFn(ctx, options)
+		if err != nil {
+			// Only fall back to a full list if an "Expired" error is returned, FullListIfExpired is true, and
+			// the "Expired" error occurred in page 2 or later (since a full list is intended to prevent a pager.List from
+			// failing when the resource version established by the first page request falls out of the compaction
+			// during the subsequent list requests).
+ if !errors.IsResourceExpired(err) || !p.FullListIfExpired || options.Continue == "" { + return nil, paginatedResult, err + } + // the list expired while we were processing, fall back to a full list at + // the requested ResourceVersion. + options.Limit = 0 + options.Continue = "" + options.ResourceVersion = requestedResourceVersion + options.ResourceVersionMatch = requestedResourceVersionMatch + result, err := p.PageFn(ctx, options) + return result, paginatedResult, err + } + m, err := meta.ListAccessor(obj) + if err != nil { + return nil, paginatedResult, fmt.Errorf("returned object must be a list: %v", err) + } + + // exit early and return the object we got if we haven't processed any pages + if len(m.GetContinue()) == 0 && list == nil { + return obj, paginatedResult, nil + } + + // initialize the list and fill its contents + if list == nil { + list = &metainternalversion.List{Items: make([]runtime.Object, 0, options.Limit+1)} + list.ResourceVersion = m.GetResourceVersion() + list.SelfLink = m.GetSelfLink() + } + if err := meta.EachListItem(obj, func(obj runtime.Object) error { + list.Items = append(list.Items, obj) + return nil + }); err != nil { + return nil, paginatedResult, err + } + + // if we have no more items, return the list + if len(m.GetContinue()) == 0 { + return list, paginatedResult, nil + } + + // set the next loop up + options.Continue = m.GetContinue() + // Clear the ResourceVersion(Match) on the subsequent List calls to avoid the + // `specifying resource version is not allowed when using continue` error. + // See https://github.com/kubernetes/kubernetes/issues/85221#issuecomment-553748143. + options.ResourceVersion = "" + options.ResourceVersionMatch = "" + // At this point, result is already paginated. + paginatedResult = true + } +} + +// EachListItem fetches runtime.Object items using this ListPager and invokes fn on each item. If +// fn returns an error, processing stops and that error is returned. If fn does not return an error, +// any error encountered while retrieving the list from the server is returned. If the context +// cancels or times out, the context error is returned. Since the list is retrieved in paginated +// chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the pagination list +// requests exceed the expiration limit of the apiserver being called. +// +// Items are retrieved in chunks from the server to reduce the impact on the server with up to +// ListPager.PageBufferSize chunks buffered concurrently in the background. +func (p *ListPager) EachListItem(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + return p.eachListChunkBuffered(ctx, options, func(obj runtime.Object) error { + return meta.EachListItem(obj, fn) + }) +} + +// eachListChunkBuffered fetches runtimeObject list chunks using this ListPager and invokes fn on +// each list chunk. If fn returns an error, processing stops and that error is returned. If fn does +// not return an error, any error encountered while retrieving the list from the server is +// returned. If the context cancels or times out, the context error is returned. Since the list is +// retrieved in paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if +// the pagination list requests exceed the expiration limit of the apiserver being called. +// +// Up to ListPager.PageBufferSize chunks are buffered concurrently in the background. 
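A sketch of how callers typically drive the pager above; the clientset and kubeconfig wiring are illustrative assumptions:

```go
package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/pager"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// The PageFn is any function matching ListPageFunc.
	p := pager.New(func(ctx context.Context, opts metav1.ListOptions) (runtime.Object, error) {
		return client.CoreV1().Pods("").List(ctx, opts)
	})
	p.PageSize = 100 // request 100-item chunks instead of the 500 default

	err = p.EachListItem(context.Background(), metav1.ListOptions{}, func(obj runtime.Object) error {
		pod := obj.(*v1.Pod)
		fmt.Println(pod.Namespace + "/" + pod.Name)
		return nil
	})
	if err != nil {
		panic(err)
	}
}
```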
+func (p *ListPager) eachListChunkBuffered(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + if p.PageBufferSize < 0 { + return fmt.Errorf("ListPager.PageBufferSize must be >= 0, got %d", p.PageBufferSize) + } + + // Ensure background goroutine is stopped if this call exits before all list items are + // processed. Cancelation error from this deferred cancel call is never returned to caller; + // either the list result has already been sent to bgResultC or the fn error is returned and + // the cancelation error is discarded. + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + chunkC := make(chan runtime.Object, p.PageBufferSize) + bgResultC := make(chan error, 1) + go func() { + defer utilruntime.HandleCrash() + + var err error + defer func() { + close(chunkC) + bgResultC <- err + }() + err = p.eachListChunk(ctx, options, func(chunk runtime.Object) error { + select { + case chunkC <- chunk: // buffer the chunk, this can block + case <-ctx.Done(): + return ctx.Err() + } + return nil + }) + }() + + for o := range chunkC { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + err := fn(o) + if err != nil { + return err // any fn error should be returned immediately + } + } + // promote the results of our background goroutine to the foreground + return <-bgResultC +} + +// eachListChunk fetches runtimeObject list chunks using this ListPager and invokes fn on each list +// chunk. If fn returns an error, processing stops and that error is returned. If fn does not return +// an error, any error encountered while retrieving the list from the server is returned. If the +// context cancels or times out, the context error is returned. Since the list is retrieved in +// paginated chunks, an "Expired" error (metav1.StatusReasonExpired) may be returned if the +// pagination list requests exceed the expiration limit of the apiserver being called. +func (p *ListPager) eachListChunk(ctx context.Context, options metav1.ListOptions, fn func(obj runtime.Object) error) error { + if options.Limit == 0 { + options.Limit = p.PageSize + } + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + obj, err := p.PageFn(ctx, options) + if err != nil { + return err + } + m, err := meta.ListAccessor(obj) + if err != nil { + return fmt.Errorf("returned object must be a list: %v", err) + } + if err := fn(obj); err != nil { + return err + } + // if we have no more items, return. + if len(m.GetContinue()) == 0 { + return nil + } + // set the next loop up + options.Continue = m.GetContinue() + } +} diff --git a/vendor/k8s.io/client-go/util/retry/OWNERS b/vendor/k8s.io/client-go/util/retry/OWNERS new file mode 100644 index 00000000..75736b5a --- /dev/null +++ b/vendor/k8s.io/client-go/util/retry/OWNERS @@ -0,0 +1,4 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: + - caesarxuchao diff --git a/vendor/k8s.io/client-go/util/retry/util.go b/vendor/k8s.io/client-go/util/retry/util.go new file mode 100644 index 00000000..0c6e504a --- /dev/null +++ b/vendor/k8s.io/client-go/util/retry/util.go @@ -0,0 +1,105 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package retry + +import ( + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/wait" +) + +// DefaultRetry is the recommended retry for a conflict where multiple clients +// are making changes to the same resource. +var DefaultRetry = wait.Backoff{ + Steps: 5, + Duration: 10 * time.Millisecond, + Factor: 1.0, + Jitter: 0.1, +} + +// DefaultBackoff is the recommended backoff for a conflict where a client +// may be attempting to make an unrelated modification to a resource under +// active management by one or more controllers. +var DefaultBackoff = wait.Backoff{ + Steps: 4, + Duration: 10 * time.Millisecond, + Factor: 5.0, + Jitter: 0.1, +} + +// OnError allows the caller to retry fn in case the error returned by fn is retriable +// according to the provided function. backoff defines the maximum retries and the wait +// interval between two retries. +func OnError(backoff wait.Backoff, retriable func(error) bool, fn func() error) error { + var lastErr error + err := wait.ExponentialBackoff(backoff, func() (bool, error) { + err := fn() + switch { + case err == nil: + return true, nil + case retriable(err): + lastErr = err + return false, nil + default: + return false, err + } + }) + if err == wait.ErrWaitTimeout { + err = lastErr + } + return err +} + +// RetryOnConflict is used to make an update to a resource when you have to worry about +// conflicts caused by other code making unrelated updates to the resource at the same +// time. fn should fetch the resource to be modified, make appropriate changes to it, try +// to update it, and return (unmodified) the error from the update function. On a +// successful update, RetryOnConflict will return nil. If the update function returns a +// "Conflict" error, RetryOnConflict will wait some amount of time as described by +// backoff, and then try again. On a non-"Conflict" error, or if it retries too many times +// and gives up, RetryOnConflict will return an error to the caller. +// +// err := retry.RetryOnConflict(retry.DefaultRetry, func() error { +// // Fetch the resource here; you need to refetch it on every try, since +// // if you got a conflict on the last update attempt then you need to get +// // the current version before making your own changes. +// pod, err := c.Pods("mynamespace").Get(name, metav1.GetOptions{}) +// if err != nil { +// return err +// } +// +// // Make whatever updates to the resource are needed +// pod.Status.Phase = v1.PodFailed +// +// // Try to update +// _, err = c.Pods("mynamespace").UpdateStatus(pod) +// // You have to return err itself here (not wrapped inside another error) +// // so that RetryOnConflict can identify it correctly. +// return err +// }) +// if err != nil { +// // May be conflict if max retries were hit, or may be something unrelated +// // like permissions or a network error +// return err +// } +// ... +// +// TODO: Make Backoff an interface? 
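The doc comment above already demonstrates RetryOnConflict, so here is a sketch of the more general OnError with a custom retriable predicate; the no-op operation stands in for a real API call:

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/client-go/util/retry"
)

func main() {
	attempts := 0
	err := retry.OnError(retry.DefaultBackoff, apierrors.IsServerTimeout, func() error {
		attempts++
		// A real caller would issue the API request here and return its error;
		// returning nil ends the retry loop successfully.
		return nil
	})
	fmt.Println(err, attempts) // <nil> 1
}
```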
+func RetryOnConflict(backoff wait.Backoff, fn func() error) error {
+	return OnError(backoff, errors.IsConflict, fn)
+}
diff --git a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go
index ac88682a..d53b49da 100644
--- a/vendor/k8s.io/klog/v2/internal/buffer/buffer.go
+++ b/vendor/k8s.io/klog/v2/internal/buffer/buffer.go
@@ -40,44 +40,22 @@ type Buffer struct {
 	next *Buffer
 }
 
-// Buffers manages the reuse of individual buffer instances. It is thread-safe.
-type Buffers struct {
-	// mu protects the free list. It is separate from the main mutex
-	// so buffers can be grabbed and printed to without holding the main lock,
-	// for better parallelization.
-	mu sync.Mutex
-
-	// freeList is a list of byte buffers, maintained under mu.
-	freeList *Buffer
+var buffers = sync.Pool{
+	New: func() interface{} {
+		return new(Buffer)
+	},
 }
 
 // GetBuffer returns a new, ready-to-use buffer.
-func (bl *Buffers) GetBuffer() *Buffer {
-	bl.mu.Lock()
-	b := bl.freeList
-	if b != nil {
-		bl.freeList = b.next
-	}
-	bl.mu.Unlock()
-	if b == nil {
-		b = new(Buffer)
-	} else {
-		b.next = nil
-		b.Reset()
-	}
+func GetBuffer() *Buffer {
+	b := buffers.Get().(*Buffer)
+	b.Reset()
 	return b
 }
 
 // PutBuffer returns a buffer to the free list.
-func (bl *Buffers) PutBuffer(b *Buffer) {
-	if b.Len() >= 256 {
-		// Let big buffers die a natural death.
-		return
-	}
-	bl.mu.Lock()
-	b.next = bl.freeList
-	bl.freeList = b
-	bl.mu.Unlock()
+func PutBuffer(b *Buffer) {
+	buffers.Put(b)
 }
 
 // Some custom tiny helper functions to print the log header efficiently.
diff --git a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
index ad6bf111..f9558c3d 100644
--- a/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
+++ b/vendor/k8s.io/klog/v2/internal/serialize/keyvalues.go
@@ -24,6 +24,10 @@ import (
 	"github.com/go-logr/logr"
 )
 
+type textWriter interface {
+	WriteText(*bytes.Buffer)
+}
+
 // WithValues implements LogSink.WithValues. The old key/value pairs are
 // assumed to be well-formed, the new ones are checked and padded if
 // necessary. It returns a new slice.
@@ -91,6 +95,51 @@ func MergeKVs(first, second []interface{}) []interface{} {
 	return merged
 }
 
+// MergeAndFormatKVs is a variant of MergeKVs which directly formats the key/value
+// pairs into a buffer.
+func MergeAndFormatKVs(b *bytes.Buffer, first, second []interface{}) {
+	if len(first) == 0 && len(second) == 0 {
+		// Nothing to do at all.
+		return
+	}
+
+	if len(first) == 0 && len(second)%2 == 0 {
+		// Nothing to be overridden, second slice is well-formed
+		// and can be used directly.
+		for i := 0; i < len(second); i += 2 {
+			KVFormat(b, second[i], second[i+1])
+		}
+		return
+	}
+
+	// Determine which keys are in the second slice so that we can skip
+	// them when iterating over the first one. The code intentionally
+	// favors performance over completeness: we assume that keys are string
+	// constants and thus compare equal when the string values are equal. A
+	// string constant being overridden by, for example, a fmt.Stringer is
+	// not handled.
+	overrides := map[interface{}]bool{}
+	for i := 0; i < len(second); i += 2 {
+		overrides[second[i]] = true
+	}
+	for i := 0; i < len(first); i += 2 {
+		key := first[i]
+		if overrides[key] {
+			continue
+		}
+		KVFormat(b, key, first[i+1])
+	}
+	// Round down.
+ l := len(second) + l = l / 2 * 2 + for i := 1; i < l; i += 2 { + KVFormat(b, second[i-1], second[i]) + } + if len(second)%2 == 1 { + KVFormat(b, second[len(second)-1], missingValue) + } +} + const missingValue = "(MISSING)" // KVListFormat serializes all key/value pairs into the provided buffer. @@ -104,66 +153,74 @@ func KVListFormat(b *bytes.Buffer, keysAndValues ...interface{}) { } else { v = missingValue } - b.WriteByte(' ') - // Keys are assumed to be well-formed according to - // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments - // for the sake of performance. Keys with spaces, - // special characters, etc. will break parsing. - if sK, ok := k.(string); ok { - // Avoid one allocation when the key is a string, which - // normally it should be. - b.WriteString(sK) - } else { - b.WriteString(fmt.Sprintf("%s", k)) - } + KVFormat(b, k, v) + } +} + +// KVFormat serializes one key/value pair into the provided buffer. +// A space gets inserted before the pair. +func KVFormat(b *bytes.Buffer, k, v interface{}) { + b.WriteByte(' ') + // Keys are assumed to be well-formed according to + // https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments + // for the sake of performance. Keys with spaces, + // special characters, etc. will break parsing. + if sK, ok := k.(string); ok { + // Avoid one allocation when the key is a string, which + // normally it should be. + b.WriteString(sK) + } else { + b.WriteString(fmt.Sprintf("%s", k)) + } - // The type checks are sorted so that more frequently used ones - // come first because that is then faster in the common - // cases. In Kubernetes, ObjectRef (a Stringer) is more common - // than plain strings - // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). - switch v := v.(type) { - case fmt.Stringer: - writeStringValue(b, true, StringerToString(v)) + // The type checks are sorted so that more frequently used ones + // come first because that is then faster in the common + // cases. In Kubernetes, ObjectRef (a Stringer) is more common + // than plain strings + // (https://github.com/kubernetes/kubernetes/pull/106594#issuecomment-975526235). + switch v := v.(type) { + case textWriter: + writeTextWriterValue(b, v) + case fmt.Stringer: + writeStringValue(b, true, StringerToString(v)) + case string: + writeStringValue(b, true, v) + case error: + writeStringValue(b, true, ErrorToString(v)) + case logr.Marshaler: + value := MarshalerToValue(v) + // A marshaler that returns a string is useful for + // delayed formatting of complex values. We treat this + // case like a normal string. This is useful for + // multi-line support. + // + // We could do this by recursively formatting a value, + // but that comes with the risk of infinite recursion + // if a marshaler returns itself. Instead we call it + // only once and rely on it returning the intended + // value directly. + switch value := value.(type) { case string: - writeStringValue(b, true, v) - case error: - writeStringValue(b, true, ErrorToString(v)) - case logr.Marshaler: - value := MarshalerToValue(v) - // A marshaler that returns a string is useful for - // delayed formatting of complex values. We treat this - // case like a normal string. This is useful for - // multi-line support. 
-			//
-			// We could do this by recursively formatting a value,
-			// but that comes with the risk of infinite recursion
-			// if a marshaler returns itself. Instead we call it
-			// only once and rely on it returning the intended
-			// value directly.
-			switch value := value.(type) {
-			case string:
-				writeStringValue(b, true, value)
-			default:
-				writeStringValue(b, false, fmt.Sprintf("%+v", value))
-			}
-		case []byte:
-			// In https://github.com/kubernetes/klog/pull/237 it was decided
-			// to format byte slices with "%+q". The advantages of that are:
-			// - readable output if the bytes happen to be printable
-			// - non-printable bytes get represented as unicode escape
-			//   sequences (\uxxxx)
-			//
-			// The downsides are that we cannot use the faster
-			// strconv.Quote here and that multi-line output is not
-			// supported. If developers know that a byte array is
-			// printable and they want multi-line output, they can
-			// convert the value to string before logging it.
-			b.WriteByte('=')
-			b.WriteString(fmt.Sprintf("%+q", v))
+			writeStringValue(b, true, value)
 		default:
-			writeStringValue(b, false, fmt.Sprintf("%+v", v))
+			writeStringValue(b, false, fmt.Sprintf("%+v", value))
 		}
+	case []byte:
+		// In https://github.com/kubernetes/klog/pull/237 it was decided
+		// to format byte slices with "%+q". The advantages of that are:
+		// - readable output if the bytes happen to be printable
+		// - non-printable bytes get represented as unicode escape
+		//   sequences (\uxxxx)
+		//
+		// The downsides are that we cannot use the faster
+		// strconv.Quote here and that multi-line output is not
+		// supported. If developers know that a byte array is
+		// printable and they want multi-line output, they can
+		// convert the value to string before logging it.
+		b.WriteByte('=')
+		b.WriteString(fmt.Sprintf("%+q", v))
+	default:
+		writeStringValue(b, false, fmt.Sprintf("%+v", v))
 	}
 }
@@ -203,6 +260,16 @@ func ErrorToString(err error) (ret string) {
 	return
 }
 
+func writeTextWriterValue(b *bytes.Buffer, v textWriter) {
+	b.WriteRune('=')
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintf(b, `"<panic: %s>"`, err)
+		}
+	}()
+	v.WriteText(b)
+}
+
 func writeStringValue(b *bytes.Buffer, quote bool, v string) {
 	data := []byte(v)
 	index := bytes.IndexByte(data, '\n')
diff --git a/vendor/k8s.io/klog/v2/k8s_references.go b/vendor/k8s.io/klog/v2/k8s_references.go
index 2c218f69..ecd3f8b6 100644
--- a/vendor/k8s.io/klog/v2/k8s_references.go
+++ b/vendor/k8s.io/klog/v2/k8s_references.go
@@ -17,8 +17,10 @@ limitations under the License.
 package klog
 
 import (
+	"bytes"
 	"fmt"
 	"reflect"
+	"strings"
 
 	"github.com/go-logr/logr"
 )
@@ -31,11 +33,30 @@ type ObjectRef struct {
 
 func (ref ObjectRef) String() string {
 	if ref.Namespace != "" {
-		return fmt.Sprintf("%s/%s", ref.Namespace, ref.Name)
+		var builder strings.Builder
+		builder.Grow(len(ref.Namespace) + len(ref.Name) + 1)
+		builder.WriteString(ref.Namespace)
+		builder.WriteRune('/')
+		builder.WriteString(ref.Name)
+		return builder.String()
 	}
 	return ref.Name
 }
 
+func (ref ObjectRef) WriteText(out *bytes.Buffer) {
+	out.WriteRune('"')
+	ref.writeUnquoted(out)
+	out.WriteRune('"')
+}
+
+func (ref ObjectRef) writeUnquoted(out *bytes.Buffer) {
+	if ref.Namespace != "" {
+		out.WriteString(ref.Namespace)
+		out.WriteRune('/')
+	}
+	out.WriteString(ref.Name)
+}
+
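What the textWriter fast path above buys in practice: a klog.KObj value is written straight into the output buffer through WriteText instead of going through a fmt.Stringer round trip. A minimal sketch:

```go
package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "demo", Name: "web-0"}}

	// Renders as: ... "Syncing pod" pod="demo/web-0"
	// KObj returns an ObjectRef, whose WriteText method above is picked
	// up by the serializer's textWriter case.
	klog.InfoS("Syncing pod", "pod", klog.KObj(pod))
	klog.Flush()
}
```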
 // MarshalLog ensures that loggers with support for structured output will log
 // as a struct by removing the String method via a custom type.
 func (ref ObjectRef) MarshalLog() interface{} {
@@ -117,31 +138,31 @@ var _ fmt.Stringer = kobjSlice{}
 var _ logr.Marshaler = kobjSlice{}
 
 func (ks kobjSlice) String() string {
-	objectRefs, err := ks.process()
-	if err != nil {
-		return err.Error()
+	objectRefs, errStr := ks.process()
+	if errStr != "" {
+		return errStr
 	}
 	return fmt.Sprintf("%v", objectRefs)
 }
 
 func (ks kobjSlice) MarshalLog() interface{} {
-	objectRefs, err := ks.process()
-	if err != nil {
-		return err.Error()
+	objectRefs, errStr := ks.process()
+	if errStr != "" {
+		return errStr
 	}
 	return objectRefs
 }
 
-func (ks kobjSlice) process() ([]interface{}, error) {
+func (ks kobjSlice) process() (objs []interface{}, err string) {
 	s := reflect.ValueOf(ks.arg)
 	switch s.Kind() {
 	case reflect.Invalid:
 		// nil parameter, print as nil.
-		return nil, nil
+		return nil, ""
 	case reflect.Slice:
 		// Okay, handle below.
 	default:
-		return nil, fmt.Errorf("<KObjSlice needs a slice, got type %T>", ks.arg)
+		return nil, fmt.Sprintf("<KObjSlice needs a slice, got type %T>", ks.arg)
 	}
 	objectRefs := make([]interface{}, 0, s.Len())
 	for i := 0; i < s.Len(); i++ {
@@ -151,8 +172,41 @@ func (ks kobjSlice) process() ([]interface{}, error) {
 		} else if v, ok := item.(KMetadata); ok {
 			objectRefs = append(objectRefs, KObj(v))
 		} else {
-			return nil, fmt.Errorf("<KObjSlice needs a slice of KMetadata, got type %T>", item)
+			return nil, fmt.Sprintf("<KObjSlice needs a slice of KMetadata, got type %T>", item)
+		}
+	}
+	return objectRefs, ""
+}
+
+var nilToken = []byte("<nil>")
+
+func (ks kobjSlice) WriteText(out *bytes.Buffer) {
+	s := reflect.ValueOf(ks.arg)
+	switch s.Kind() {
+	case reflect.Invalid:
+		// nil parameter, print as empty slice.
+		out.WriteString("[]")
+		return
+	case reflect.Slice:
+		// Okay, handle below.
+	default:
+		fmt.Fprintf(out, `"<KObjSlice needs a slice, got type %T>"`, ks.arg)
+		return
+	}
+	out.Write([]byte{'['})
+	defer out.Write([]byte{']'})
+	for i := 0; i < s.Len(); i++ {
+		if i > 0 {
+			out.Write([]byte{' '})
+		}
+		item := s.Index(i).Interface()
+		if item == nil {
+			out.Write(nilToken)
+		} else if v, ok := item.(KMetadata); ok {
+			KObj(v).writeUnquoted(out)
+		} else {
+			fmt.Fprintf(out, "<KObjSlice needs a slice of KMetadata, got type %T>", item)
+			return
 		}
 	}
-	return objectRefs, nil
 }
diff --git a/vendor/k8s.io/klog/v2/klog.go b/vendor/k8s.io/klog/v2/klog.go
index 1bd11b67..c5d98ad3 100644
--- a/vendor/k8s.io/klog/v2/klog.go
+++ b/vendor/k8s.io/klog/v2/klog.go
@@ -532,11 +532,6 @@ func (s settings) deepCopy() settings {
 type loggingT struct {
 	settings
 
-	// bufferCache maintains the free list. It uses its own mutex
-	// so buffers can be grabbed and printed to without holding the main lock,
-	// for better parallelization.
-	bufferCache buffer.Buffers
-
 	// flushD holds a flushDaemon that frequently flushes log file buffers.
 	// Uses its own mutex.
 	flushD *flushDaemon
@@ -664,7 +659,7 @@ func (l *loggingT) header(s severity.Severity, depth int) (*buffer.Buffer, strin
 // formatHeader formats a log header using the provided file name and line number.
func (l *loggingT) formatHeader(s severity.Severity, file string, line int) *buffer.Buffer { - buf := l.bufferCache.GetBuffer() + buf := buffer.GetBuffer() if l.skipHeaders { return buf } @@ -682,8 +677,8 @@ func (l *loggingT) printlnDepth(s severity.Severity, logger *logr.Logger, filter // if logger is set, we clear the generated header as we rely on the backing // logger implementation to print headers if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -701,8 +696,8 @@ func (l *loggingT) printDepth(s severity.Severity, logger *logr.Logger, filter L // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -723,8 +718,8 @@ func (l *loggingT) printfDepth(s severity.Severity, logger *logr.Logger, filter // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { format, args = filter.FilterF(format, args) @@ -744,8 +739,8 @@ func (l *loggingT) printWithFileLine(s severity.Severity, logger *logr.Logger, f // if logr is set, we clear the generated header as we rely on the backing // logr implementation to print headers if logger != nil { - l.bufferCache.PutBuffer(buf) - buf = l.bufferCache.GetBuffer() + buffer.PutBuffer(buf) + buf = buffer.GetBuffer() } if filter != nil { args = filter.Filter(args) @@ -785,7 +780,7 @@ func (l *loggingT) infoS(logger *logr.Logger, filter LogFilter, depth int, msg s // set log severity by s func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, keysAndValues ...interface{}) { // Only create a new buffer if we don't have one cached. - b := l.bufferCache.GetBuffer() + b := buffer.GetBuffer() // The message is always quoted, even if it contains line breaks. // If developers want multi-line output, they should use a small, fixed // message and put the multi-line output into a value. @@ -796,7 +791,7 @@ func (l *loggingT) printS(err error, s severity.Severity, depth int, msg string, serialize.KVListFormat(&b.Buffer, keysAndValues...) l.printDepth(s, logging.logger, nil, depth+1, &b.Buffer) // Make the buffer available for reuse. - l.bufferCache.PutBuffer(b) + buffer.PutBuffer(b) } // redirectBuffer is used to set an alternate destination for the logs @@ -948,7 +943,7 @@ func (l *loggingT) output(s severity.Severity, log *logr.Logger, buf *buffer.Buf timeoutFlush(ExitFlushTimeout) OsExit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. } - l.bufferCache.PutBuffer(buf) + buffer.PutBuffer(buf) if stats := severityStats[s]; stats != nil { atomic.AddInt64(&stats.lines, 1) @@ -1313,6 +1308,13 @@ func newVerbose(level Level, b bool) Verbose { // less than or equal to the value of the -vmodule pattern matching the source file // containing the call. func V(level Level) Verbose { + return VDepth(1, level) +} + +// VDepth is a variant of V that accepts a number of stack frames that will be +// skipped when checking the -vmodule patterns. VDepth(0) is equivalent to +// V(). 
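A sketch of why the VDepth variant exists: a logging helper can charge the -vmodule source-file check to its caller instead of itself. The debugf wrapper here is hypothetical; VDepth and InfofDepth are the klog APIs from this vendor bump:

```go
package main

import "k8s.io/klog/v2"

// debugf is a hypothetical wrapper; depth 1 makes -vmodule patterns
// match the file of debugf's caller rather than this file.
func debugf(format string, args ...interface{}) {
	if klog.VDepth(1, 4).Enabled() {
		klog.InfofDepth(1, format, args...)
	}
}

func main() {
	klog.InitFlags(nil)
	debugf("starting %s", "demo")
	klog.Flush()
}
```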
+func VDepth(depth int, level Level) Verbose { // This function tries hard to be cheap unless there's work to do. // The fast path is two atomic loads and compares. @@ -1329,7 +1331,7 @@ func V(level Level) Verbose { // but if V logging is enabled we're slow anyway. logging.mu.Lock() defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { + if runtime.Callers(2+depth, logging.pcs[:]) == 0 { return newVerbose(level, false) } // runtime.Callers returns "return PCs", but we want diff --git a/vendor/k8s.io/klog/v2/klogr.go b/vendor/k8s.io/klog/v2/klogr.go index 027a4014..15de00e2 100644 --- a/vendor/k8s.io/klog/v2/klogr.go +++ b/vendor/k8s.io/klog/v2/klogr.go @@ -42,19 +42,21 @@ func (l *klogger) Init(info logr.RuntimeInfo) { l.callDepth += info.CallDepth } -func (l klogger) Info(level int, msg string, kvList ...interface{}) { +func (l *klogger) Info(level int, msg string, kvList ...interface{}) { merged := serialize.MergeKVs(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg } - V(Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) + // Skip this function. + VDepth(l.callDepth+1, Level(level)).InfoSDepth(l.callDepth+1, msg, merged...) } -func (l klogger) Enabled(level int) bool { - return V(Level(level)).Enabled() +func (l *klogger) Enabled(level int) bool { + // Skip this function and logr.Logger.Info where Enabled is called. + return VDepth(l.callDepth+2, Level(level)).Enabled() } -func (l klogger) Error(err error, msg string, kvList ...interface{}) { +func (l *klogger) Error(err error, msg string, kvList ...interface{}) { merged := serialize.MergeKVs(l.values, kvList) if l.prefix != "" { msg = l.prefix + ": " + msg diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index 24f2b0e8..1a6c12e1 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -246,38 +246,42 @@ var schemaTypeFormatMap = map[string]typeInfo{ // the spec does not need to be simple type,format) or can even return a simple type,format (e.g. IntOrString). For simple // type formats, the benefit of adding OpenAPIDefinitionGetter interface is to keep both type and property documentation. // Example: -// type Sample struct { -// ... -// // port of the server -// port IntOrString -// ... -// } +// +// type Sample struct { +// ... +// // port of the server +// port IntOrString +// ... +// } +// // // IntOrString documentation... // type IntOrString { ... } // // Adding IntOrString to this function: -// "port" : { -// format: "string", -// type: "int-or-string", -// Description: "port of the server" -// } +// +// "port" : { +// format: "string", +// type: "int-or-string", +// Description: "port of the server" +// } // // Implement OpenAPIDefinitionGetter for IntOrString: // -// "port" : { -// $Ref: "#/definitions/IntOrString" -// Description: "port of the server" -// } +// "port" : { +// $Ref: "#/definitions/IntOrString" +// Description: "port of the server" +// } +// // ... // definitions: -// { -// "IntOrString": { -// format: "string", -// type: "int-or-string", -// Description: "IntOrString documentation..." // new -// } -// } // +// { +// "IntOrString": { +// format: "string", +// type: "int-or-string", +// Description: "IntOrString documentation..." 
// new +// } +// } func OpenAPITypeFormat(typeName string) (string, string) { mapped, ok := schemaTypeFormatMap[typeName] if !ok { diff --git a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go index ec4adcde..2c730f1b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go +++ b/vendor/k8s.io/kube-openapi/pkg/handler3/handler.go @@ -21,7 +21,6 @@ import ( "crypto/sha512" "encoding/json" "fmt" - "mime" "net/http" "net/url" "path" @@ -41,15 +40,9 @@ import ( ) const ( - jsonExt = ".json" - - mimeJson = "application/json" - // TODO(mehdy): change @68f4ded to a version tag when gnostic add version tags. - mimePb = "application/com.github.googleapis.gnostic.OpenAPIv3@68f4ded+protobuf" - mimePbGz = "application/x-gzip" - - subTypeProtobuf = "com.github.proto-openapi.spec.v3@v1.0+protobuf" - subTypeJSON = "json" + subTypeProtobufDeprecated = "com.github.proto-openapi.spec.v3@v1.0+protobuf" + subTypeProtobuf = "com.github.proto-openapi.spec.v3.v1.0+protobuf" + subTypeJSON = "json" ) // OpenAPIV3Discovery is the format of the Discovery document for OpenAPI V3 @@ -84,12 +77,6 @@ type OpenAPIV3Group struct { etagCache handler.HandlerCache } -func init() { - mime.AddExtensionType(".json", mimeJson) - mime.AddExtensionType(".pb-v1", mimePb) - mime.AddExtensionType(".gz", mimePbGz) -} - func computeETag(data []byte) string { if data == nil { return "" @@ -154,7 +141,7 @@ func (o *OpenAPIService) getSingleGroupBytes(getType string, group string) ([]by } etagBytes, err := v.etagCache.Get() return specBytes, string(etagBytes), v.lastModified, err - } else if getType == subTypeProtobuf { + } else if getType == subTypeProtobuf || getType == subTypeProtobufDeprecated { specPb, err := v.pbCache.Get() if err != nil { return nil, "", v.lastModified, err @@ -191,6 +178,8 @@ func ToV3ProtoBinary(json []byte) ([]byte, error) { func (o *OpenAPIService) HandleDiscovery(w http.ResponseWriter, r *http.Request) { data, _ := o.getGroupBytes() + w.Header().Set("Etag", strconv.Quote(computeETag(data))) + w.Header().Set("Content-Type", "application/json") http.ServeContent(w, r, "/openapi/v3", time.Now(), bytes.NewReader(data)) } @@ -210,11 +199,13 @@ func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Reque } accepted := []struct { - Type string - SubType string + Type string + SubType string + ReturnedContentType string }{ - {"application", subTypeJSON}, - {"application", subTypeProtobuf}, + {"application", subTypeJSON, "application/" + subTypeJSON}, + {"application", subTypeProtobuf, "application/" + subTypeProtobuf}, + {"application", subTypeProtobufDeprecated, "application/" + subTypeProtobuf}, } for _, clause := range clauses { @@ -229,6 +220,9 @@ func (o *OpenAPIService) HandleGroupVersion(w http.ResponseWriter, r *http.Reque if err != nil { return } + // Set Content-Type header in the reponse + w.Header().Set("Content-Type", accepts.ReturnedContentType) + // ETag must be enclosed in double quotes: https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/ETag w.Header().Set("Etag", strconv.Quote(etag)) diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go index 3ff3c8d8..b65203b6 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/flags.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/flags.go @@ -18,3 +18,7 @@ package internal // Used by tests to selectively disable experimental JSON unmarshaler var UseOptimizedJSONUnmarshaling bool = true +var 
UseOptimizedJSONUnmarshalingV3 bool = false + +// Used by tests to selectively disable experimental JSON marshaler +var UseOptimizedJSONMarshaling bool = true diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go b/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go new file mode 100644 index 00000000..7393bacf --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/internal/serialization.go @@ -0,0 +1,65 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "github.com/go-openapi/jsonreference" + jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" +) + +// DeterministicMarshal calls the jsonv2 library with the deterministic +// flag in order to have stable marshaling. +func DeterministicMarshal(in any) ([]byte, error) { + return jsonv2.MarshalOptions{Deterministic: true}.Marshal(jsonv2.EncodeOptions{}, in) +} + +// JSONRefFromMap populates a json reference object if the map v contains a $ref key. +func JSONRefFromMap(jsonRef *jsonreference.Ref, v map[string]interface{}) error { + if v == nil { + return nil + } + if vv, ok := v["$ref"]; ok { + if str, ok := vv.(string); ok { + ref, err := jsonreference.New(str) + if err != nil { + return err + } + *jsonRef = ref + } + } + return nil +} + +// SanitizeExtensions sanitizes the input map such that non extension +// keys (non x-*, X-*) keys are dropped from the map. Returns the new +// modified map, or nil if the map is now empty. +func SanitizeExtensions(e map[string]interface{}) map[string]interface{} { + for k := range e { + if !IsExtensionKey(k) { + delete(e, k) + } + } + if len(e) == 0 { + e = nil + } + return e +} + +// IsExtensionKey returns true if the input string is of format x-* or X-* +func IsExtensionKey(k string) bool { + return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-' +} diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go index febde20f..e6c6216f 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal.go @@ -34,6 +34,13 @@ type MarshalOptions struct { // unknown JSON object members. DiscardUnknownMembers bool + // Deterministic specifies that the same input value will be serialized + // as the exact same output bytes. Different processes of + // the same program will serialize equal values to the same bytes, + // but different versions of the same program are not guaranteed + // to produce the exact same sequence of bytes. + Deterministic bool + // formatDepth is the depth at which we respect the format flag. formatDepth int // format is custom formatting for the value at the specified depth. 
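// Sketch (not part of the vendored diff): the hunk above adds the
// Deterministic marshal option that DeterministicMarshal in
// pkg/internal/serialization.go relies on. The package lives under
// pkg/internal and cannot be imported from outside kube-openapi, so this
// only illustrates the call shape.
package main

import (
	"fmt"

	jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
)

func main() {
	in := map[string]int{"b": 2, "c": 3, "a": 1}
	// Without Deterministic, Go's randomized map iteration makes the output
	// bytes vary between runs; with it, object members are emitted in sorted
	// name order (RFC 8785), so equal inputs serialize to identical bytes.
	out, err := jsonv2.MarshalOptions{Deterministic: true}.Marshal(jsonv2.EncodeOptions{}, in)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"a":1,"b":2,"c":3}
}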
diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go index 204d0648..c62b1f32 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_any.go @@ -62,7 +62,7 @@ func unmarshalValueAny(uo UnmarshalOptions, dec *Decoder) (any, error) { } return dec.stringCache.make(val), nil case '0': - fv, _ := parseFloat(val, 64) // ignore error since readValue gaurantees val is valid + fv, _ := parseFloat(val, 64) // ignore error since readValue guarantees val is valid return fv, nil default: panic("BUG: invalid kind: " + k.String()) @@ -99,13 +99,32 @@ func marshalObjectAny(mo MarshalOptions, enc *Encoder, obj map[string]any) error if !enc.options.AllowInvalidUTF8 { enc.tokens.last.disableNamespace() } - for name, val := range obj { - if err := enc.WriteToken(String(name)); err != nil { - return err + if !mo.Deterministic || len(obj) <= 1 { + for name, val := range obj { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + if err := marshalValueAny(mo, enc, val); err != nil { + return err + } } - if err := marshalValueAny(mo, enc, val); err != nil { - return err + } else { + names := getStrings(len(obj)) + var i int + for name := range obj { + (*names)[i] = name + i++ + } + names.Sort() + for _, name := range *names { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + if err := marshalValueAny(mo, enc, obj[name]); err != nil { + return err + } } + putStrings(names) } if err := enc.WriteToken(ObjectEnd); err != nil { return err diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go index fcf3d500..fd26eba3 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_default.go @@ -5,6 +5,7 @@ package json import ( + "bytes" "encoding/base32" "encoding/base64" "encoding/hex" @@ -12,6 +13,7 @@ import ( "fmt" "math" "reflect" + "sort" "strconv" "sync" ) @@ -228,13 +230,7 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler { } } val := enc.UnusedBuffer() - var b []byte - if va.Kind() == reflect.Array { - // TODO(https://go.dev/issue/47066): Avoid reflect.Value.Slice. 
- b = va.Slice(0, va.Len()).Bytes() - } else { - b = va.Bytes() - } + b := va.Bytes() n := len(`"`) + encodedLen(len(b)) + len(`"`) if cap(val) < n { val = make([]byte, n) @@ -248,19 +244,19 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler { } unmarshalDefault := fncs.unmarshal fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { - decode, decodedLen := decodeBase64, decodedLenBase64 + decode, decodedLen, encodedLen := decodeBase64, decodedLenBase64, encodedLenBase64 if uo.format != "" && uo.formatDepth == dec.tokens.depth() { switch uo.format { case "base64": - decode, decodedLen = decodeBase64, decodedLenBase64 + decode, decodedLen, encodedLen = decodeBase64, decodedLenBase64, encodedLenBase64 case "base64url": - decode, decodedLen = decodeBase64URL, decodedLenBase64URL + decode, decodedLen, encodedLen = decodeBase64URL, decodedLenBase64URL, encodedLenBase64URL case "base32": - decode, decodedLen = decodeBase32, decodedLenBase32 + decode, decodedLen, encodedLen = decodeBase32, decodedLenBase32, encodedLenBase32 case "base32hex": - decode, decodedLen = decodeBase32Hex, decodedLenBase32Hex + decode, decodedLen, encodedLen = decodeBase32Hex, decodedLenBase32Hex, encodedLenBase32Hex case "base16", "hex": - decode, decodedLen = decodeBase16, decodedLenBase16 + decode, decodedLen, encodedLen = decodeBase16, decodedLenBase16, encodedLenBase16 case "array": uo.format = "" return unmarshalDefault(uo, dec, va) @@ -290,23 +286,28 @@ func makeBytesArshaler(t reflect.Type, fncs *arshaler) *arshaler { n-- } n = decodedLen(n) - var b []byte + b := va.Bytes() if va.Kind() == reflect.Array { - // TODO(https://go.dev/issue/47066): Avoid reflect.Value.Slice. - b = va.Slice(0, va.Len()).Bytes() if n != len(b) { err := fmt.Errorf("decoded base64 length of %d mismatches array length of %d", n, len(b)) return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} } } else { - b = va.Bytes() if b == nil || cap(b) < n { b = make([]byte, n) } else { b = b[:n] } } - if _, err := decode(b, val); err != nil { + n2, err := decode(b, val) + if err == nil && len(val) != encodedLen(n2) { + // TODO(https://go.dev/issue/53845): RFC 4648, section 3.3, + // specifies that non-alphabet characters must be rejected. + // Unfortunately, the "base32" and "base64" packages allow + // '\r' and '\n' characters by default. 
+ err = errors.New("illegal data at input byte " + strconv.Itoa(bytes.IndexAny(val, "\r\n"))) + } + if err != nil { return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} } if va.Kind() == reflect.Slice { @@ -412,7 +413,7 @@ func makeUintArshaler(t reflect.Type) *arshaler { return nil } - x := math.Float64frombits(uint64(va.Uint())) + x := math.Float64frombits(va.Uint()) return enc.writeNumber(x, rawUintNumber, mo.StringifyNumbers) } fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { @@ -450,7 +451,7 @@ func makeUintArshaler(t reflect.Type) *arshaler { err := fmt.Errorf("cannot parse %q as unsigned integer: %w", val, strconv.ErrRange) return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} } - va.SetUint(uint64(n)) + va.SetUint(n) return nil } return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t} @@ -549,23 +550,9 @@ func makeFloatArshaler(t reflect.Type) *arshaler { return &fncs } -var mapIterPool = sync.Pool{ - New: func() any { return new(reflect.MapIter) }, -} - -func getMapIter(mv reflect.Value) *reflect.MapIter { - iter := mapIterPool.Get().(*reflect.MapIter) - iter.Reset(mv) - return iter -} -func putMapIter(iter *reflect.MapIter) { - iter.Reset(reflect.Value{}) // allow underlying map to be garbage collected - mapIterPool.Put(iter) -} - func makeMapArshaler(t reflect.Type) *arshaler { // NOTE: The logic below disables namespaces for tracking duplicate names - // when handling map keys with a unique represention. + // when handling map keys with a unique representation. // NOTE: Values retrieved from a map are not addressable, // so we shallow copy the values to make them addressable and @@ -641,24 +628,76 @@ func makeMapArshaler(t reflect.Type) *arshaler { enc.tokens.last.disableNamespace() } - // NOTE: Map entries are serialized in a non-deterministic order. - // Users that need stable output should call RawValue.Canonicalize. - // TODO(go1.19): Remove use of a sync.Pool with reflect.MapIter. - // Calling reflect.Value.MapRange no longer allocates. - // See https://go.dev/cl/400675. - iter := getMapIter(va.Value) - defer putMapIter(iter) - for iter.Next() { - k.SetIterKey(iter) - if err := marshalKey(mko, enc, k); err != nil { - // TODO: If err is errMissingName, then wrap it as a - // SemanticError since this key type cannot be serialized - // as a JSON string. - return err + switch { + case !mo.Deterministic || n <= 1: + for iter := va.Value.MapRange(); iter.Next(); { + k.SetIterKey(iter) + if err := marshalKey(mko, enc, k); err != nil { + // TODO: If err is errMissingName, then wrap it as a + // SemanticError since this key type cannot be serialized + // as a JSON string. + return err + } + v.SetIterValue(iter) + if err := marshalVal(mo, enc, v); err != nil { + return err + } } - v.SetIterValue(iter) - if err := marshalVal(mo, enc, v); err != nil { - return err + case !nonDefaultKey && t.Key().Kind() == reflect.String: + names := getStrings(n) + for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ { + k.SetIterKey(iter) + (*names)[i] = k.String() + } + names.Sort() + for _, name := range *names { + if err := enc.WriteToken(String(name)); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf. 
+ k.SetString(name) + v.Set(va.MapIndex(k.Value)) + if err := marshalVal(mo, enc, v); err != nil { + return err + } + } + putStrings(names) + default: + type member struct { + name string // unquoted name + key addressableValue + } + members := make([]member, n) + keys := reflect.MakeSlice(reflect.SliceOf(t.Key()), n, n) + for i, iter := 0, va.Value.MapRange(); i < n && iter.Next(); i++ { + // Marshal the member name. + k := addressableValue{keys.Index(i)} // indexed slice element is always addressable + k.SetIterKey(iter) + if err := marshalKey(mko, enc, k); err != nil { + // TODO: If err is errMissingName, then wrap it as a + // SemanticError since this key type cannot be serialized + // as a JSON string. + return err + } + name := enc.unwriteOnlyObjectMemberName() + members[i] = member{name, k} + } + // TODO: If AllowDuplicateNames is enabled, then sort according + // to reflect.Value as well if the names are equal. + // See internal/fmtsort. + // TODO(https://go.dev/issue/47619): Use slices.SortFunc instead. + sort.Slice(members, func(i, j int) bool { + return lessUTF16(members[i].name, members[j].name) + }) + for _, member := range members { + if err := enc.WriteToken(String(member.name)); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use v.SetMapIndexOf. + v.Set(va.MapIndex(member.key.Value)) + if err := marshalVal(mo, enc, v); err != nil { + return err + } } } } @@ -856,7 +895,7 @@ func makeStructArshaler(t reflect.Type) *arshaler { // 2. The object namespace is guaranteed to be disabled. // 3. The object name is guaranteed to be valid and pre-escaped. // 4. There is no need to flush the buffer (for unwrite purposes). - // 5. There is no possibility of an error occuring. + // 5. There is no possibility of an error occurring. if optimizeCommon { // Append any delimiters or optional whitespace. if enc.tokens.last.length() > 0 { @@ -996,7 +1035,7 @@ func makeStructArshaler(t reflect.Type) *arshaler { if fields.inlinedFallback == nil { // Skip unknown value since we have no place to store it. 
- if err := dec.skipValue(); err != nil { + if err := dec.SkipValue(); err != nil { return err } } else { diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go index 7476eda3..258a9824 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_inlined.go @@ -5,6 +5,7 @@ package json import ( + "bytes" "errors" "reflect" ) @@ -89,35 +90,61 @@ func marshalInlinedFallbackAll(mo MarshalOptions, enc *Encoder, va addressableVa } return nil } else { - if v.Len() == 0 { + m := v // must be a map[string]V + n := m.Len() + if n == 0 { return nil } - m := v + mk := newAddressableValue(stringType) mv := newAddressableValue(m.Type().Elem()) - for iter := m.MapRange(); iter.Next(); { - b, err := appendString(enc.UnusedBuffer(), iter.Key().String(), !enc.options.AllowInvalidUTF8, nil) + marshalKey := func(mk addressableValue) error { + b, err := appendString(enc.UnusedBuffer(), mk.String(), !enc.options.AllowInvalidUTF8, nil) if err != nil { return err } if insertUnquotedName != nil { - isVerbatim := consumeSimpleString(b) == len(b) + isVerbatim := bytes.IndexByte(b, '\\') < 0 name := unescapeStringMayCopy(b, isVerbatim) if !insertUnquotedName(name) { return &SyntacticError{str: "duplicate name " + string(b) + " in object"} } } - if err := enc.WriteValue(b); err != nil { - return err + return enc.WriteValue(b) + } + marshalVal := f.fncs.marshal + if mo.Marshalers != nil { + marshalVal, _ = mo.Marshalers.lookup(marshalVal, mv.Type()) + } + if !mo.Deterministic || n <= 1 { + for iter := m.MapRange(); iter.Next(); { + mk.SetIterKey(iter) + if err := marshalKey(mk); err != nil { + return err + } + mv.Set(iter.Value()) + if err := marshalVal(mo, enc, mv); err != nil { + return err + } } - - mv.Set(iter.Value()) - marshal := f.fncs.marshal - if mo.Marshalers != nil { - marshal, _ = mo.Marshalers.lookup(marshal, mv.Type()) + } else { + names := getStrings(n) + for i, iter := 0, m.Value.MapRange(); i < n && iter.Next(); i++ { + mk.SetIterKey(iter) + (*names)[i] = mk.String() } - if err := marshal(mo, enc, mv); err != nil { - return err + names.Sort() + for _, name := range *names { + mk.SetString(name) + if err := marshalKey(mk); err != nil { + return err + } + // TODO(https://go.dev/issue/57061): Use mv.SetMapIndexOf. + mv.Set(m.MapIndex(mk.Value)) + if err := marshalVal(mo, enc, mv); err != nil { + return err + } } + putStrings(names) } return nil } @@ -162,7 +189,7 @@ func unmarshalInlinedFallbackNext(uo UnmarshalOptions, dec *Decoder, va addressa } else { name := string(unquotedName) // TODO: Intern this? - m := v + m := v // must be a map[string]V if m.IsNil() { m.Set(reflect.MakeMap(m.Type())) } diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go index ef4e1f5e..20899c86 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_methods.go @@ -21,8 +21,8 @@ var ( ) // MarshalerV1 is implemented by types that can marshal themselves. 
-// It is recommended that types implement MarshalerV2 unless -// the implementation is trying to avoid a hard dependency on this package. +// It is recommended that types implement MarshalerV2 unless the implementation +// is trying to avoid a hard dependency on the "jsontext" package. // // It is recommended that implementations return a buffer that is safe // for the caller to retain and potentially mutate. diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go index 22e80222..fc8d5b00 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/arshal_time.go @@ -5,6 +5,7 @@ package json import ( + "errors" "fmt" "reflect" "strings" @@ -85,25 +86,39 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { fncs.nonDefault = true fncs.marshal = func(mo MarshalOptions, enc *Encoder, va addressableValue) error { format := time.RFC3339Nano + isRFC3339 := true if mo.format != "" && mo.formatDepth == enc.tokens.depth() { var err error - format, err = checkTimeFormat(mo.format) + format, isRFC3339, err = checkTimeFormat(mo.format) if err != nil { return &SemanticError{action: "marshal", GoType: t, Err: err} } } tt := va.Interface().(time.Time) - if y := tt.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See https://go.dev/issue/4556#c15 for more discussion. - err := fmt.Errorf("year %d outside of range [0,9999]", y) - return &SemanticError{action: "marshal", GoType: t, Err: err} - } b := enc.UnusedBuffer() b = append(b, '"') b = tt.AppendFormat(b, format) b = append(b, '"') + if isRFC3339 { + // Not all Go timestamps can be represented as valid RFC 3339. + // Explicitly check for these edge cases. + // See https://go.dev/issue/4556 and https://go.dev/issue/54580. + var err error + switch b := b[len(`"`) : len(b)-len(`"`)]; { + case b[len("9999")] != '-': // year must be exactly 4 digits wide + err = errors.New("year outside of range [0,9999]") + case b[len(b)-1] != 'Z': + c := b[len(b)-len("Z07:00")] + if ('0' <= c && c <= '9') || parseDec2(b[len(b)-len("07:00"):]) >= 24 { + err = errors.New("timezone hour outside of range [0,23]") + } + } + if err != nil { + return &SemanticError{action: "marshal", GoType: t, Err: err} + } + return enc.WriteValue(b) // RFC 3339 never needs JSON escaping + } // The format may contain special characters that need escaping. // Verify that the result is a valid JSON string (common case), // otherwise escape the string correctly (slower case). 
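// Sketch (not part of the vendored diff): with the stricter marshal checks
// above, a time.Time that has no valid RFC 3339 representation now fails to
// marshal instead of silently producing an out-of-spec timestamp. Again
// illustrative only, since the vendored package is internal to kube-openapi.
package main

import (
	"fmt"
	"time"

	jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json"
)

func main() {
	ok, _ := jsonv2.Marshal(time.Date(2023, 7, 7, 8, 54, 5, 0, time.UTC))
	fmt.Println(string(ok)) // "2023-07-07T08:54:05Z"

	// RFC 3339 requires exactly four year digits, so year 12345 is rejected
	// by the new b[len("9999")] != '-' check.
	_, err := jsonv2.Marshal(time.Date(12345, 1, 1, 0, 0, 0, 0, time.UTC))
	fmt.Println(err != nil) // true
}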
@@ -113,10 +128,11 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { return enc.WriteValue(b) } fncs.unmarshal = func(uo UnmarshalOptions, dec *Decoder, va addressableValue) error { - format := time.RFC3339Nano + format := time.RFC3339 + isRFC3339 := true if uo.format != "" && uo.formatDepth == dec.tokens.depth() { var err error - format, err = checkTimeFormat(uo.format) + format, isRFC3339, err = checkTimeFormat(uo.format) if err != nil { return &SemanticError{action: "unmarshal", GoType: t, Err: err} } @@ -136,6 +152,29 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { case '"': val = unescapeStringMayCopy(val, flags.isVerbatim()) tt2, err := time.Parse(format, string(val)) + if isRFC3339 && err == nil { + // TODO(https://go.dev/issue/54580): RFC 3339 specifies + // the exact grammar of a valid timestamp. However, + // the parsing functionality in "time" is too loose and + // incorrectly accepts invalid timestamps as valid. + // Remove these manual checks when "time" checks it for us. + newParseError := func(layout, value, layoutElem, valueElem, message string) error { + return &time.ParseError{Layout: layout, Value: value, LayoutElem: layoutElem, ValueElem: valueElem, Message: message} + } + switch { + case val[len("2006-01-02T")+1] == ':': // hour must be two digits + err = newParseError(format, string(val), "15", string(val[len("2006-01-02T"):][:1]), "") + case val[len("2006-01-02T15:04:05")] == ',': // sub-second separator must be a period + err = newParseError(format, string(val), ".", ",", "") + case val[len(val)-1] != 'Z': + switch { + case parseDec2(val[len(val)-len("07:00"):]) >= 24: // timezone hour must be in range + err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone hour out of range") + case parseDec2(val[len(val)-len("00"):]) >= 60: // timezone minute must be in range + err = newParseError(format, string(val), "Z07:00", string(val[len(val)-len("Z07:00"):]), ": timezone minute out of range") + } + } + } if err != nil { return &SemanticError{action: "unmarshal", JSONKind: k, GoType: t, Err: err} } @@ -149,48 +188,54 @@ func makeTimeArshaler(fncs *arshaler, t reflect.Type) *arshaler { return fncs } -func checkTimeFormat(format string) (string, error) { +func checkTimeFormat(format string) (string, bool, error) { // We assume that an exported constant in the time package will // always start with an uppercase ASCII letter. 
if len(format) > 0 && 'A' <= format[0] && format[0] <= 'Z' { switch format { case "ANSIC": - return time.ANSIC, nil + return time.ANSIC, false, nil case "UnixDate": - return time.UnixDate, nil + return time.UnixDate, false, nil case "RubyDate": - return time.RubyDate, nil + return time.RubyDate, false, nil case "RFC822": - return time.RFC822, nil + return time.RFC822, false, nil case "RFC822Z": - return time.RFC822Z, nil + return time.RFC822Z, false, nil case "RFC850": - return time.RFC850, nil + return time.RFC850, false, nil case "RFC1123": - return time.RFC1123, nil + return time.RFC1123, false, nil case "RFC1123Z": - return time.RFC1123Z, nil + return time.RFC1123Z, false, nil case "RFC3339": - return time.RFC3339, nil + return time.RFC3339, true, nil case "RFC3339Nano": - return time.RFC3339Nano, nil + return time.RFC3339Nano, true, nil case "Kitchen": - return time.Kitchen, nil + return time.Kitchen, false, nil case "Stamp": - return time.Stamp, nil + return time.Stamp, false, nil case "StampMilli": - return time.StampMilli, nil + return time.StampMilli, false, nil case "StampMicro": - return time.StampMicro, nil + return time.StampMicro, false, nil case "StampNano": - return time.StampNano, nil + return time.StampNano, false, nil default: // Reject any format that is an exported Go identifier in case // new format constants are added to the time package. if strings.TrimFunc(format, isLetterOrDigit) == "" { - return "", fmt.Errorf("undefined format layout: %v", format) + return "", false, fmt.Errorf("undefined format layout: %v", format) } } } - return format, nil + return format, false, nil +} + +// parseDec2 parses b as an unsigned, base-10, 2-digit number. +// It panics if len(b) < 2. The result is undefined if digits are not base-10. +func parseDec2(b []byte) byte { + return 10*(b[0]-'0') + (b[1] - '0') } diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go index 998ad68f..0d68b323 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/decode.go @@ -347,9 +347,9 @@ func (d *Decoder) PeekKind() Kind { return next } -// skipValue is semantically equivalent to calling ReadValue and discarding +// SkipValue is semantically equivalent to calling ReadValue and discarding // the result except that memory is not wasted trying to hold the entire result. -func (d *Decoder) skipValue() error { +func (d *Decoder) SkipValue() error { switch d.PeekKind() { case '{', '[': // For JSON objects and arrays, keep skipping all tokens @@ -374,7 +374,7 @@ func (d *Decoder) skipValue() error { } // ReadToken reads the next Token, advancing the read offset. -// The returned token is only valid until the next Peek or Read call. +// The returned token is only valid until the next Peek, Read, or Skip call. // It returns io.EOF if there are no more tokens. func (d *Decoder) ReadToken() (Token, error) { // Determine the next kind. @@ -585,7 +585,7 @@ func (f valueFlags) isCanonical() bool { return f&stringNonCanonical == 0 } // ReadValue returns the next raw JSON value, advancing the read offset. // The value is stripped of any leading or trailing whitespace. 
-// The returned value is only valid until the next Peek or Read call and +// The returned value is only valid until the next Peek, Read, or Skip call and // may not be mutated while the Decoder remains in use. // If the decoder is currently at the end token for an object or array, // then it reports a SyntacticError and the internal state remains unchanged. @@ -1013,7 +1013,7 @@ func (d *Decoder) InputOffset() int64 { // UnreadBuffer returns the data remaining in the unread buffer, // which may contain zero or more bytes. // The returned buffer must not be mutated while Decoder continues to be used. -// The buffer contents are valid until the next Peek or Read call. +// The buffer contents are valid until the next Peek, Read, or Skip call. func (d *Decoder) UnreadBuffer() []byte { return d.unreadBuffer() } @@ -1213,7 +1213,7 @@ func consumeStringResumable(flags *valueFlags, b []byte, resumeOffset int, valid return n, &SyntacticError{str: "invalid escape sequence " + strconv.Quote(string(b[n:n+6])) + " within string"} } // Only certain control characters can use the \uFFFF notation - // for canonical formating (per RFC 8785, section 3.2.2.2.). + // for canonical formatting (per RFC 8785, section 3.2.2.2.). switch v1 { // \uFFFF notation not permitted for these characters. case '\b', '\f', '\n', '\r', '\t': diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go index ba4af4b7..e4eefa3d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/doc.go @@ -8,8 +8,7 @@ // primitive data types such as booleans, strings, and numbers, // in addition to structured data types such as objects and arrays. // -// -// Terminology +// # Terminology // // This package uses the terms "encode" and "decode" for syntactic functionality // that is concerned with processing JSON based on its grammar, and @@ -32,8 +31,7 @@ // // See RFC 8259 for more information. // -// -// Specifications +// # Specifications // // Relevant specifications include RFC 4627, RFC 7159, RFC 7493, RFC 8259, // and RFC 8785. Each RFC is generally a stricter subset of another RFC. @@ -60,8 +58,7 @@ // In particular, it makes specific choices about behavior that RFC 8259 // leaves as undefined in order to ensure greater interoperability. // -// -// JSON Representation of Go structs +// # JSON Representation of Go structs // // A Go struct is naturally represented as a JSON object, // where each Go struct field corresponds with a JSON object member. diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go index 5f98a840..5b81ca15 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/encode.go @@ -347,6 +347,30 @@ func (e *Encoder) unwriteEmptyObjectMember(prevName *string) bool { return true } +// unwriteOnlyObjectMemberName unwrites the only object member name +// and returns the unquoted name. +func (e *Encoder) unwriteOnlyObjectMemberName() string { + if last := e.tokens.last; !last.isObject() || last.length() != 1 { + panic("BUG: must be called on an object after writing first name") + } + + // Unwrite the name and whitespace. 
+ b := trimSuffixString(e.buf) + isVerbatim := bytes.IndexByte(e.buf[len(b):], '\\') < 0 + name := string(unescapeStringMayCopy(e.buf[len(b):], isVerbatim)) + e.buf = trimSuffixWhitespace(b) + + // Undo state changes. + e.tokens.last.decrement() + if !e.options.AllowDuplicateNames { + if e.tokens.last.isActiveNamespace() { + e.namespaces.last().removeLast() + } + e.names.clearLast() + } + return name +} + func trimSuffixWhitespace(b []byte) []byte { // NOTE: The arguments and logic are kept simple to keep this inlineable. n := len(b) - 1 diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go index f7228221..60e93270 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/pools.go @@ -8,6 +8,7 @@ import ( "bytes" "io" "math/bits" + "sort" "sync" ) @@ -148,3 +149,34 @@ func putStreamingDecoder(d *Decoder) { streamingDecoderPool.Put(d) } } + +var stringsPools = &sync.Pool{New: func() any { return new(stringSlice) }} + +type stringSlice []string + +// getStrings returns a non-nil pointer to a slice with length n. +func getStrings(n int) *stringSlice { + s := stringsPools.Get().(*stringSlice) + if cap(*s) < n { + *s = make([]string, n) + } + *s = (*s)[:n] + return s +} + +func putStrings(s *stringSlice) { + if cap(*s) > 1<<10 { + *s = nil // avoid pinning arbitrarily large amounts of memory + } + stringsPools.Put(s) +} + +// Sort sorts the string slice according to RFC 8785, section 3.2.3. +func (ss *stringSlice) Sort() { + // TODO(https://go.dev/issue/47619): Use slices.SortFunc instead. + sort.Sort(ss) +} + +func (ss *stringSlice) Len() int { return len(*ss) } +func (ss *stringSlice) Less(i, j int) bool { return lessUTF16((*ss)[i], (*ss)[j]) } +func (ss *stringSlice) Swap(i, j int) { (*ss)[i], (*ss)[j] = (*ss)[j], (*ss)[i] } diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go index d9c33f2b..ee14c753 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/state.go @@ -721,7 +721,7 @@ func (s *uintSet) has(i uint) bool { return s.lo.has(i) } else { i -= 64 - iHi, iLo := int(i/64), uint(i%64) + iHi, iLo := int(i/64), i%64 return iHi < len(s.hi) && s.hi[iHi].has(iLo) } } @@ -735,7 +735,7 @@ func (s *uintSet) insert(i uint) bool { return !has } else { i -= 64 - iHi, iLo := int(i/64), uint(i%64) + iHi, iLo := int(i/64), i%64 if iHi >= len(s.hi) { s.hi = append(s.hi, make([]uintSet64, iHi+1-len(s.hi))...) s.hi = s.hi[:cap(s.hi)] diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go index 08509c29..9acba7da 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/token.go @@ -112,7 +112,7 @@ func Bool(b bool) Token { return False } -// String construct a Token representing a JSON string. +// String constructs a Token representing a JSON string. 
// The provided string should contain valid UTF-8, otherwise invalid characters // may be mangled as the Unicode replacement character. func String(s string) Token { @@ -225,7 +225,7 @@ func (t Token) appendString(dst []byte, validateUTF8, preserveRaw bool, escapeRu } // String returns the unescaped string value for a JSON string. -// For other JSON kinds, this returns the raw JSON represention. +// For other JSON kinds, this returns the raw JSON representation. func (t Token) String() string { // This is inlinable to take advantage of "function outlining". // This avoids an allocation for the string(b) conversion @@ -373,10 +373,10 @@ func (t Token) Int() int64 { case 'i': return int64(t.num) case 'u': - if uint64(t.num) > maxInt64 { + if t.num > maxInt64 { return maxInt64 } - return int64(uint64(t.num)) + return int64(t.num) } } @@ -425,7 +425,7 @@ func (t Token) Uint() uint64 { // Handle exact integer value. switch t.str[0] { case 'u': - return uint64(t.num) + return t.num case 'i': if int64(t.num) < minUint64 { return minUint64 diff --git a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go index fe88e4fb..e0bd1b31 100644 --- a/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go +++ b/vendor/k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json/value.go @@ -263,7 +263,7 @@ func reorderObjects(d *Decoder, scratch *[]byte) { afterValue := d.InputOffset() if isSorted && len(*members) > 0 { - isSorted = lessUTF16(prevName, name) + isSorted = lessUTF16(prevName, []byte(name)) } *members = append(*members, memberName{name, beforeName, afterValue}) prevName = name @@ -317,7 +317,7 @@ func reorderObjects(d *Decoder, scratch *[]byte) { // to the UTF-16 codepoints of the UTF-8 encoded input strings. // This implements the ordering specified in RFC 8785, section 3.2.3. // The inputs must be valid UTF-8, otherwise this may panic. -func lessUTF16(x, y []byte) bool { +func lessUTF16[Bytes []byte | string](x, y Bytes) bool { // NOTE: This is an optimized, allocation-free implementation // of lessUTF16Simple in fuzz_test.go. FuzzLessUTF16 verifies that the // two implementations agree on the result of comparing any two strings. @@ -326,8 +326,13 @@ func lessUTF16(x, y []byte) bool { return ('\u0000' <= r && r <= '\uD7FF') || ('\uE000' <= r && r <= '\uFFFF') } + var invalidUTF8 bool + x0, y0 := x, y for { if len(x) == 0 || len(y) == 0 { + if len(x) == len(y) && invalidUTF8 { + return string(x0) < string(y0) + } return len(x) < len(y) } @@ -341,35 +346,36 @@ func lessUTF16(x, y []byte) bool { } // Decode next pair of runes as UTF-8. - rx, nx := utf8.DecodeRune(x) - ry, ny := utf8.DecodeRune(y) - switch { - - // Both runes encode as either a single or surrogate pair - // of UTF-16 codepoints. - case isUTF16Self(rx) == isUTF16Self(ry): - if rx != ry { - return rx < ry - } + // TODO(https://go.dev/issue/56948): Use a generic implementation + // of utf8.DecodeRune, or rely on a compiler optimization to statically + // hide the cost of a type switch (https://go.dev/issue/57072). 
+ var rx, ry rune + var nx, ny int + switch any(x).(type) { + case string: + rx, nx = utf8.DecodeRuneInString(string(x)) + ry, ny = utf8.DecodeRuneInString(string(y)) + case []byte: + rx, nx = utf8.DecodeRune([]byte(x)) + ry, ny = utf8.DecodeRune([]byte(y)) + } + selfx := isUTF16Self(rx) + selfy := isUTF16Self(ry) + switch { // The x rune is a single UTF-16 codepoint, while // the y rune is a surrogate pair of UTF-16 codepoints. - case isUTF16Self(rx): - ry, _ := utf16.EncodeRune(ry) - if rx != ry { - return rx < ry - } - panic("BUG: invalid UTF-8") // implies rx is an unpaired surrogate half - + case selfx && !selfy: + ry, _ = utf16.EncodeRune(ry) // The y rune is a single UTF-16 codepoint, while // the x rune is a surrogate pair of UTF-16 codepoints. - case isUTF16Self(ry): - rx, _ := utf16.EncodeRune(rx) - if rx != ry { - return rx < ry - } - panic("BUG: invalid UTF-8") // implies ry is an unpaired surrogate half + case selfy && !selfx: + rx, _ = utf16.EncodeRune(rx) + } + if rx != ry { + return rx < ry } + invalidUTF8 = invalidUTF8 || (rx == utf8.RuneError && nx == 1) || (ry == utf8.RuneError && ny == 1) x, y = x[nx:], y[ny:] } } diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go new file mode 100644 index 00000000..61141a50 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/openapi.go @@ -0,0 +1,260 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemaconv + +import ( + "errors" + "path" + "strings" + + "k8s.io/kube-openapi/pkg/validation/spec" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +// ToSchemaFromOpenAPI converts a directory of OpenAPI schemas to an smd Schema. +// - models: a map from definition name to OpenAPI V3 structural schema for each definition. +// Key in map is used to resolve references in the schema. +// - preserveUnknownFields: flag indicating whether unknown fields in all schemas should be preserved. +// - returns: nil and an error if there is a parse error, or if schema does not satisfy a +// required structural schema invariant for conversion. If no error, returns +// a new smd schema. +// +// Schema should be validated as structural before using with this function, or +// there may be information lost. +func ToSchemaFromOpenAPI(models map[string]*spec.Schema, preserveUnknownFields bool) (*schema.Schema, error) { + c := convert{ + preserveUnknownFields: preserveUnknownFields, + output: &schema.Schema{}, + } + + for name, spec := range models { + // Skip/Ignore top-level references + if len(spec.Ref.String()) > 0 { + continue + } + + var a schema.Atom + + // Hard-coded schemas for now as proto_models implementation functions. 
+ // https://github.com/kubernetes/kube-openapi/issues/364 + if name == quantityResource { + a = schema.Atom{ + Scalar: untypedDef.Atom.Scalar, + } + } else if name == rawExtensionResource { + a = untypedDef.Atom + } else { + c2 := c.push(name, &a) + c2.visitSpec(spec) + c.pop(c2) + } + + c.insertTypeDef(name, a) + } + + if len(c.errorMessages) > 0 { + return nil, errors.New(strings.Join(c.errorMessages, "\n")) + } + + c.addCommonTypes() + return c.output, nil +} + +func (c *convert) visitSpec(m *spec.Schema) { + // Check if this schema opts its descendants into preserve-unknown-fields + if p, ok := m.Extensions["x-kubernetes-preserve-unknown-fields"]; ok && p == true { + c.preserveUnknownFields = true + } + a := c.top() + *a = c.parseSchema(m) +} + +func (c *convert) parseSchema(m *spec.Schema) schema.Atom { + // k8s-generated OpenAPI specs have historically used only one value for + // type and starting with OpenAPIV3 it is only allowed to be + // a single string. + typ := "" + if len(m.Type) > 0 { + typ = m.Type[0] + } + + // Structural Schemas produced by kubernetes follow very specific rules which + // we can use to infer the SMD type: + switch typ { + case "": + // According to Swagger docs: + // https://swagger.io/docs/specification/data-models/data-types/#any + // + // If no type is specified, it is equivalent to accepting any type. + return schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + List: c.parseList(m), + Map: c.parseObject(m), + } + + case "object": + return schema.Atom{ + Map: c.parseObject(m), + } + case "array": + return schema.Atom{ + List: c.parseList(m), + } + case "integer", "boolean", "number", "string": + return convertPrimitive(typ, m.Format) + default: + c.reportError("unrecognized type: '%v'", typ) + return schema.Atom{ + Scalar: ptr(schema.Scalar("untyped")), + } + } +} + +func (c *convert) makeOpenAPIRef(specSchema *spec.Schema) schema.TypeRef { + refString := specSchema.Ref.String() + + // Special-case handling for $ref stored inside a single-element allOf + if len(refString) == 0 && len(specSchema.AllOf) == 1 && len(specSchema.AllOf[0].Ref.String()) > 0 { + refString = specSchema.AllOf[0].Ref.String() + } + + if _, n := path.Split(refString); len(n) > 0 { + //!TODO: Refactor the field ElementRelationship override + // we can generate the types with overrides ahead of time rather than + // requiring the hacky runtime support + // (could just create a normalized key struct containing all customizations + // to deduplicate) + mapRelationship, err := getMapElementRelationship(specSchema.Extensions) + if err != nil { + c.reportError(err.Error()) + } + + if len(mapRelationship) > 0 { + return schema.TypeRef{ + NamedType: &n, + ElementRelationship: &mapRelationship, + } + } + + return schema.TypeRef{ + NamedType: &n, + } + + } + var inlined schema.Atom + + // compute the type inline + c2 := c.push("inlined in "+c.currentName, &inlined) + c2.preserveUnknownFields = c.preserveUnknownFields + c2.visitSpec(specSchema) + c.pop(c2) + + return schema.TypeRef{ + Inlined: inlined, + } +} + +func (c *convert) parseObject(s *spec.Schema) *schema.Map { + var fields []schema.StructField + for name, member := range s.Properties { + fields = append(fields, schema.StructField{ + Name: name, + Type: c.makeOpenAPIRef(&member), + Default: member.Default, + }) + } + + // AdditionalProperties informs the schema of any "unknown" keys + // Unknown keys are enforced by the ElementType field. 
+ elementType := func() schema.TypeRef { + if s.AdditionalProperties == nil { + // According to openAPI spec, an object without properties and without + // additionalProperties is assumed to be a free-form object. + if c.preserveUnknownFields || len(s.Properties) == 0 { + return schema.TypeRef{ + NamedType: &deducedName, + } + } + + // If properties are specified, do not implicitly allow unknown + // fields + return schema.TypeRef{} + } else if s.AdditionalProperties.Schema != nil { + // Unknown fields use the referred schema + return c.makeOpenAPIRef(s.AdditionalProperties.Schema) + + } else if s.AdditionalProperties.Allows { + // A boolean instead of a schema was provided. Deduce the + // type from the value provided at runtime. + return schema.TypeRef{ + NamedType: &deducedName, + } + } else { + // Additional Properties are explicitly disallowed by the user. + // Ensure element type is empty. + return schema.TypeRef{} + } + }() + + relationship, err := getMapElementRelationship(s.Extensions) + if err != nil { + c.reportError(err.Error()) + } + + return &schema.Map{ + Fields: fields, + ElementRelationship: relationship, + ElementType: elementType, + } +} + +func (c *convert) parseList(s *spec.Schema) *schema.List { + relationship, mapKeys, err := getListElementRelationship(s.Extensions) + if err != nil { + c.reportError(err.Error()) + } + elementType := func() schema.TypeRef { + if s.Items != nil { + if s.Items.Schema == nil || s.Items.Len() != 1 { + c.reportError("structural schema arrays must have exactly one member subtype") + return schema.TypeRef{ + NamedType: &deducedName, + } + } + + subSchema := s.Items.Schema + if subSchema == nil { + subSchema = &s.Items.Schemas[0] + } + return c.makeOpenAPIRef(subSchema) + } else if len(s.Type) > 0 && len(s.Type[0]) > 0 { + c.reportError("`items` must be specified on arrays") + } + + // A list with no items specified is treated as "untyped". + return schema.TypeRef{ + NamedType: &untypedName, + } + + }() + + return &schema.List{ + ElementRelationship: relationship, + Keys: mapKeys, + ElementType: elementType, + } +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go new file mode 100644 index 00000000..2c6fd76a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/proto_models.go @@ -0,0 +1,178 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schemaconv + +import ( + "errors" + "path" + "strings" + + "k8s.io/kube-openapi/pkg/util/proto" + "sigs.k8s.io/structured-merge-diff/v4/schema" +) + +// ToSchema converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2). +func ToSchema(models proto.Models) (*schema.Schema, error) { + return ToSchemaWithPreserveUnknownFields(models, false) +} + +// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured +// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified. 
+func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) { + c := convert{ + preserveUnknownFields: preserveUnknownFields, + output: &schema.Schema{}, + } + for _, name := range models.ListModels() { + model := models.LookupModel(name) + + var a schema.Atom + c2 := c.push(name, &a) + model.Accept(c2) + c.pop(c2) + + c.insertTypeDef(name, a) + } + + if len(c.errorMessages) > 0 { + return nil, errors.New(strings.Join(c.errorMessages, "\n")) + } + + c.addCommonTypes() + return c.output, nil +} + +func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef { + var tr schema.TypeRef + if r, ok := model.(*proto.Ref); ok { + if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" { + return schema.TypeRef{ + NamedType: &untypedName, + } + } + // reference a named type + _, n := path.Split(r.Reference()) + tr.NamedType = &n + + mapRelationship, err := getMapElementRelationship(model.GetExtensions()) + + if err != nil { + c.reportError(err.Error()) + } + + // empty string means unset. + if len(mapRelationship) > 0 { + tr.ElementRelationship = &mapRelationship + } + } else { + // compute the type inline + c2 := c.push("inlined in "+c.currentName, &tr.Inlined) + c2.preserveUnknownFields = preserveUnknownFields + model.Accept(c2) + c.pop(c2) + + if tr == (schema.TypeRef{}) { + // emit warning? + tr.NamedType = &untypedName + } + } + return tr +} + +func (c *convert) VisitKind(k *proto.Kind) { + preserveUnknownFields := c.preserveUnknownFields + if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true { + preserveUnknownFields = true + } + + a := c.top() + a.Map = &schema.Map{} + for _, name := range k.FieldOrder { + member := k.Fields[name] + tr := c.makeRef(member, preserveUnknownFields) + a.Map.Fields = append(a.Map.Fields, schema.StructField{ + Name: name, + Type: tr, + Default: member.GetDefault(), + }) + } + + unions, err := makeUnions(k.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + return + } + // TODO: We should check that the fields and discriminator + // specified in the union are actual fields in the struct. 
+ a.Map.Unions = unions + + if preserveUnknownFields { + a.Map.ElementType = schema.TypeRef{ + NamedType: &deducedName, + } + } + + a.Map.ElementRelationship, err = getMapElementRelationship(k.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } +} + +func (c *convert) VisitArray(a *proto.Array) { + relationship, mapKeys, err := getListElementRelationship(a.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } + + atom := c.top() + atom.List = &schema.List{ + ElementType: c.makeRef(a.SubType, c.preserveUnknownFields), + ElementRelationship: relationship, + Keys: mapKeys, + } +} + +func (c *convert) VisitMap(m *proto.Map) { + relationship, err := getMapElementRelationship(m.GetExtensions()) + if err != nil { + c.reportError(err.Error()) + } + + a := c.top() + a.Map = &schema.Map{ + ElementType: c.makeRef(m.SubType, c.preserveUnknownFields), + ElementRelationship: relationship, + } +} + +func (c *convert) VisitPrimitive(p *proto.Primitive) { + a := c.top() + if c.currentName == quantityResource { + a.Scalar = ptr(schema.Scalar("untyped")) + } else { + *a = convertPrimitive(p.Type, p.Format) + } +} + +func (c *convert) VisitArbitrary(a *proto.Arbitrary) { + *c.top() = deducedDef.Atom +} + +func (c *convert) VisitReference(proto.Reference) { + // Do nothing, we handle references specially +} diff --git a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go index bec0e780..799d866d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go +++ b/vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go @@ -17,43 +17,18 @@ limitations under the License. package schemaconv import ( - "errors" "fmt" - "path" "sort" - "strings" - "k8s.io/kube-openapi/pkg/util/proto" "sigs.k8s.io/structured-merge-diff/v4/schema" ) const ( - quantityResource = "io.k8s.apimachinery.pkg.api.resource.Quantity" + quantityResource = "io.k8s.apimachinery.pkg.api.resource.Quantity" + rawExtensionResource = "io.k8s.apimachinery.pkg.runtime.RawExtension" ) -// ToSchema converts openapi definitions into a schema suitable for structured -// merge (i.e. kubectl apply v2). -func ToSchema(models proto.Models) (*schema.Schema, error) { - return ToSchemaWithPreserveUnknownFields(models, false) -} - -// ToSchemaWithPreserveUnknownFields converts openapi definitions into a schema suitable for structured -// merge (i.e. kubectl apply v2), it will preserve unknown fields if specified. -func ToSchemaWithPreserveUnknownFields(models proto.Models, preserveUnknownFields bool) (*schema.Schema, error) { - c := convert{ - input: models, - preserveUnknownFields: preserveUnknownFields, - output: &schema.Schema{}, - } - if err := c.convertAll(); err != nil { - return nil, err - } - c.addCommonTypes() - return c.output, nil -} - type convert struct { - input proto.Models preserveUnknownFields bool output *schema.Schema @@ -64,7 +39,6 @@ type convert struct { func (c *convert) push(name string, a *schema.Atom) *convert { return &convert{ - input: c.input, preserveUnknownFields: c.preserveUnknownFields, output: c.output, currentName: name, @@ -78,30 +52,17 @@ func (c *convert) pop(c2 *convert) { c.errorMessages = append(c.errorMessages, c2.errorMessages...) 
} -func (c *convert) convertAll() error { - for _, name := range c.input.ListModels() { - model := c.input.LookupModel(name) - c.insertTypeDef(name, model) - } - if len(c.errorMessages) > 0 { - return errors.New(strings.Join(c.errorMessages, "\n")) - } - return nil -} - func (c *convert) reportError(format string, args ...interface{}) { c.errorMessages = append(c.errorMessages, c.currentName+": "+fmt.Sprintf(format, args...), ) } -func (c *convert) insertTypeDef(name string, model proto.Schema) { +func (c *convert) insertTypeDef(name string, atom schema.Atom) { def := schema.TypeDef{ Name: name, + Atom: atom, } - c2 := c.push(name, &def.Atom) - model.Accept(c2) - c.pop(c2) if def.Atom == (schema.Atom{}) { // This could happen if there were a top-level reference. return @@ -156,46 +117,6 @@ var deducedDef schema.TypeDef = schema.TypeDef{ }, } -func (c *convert) makeRef(model proto.Schema, preserveUnknownFields bool) schema.TypeRef { - var tr schema.TypeRef - if r, ok := model.(*proto.Ref); ok { - if r.Reference() == "io.k8s.apimachinery.pkg.runtime.RawExtension" { - return schema.TypeRef{ - NamedType: &untypedName, - } - } - // reference a named type - _, n := path.Split(r.Reference()) - tr.NamedType = &n - - ext := model.GetExtensions() - if val, ok := ext["x-kubernetes-map-type"]; ok { - switch val { - case "atomic": - relationship := schema.Atomic - tr.ElementRelationship = &relationship - case "granular": - relationship := schema.Separable - tr.ElementRelationship = &relationship - default: - c.reportError("unknown map type %v", val) - } - } - } else { - // compute the type inline - c2 := c.push("inlined in "+c.currentName, &tr.Inlined) - c2.preserveUnknownFields = preserveUnknownFields - model.Accept(c2) - c.pop(c2) - - if tr == (schema.TypeRef{}) { - // emit warning? - tr.NamedType = &untypedName - } - } - return tr -} - func makeUnions(extensions map[string]interface{}) ([]schema.Union, error) { schemaUnions := []schema.Union{} if iunions, ok := extensions["x-kubernetes-unions"]; ok { @@ -299,52 +220,6 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) { return union, nil } -func (c *convert) VisitKind(k *proto.Kind) { - preserveUnknownFields := c.preserveUnknownFields - if p, ok := k.GetExtensions()["x-kubernetes-preserve-unknown-fields"]; ok && p == true { - preserveUnknownFields = true - } - - a := c.top() - a.Map = &schema.Map{} - for _, name := range k.FieldOrder { - member := k.Fields[name] - tr := c.makeRef(member, preserveUnknownFields) - a.Map.Fields = append(a.Map.Fields, schema.StructField{ - Name: name, - Type: tr, - Default: member.GetDefault(), - }) - } - - unions, err := makeUnions(k.GetExtensions()) - if err != nil { - c.reportError(err.Error()) - return - } - // TODO: We should check that the fields and discriminator - // specified in the union are actual fields in the struct. 
- a.Map.Unions = unions - - if preserveUnknownFields { - a.Map.ElementType = schema.TypeRef{ - NamedType: &deducedName, - } - } - - ext := k.GetExtensions() - if val, ok := ext["x-kubernetes-map-type"]; ok { - switch val { - case "atomic": - a.Map.ElementRelationship = schema.Atomic - case "granular": - a.Map.ElementRelationship = schema.Separable - default: - c.reportError("unknown map type %v", val) - } - } -} - func toStringSlice(o interface{}) (out []string, ok bool) { switch t := o.(type) { case []interface{}: @@ -355,117 +230,108 @@ func toStringSlice(o interface{}) (out []string, ok bool) { } } return out, true + case []string: + return t, true } return nil, false } -func (c *convert) VisitArray(a *proto.Array) { - atom := c.top() - atom.List = &schema.List{ - ElementRelationship: schema.Atomic, - } - l := atom.List - l.ElementType = c.makeRef(a.SubType, c.preserveUnknownFields) - - ext := a.GetExtensions() +func ptr(s schema.Scalar) *schema.Scalar { return &s } - if val, ok := ext["x-kubernetes-list-type"]; ok { - if val == "atomic" { - l.ElementRelationship = schema.Atomic - } else if val == "set" { - l.ElementRelationship = schema.Associative - } else if val == "map" { - l.ElementRelationship = schema.Associative - if keys, ok := ext["x-kubernetes-list-map-keys"]; ok { - if keyNames, ok := toStringSlice(keys); ok { - l.Keys = keyNames - } else { - c.reportError("uninterpreted map keys: %#v", keys) - } - } else { - c.reportError("missing map keys") - } - } else { - c.reportError("unknown list type %v", val) - l.ElementRelationship = schema.Atomic - } - } else if val, ok := ext["x-kubernetes-patch-strategy"]; ok { - if val == "merge" || val == "merge,retainKeys" { - l.ElementRelationship = schema.Associative - if key, ok := ext["x-kubernetes-patch-merge-key"]; ok { - if keyName, ok := key.(string); ok { - l.Keys = []string{keyName} - } else { - c.reportError("uninterpreted merge key: %#v", key) - } - } else { - // It's not an error for this to be absent, it - // means it's a set. - } - } else if val == "retainKeys" { - } else { - c.reportError("unknown patch strategy %v", val) - l.ElementRelationship = schema.Atomic +// Basic conversion functions to convert OpenAPI schema definitions to +// SMD Schema atoms +func convertPrimitive(typ string, format string) (a schema.Atom) { + switch typ { + case "integer": + a.Scalar = ptr(schema.Numeric) + case "number": + a.Scalar = ptr(schema.Numeric) + case "string": + switch format { + case "": + a.Scalar = ptr(schema.String) + case "byte": + // byte really means []byte and is encoded as a string. 
+ a.Scalar = ptr(schema.String) + case "int-or-string": + a.Scalar = ptr(schema.Scalar("untyped")) + case "date-time": + a.Scalar = ptr(schema.Scalar("untyped")) + default: + a.Scalar = ptr(schema.Scalar("untyped")) } + case "boolean": + a.Scalar = ptr(schema.Boolean) + default: + a.Scalar = ptr(schema.Scalar("untyped")) } -} -func (c *convert) VisitMap(m *proto.Map) { - a := c.top() - a.Map = &schema.Map{} - a.Map.ElementType = c.makeRef(m.SubType, c.preserveUnknownFields) + return a +} - ext := m.GetExtensions() - if val, ok := ext["x-kubernetes-map-type"]; ok { +func getListElementRelationship(ext map[string]any) (schema.ElementRelationship, []string, error) { + if val, ok := ext["x-kubernetes-list-type"]; ok { switch val { case "atomic": - a.Map.ElementRelationship = schema.Atomic - case "granular": - a.Map.ElementRelationship = schema.Separable + return schema.Atomic, nil, nil + case "set": + return schema.Associative, nil, nil + case "map": + keys, ok := ext["x-kubernetes-list-map-keys"] + + if !ok { + return schema.Associative, nil, fmt.Errorf("missing map keys") + } + + keyNames, ok := toStringSlice(keys) + if !ok { + return schema.Associative, nil, fmt.Errorf("uninterpreted map keys: %#v", keys) + } + + return schema.Associative, keyNames, nil default: - c.reportError("unknown map type %v", val) + return schema.Atomic, nil, fmt.Errorf("unknown list type %v", val) } - } -} + } else if val, ok := ext["x-kubernetes-patch-strategy"]; ok { + switch val { + case "merge", "merge,retainKeys": + if key, ok := ext["x-kubernetes-patch-merge-key"]; ok { + keyName, ok := key.(string) -func ptr(s schema.Scalar) *schema.Scalar { return &s } + if !ok { + return schema.Associative, nil, fmt.Errorf("uninterpreted merge key: %#v", key) + } -func (c *convert) VisitPrimitive(p *proto.Primitive) { - a := c.top() - if c.currentName == quantityResource { - a.Scalar = ptr(schema.Scalar("untyped")) - } else { - switch p.Type { - case proto.Integer: - a.Scalar = ptr(schema.Numeric) - case proto.Number: - a.Scalar = ptr(schema.Numeric) - case proto.String: - switch p.Format { - case "": - a.Scalar = ptr(schema.String) - case "byte": - // byte really means []byte and is encoded as a string. 
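The getListElementRelationship helper added above condenses what VisitArray used to inline: x-kubernetes-list-type takes precedence, the legacy patch-strategy extensions come second, and atomic is the fallback. A simplified standalone sketch of that decision table, using stand-in types rather than the structured-merge-diff schema package:

```
package main

import "fmt"

// ElementRelationship is a stand-in for the structured-merge-diff
// notion of how list elements merge: atomically, or associatively by key.
type ElementRelationship string

const (
	Atomic      ElementRelationship = "atomic"
	Associative ElementRelationship = "associative"
)

// listRelationship sketches the precedence implemented by
// getListElementRelationship; error handling is elided here.
func listRelationship(ext map[string]any) (ElementRelationship, []string) {
	switch ext["x-kubernetes-list-type"] {
	case "atomic":
		return Atomic, nil
	case "set":
		return Associative, nil
	case "map":
		// Associative by the named key fields.
		keys, _ := ext["x-kubernetes-list-map-keys"].([]string)
		return Associative, keys
	}
	if s, ok := ext["x-kubernetes-patch-strategy"].(string); ok &&
		(s == "merge" || s == "merge,retainKeys") {
		if key, ok := ext["x-kubernetes-patch-merge-key"].(string); ok {
			return Associative, []string{key}
		}
		// No merge key means the list behaves like a set.
		return Associative, nil
	}
	// Unannotated lists are treated as atomic.
	return Atomic, nil
}

func main() {
	rel, keys := listRelationship(map[string]any{
		"x-kubernetes-list-type":     "map",
		"x-kubernetes-list-map-keys": []string{"name"},
	})
	fmt.Println(rel, keys) // associative [name]
}
```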
- a.Scalar = ptr(schema.String) - case "int-or-string": - a.Scalar = ptr(schema.Scalar("untyped")) - case "date-time": - a.Scalar = ptr(schema.Scalar("untyped")) - default: - a.Scalar = ptr(schema.Scalar("untyped")) + return schema.Associative, []string{keyName}, nil } - case proto.Boolean: - a.Scalar = ptr(schema.Boolean) + // It's not an error for x-kubernetes-patch-merge-key to be absent, + // it means it's a set + return schema.Associative, nil, nil + case "retainKeys": + return schema.Atomic, nil, nil default: - a.Scalar = ptr(schema.Scalar("untyped")) + return schema.Atomic, nil, fmt.Errorf("unknown patch strategy %v", val) } } -} -func (c *convert) VisitArbitrary(a *proto.Arbitrary) { - *c.top() = deducedDef.Atom + // Treat as atomic by default + return schema.Atomic, nil, nil } -func (c *convert) VisitReference(proto.Reference) { - // Do nothing, we handle references specially +// Returns map element relationship if specified, or empty string if unspecified +func getMapElementRelationship(ext map[string]any) (schema.ElementRelationship, error) { + val, ok := ext["x-kubernetes-map-type"] + if !ok { + // unset Map element relationship + return "", nil + } + + switch val { + case "atomic": + return schema.Atomic, nil + case "granular": + return schema.Separable, nil + default: + return "", fmt.Errorf("unknown map type %v", val) + } } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go index 51dac4bd..2e2f3d76 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/encoding.go @@ -58,7 +58,7 @@ type EncodingProps struct { // Describes how a specific property value will be serialized depending on its type Style string `json:"style,omitempty"` // When this is true, property values of type array or object generate separate parameters for each value of the array, or key-value-pair of the map. 
For other types of properties this property has no effect - Explode string `json:"explode,omitempty"` + Explode bool `json:"explode,omitempty"` // AllowReserved determines whether the parameter value SHOULD allow reserved characters, as defined by RFC3986 AllowReserved bool `json:"allowReserved,omitempty"` } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go index 0f5ab983..84e21d72 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/example.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/example.go @@ -19,8 +19,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Example https://swagger.io/specification/#example-object diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go index 117113e7..065f4887 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/external_documentation.go @@ -18,8 +18,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) type ExternalDocumentation struct { diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go index 828fd8dc..d502a465 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/media_type.go @@ -56,7 +56,7 @@ func (m *MediaType) UnmarshalJSON(data []byte) error { // MediaTypeProps a struct that allows you to specify content format, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#mediaTypeObject type MediaTypeProps struct { // Schema holds the schema defining the type used for the media type - Schema *spec.Schema `json:"schema,omitempty"` + Schema *spec.Schema `json:"schema,omitempty"` // Example of the media type Example interface{} `json:"example,omitempty"` // Examples of the media type. 
Each example object should match the media type and specific schema if present diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go index de8aa460..09ce7eaf 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/operation.go @@ -19,8 +19,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Operation describes a single API operation on a path, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#operationObject @@ -73,7 +73,7 @@ type OperationProps struct { // Deprecated declares this operation to be deprecated Deprecated bool `json:"deprecated,omitempty"` // SecurityRequirement holds a declaration of which security mechanisms can be used for this operation - SecurityRequirement []*SecurityRequirement `json:"security,omitempty"` + SecurityRequirement []map[string][]string `json:"security,omitempty"` // Servers contains an alternative server array to service this operation Servers []*Server `json:"servers,omitempty"` } diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go index bc48c504..4a0cae2a 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/path.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/path.go @@ -20,8 +20,8 @@ import ( "encoding/json" "strings" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Paths describes the available paths and operations for the API, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#pathsObject diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go index 0adc6282..c00c043b 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/request_body.go @@ -19,8 +19,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // RequestBody describes a single request body, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#requestBodyObject diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go index ccd73369..44d3c156 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/response.go @@ -20,8 +20,8 @@ import ( "encoding/json" "strconv" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // Responses holds the list of possible responses as they are returned from executing this operation @@ -78,20 +78,29 @@ func (r ResponsesProps) MarshalJSON() ([]byte, error) { // UnmarshalJSON unmarshals responses from JSON func (r *ResponsesProps) UnmarshalJSON(data []byte) error { - var res map[string]*Response + var res map[string]json.RawMessage if err := json.Unmarshal(data, &res); err != nil { - return nil + return err } if v, ok := res["default"]; ok { - r.Default = v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.Default = &value delete(res, "default") } for k, v := range res { + // Take all integral keys if nk, err := strconv.Atoi(k); err == nil { if r.StatusCodeResponses == nil { r.StatusCodeResponses = 
map[int]*Response{} } - r.StatusCodeResponses[nk] = v + value := Response{} + if err := json.Unmarshal(v, &value); err != nil { + return err + } + r.StatusCodeResponses[nk] = &value } } return nil @@ -149,7 +158,6 @@ type ResponseProps struct { Links map[string]*Link `json:"links,omitempty"` } - // Link represents a possible design-time link for a response, more at https://swagger.io/specification/#link-object type Link struct { spec.Refable diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go deleted file mode 100644 index 0ce8924e..00000000 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/security_requirement.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package spec3 - -import ( - "encoding/json" - - "k8s.io/kube-openapi/pkg/validation/spec" - "github.com/go-openapi/swag" -) - -// SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object -// -// Note that this struct is actually a thin wrapper around SecurityRequirementProps to make it referable and extensible -type SecurityRequirement struct { - SecurityRequirementProps - spec.VendorExtensible -} - -// MarshalJSON is a custom marshal function that knows how to encode SecurityRequirement as JSON -func (s *SecurityRequirement) MarshalJSON() ([]byte, error) { - b1, err := json.Marshal(s.SecurityRequirementProps) - if err != nil { - return nil, err - } - b2, err := json.Marshal(s.VendorExtensible) - if err != nil { - return nil, err - } - return swag.ConcatJSON(b1, b2), nil -} - -// UnmarshalJSON hydrates this items instance with the data from JSON -func (s *SecurityRequirement) UnmarshalJSON(data []byte) error { - if err := json.Unmarshal(data, &s.SecurityRequirementProps); err != nil { - return err - } - return json.Unmarshal(data, &s.VendorExtensible) -} - -// SecurityRequirementProps describes the required security schemes to execute an operation, more at https://swagger.io/specification/#security-requirement-object -type SecurityRequirementProps map[string][]string diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go index 9b1352f4..edf7e6de 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go +++ b/vendor/k8s.io/kube-openapi/pkg/spec3/security_scheme.go @@ -19,8 +19,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/validation/spec" ) // SecurityScheme defines reusable Security Scheme Object, more at https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#securitySchemeObject diff --git a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go index a505fb22..77104dff 100644 --- a/vendor/k8s.io/kube-openapi/pkg/spec3/server.go +++ 
b/vendor/k8s.io/kube-openapi/pkg/spec3/server.go @@ -18,9 +18,8 @@ package spec3 import ( "encoding/json" - "k8s.io/kube-openapi/pkg/validation/spec" "github.com/go-openapi/swag" - + "k8s.io/kube-openapi/pkg/validation/spec" ) type Server struct { diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go index a3f476d5..519dcf2e 100644 --- a/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document_v3.go @@ -120,7 +120,7 @@ func (d *Definitions) ParseSchemaV3(s *openapi_v3.Schema, path *Path) (Schema, e switch s.GetType() { case object: for _, extension := range s.GetSpecificationExtension() { - if extension.Name == "x-kuberentes-group-version-kind" { + if extension.Name == "x-kubernetes-group-version-kind" { // Objects with x-kubernetes-group-version-kind are always top // level types. return d.parseV3Kind(s, path) @@ -285,7 +285,7 @@ func parseV3Interface(def *yaml.Node) (interface{}, error) { func (d *Definitions) parseV3BaseSchema(s *openapi_v3.Schema, path *Path) (*BaseSchema, error) { if s == nil { - return nil, fmt.Errorf("cannot initializae BaseSchema from nil") + return nil, fmt.Errorf("cannot initialize BaseSchema from nil") } def, err := parseV3Interface(s.GetDefault().ToRawInfo()) diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go index 9a255630..05310c46 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/header.go @@ -43,6 +43,9 @@ type Header struct { // MarshalJSON marshal this to JSON func (h Header) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(h) + } b1, err := json.Marshal(h.CommonValidations) if err != nil { return nil, err @@ -62,6 +65,20 @@ func (h Header) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3, b4), nil } +func (h Header) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + Extensions + HeaderProps + } + x.CommonValidations = commonValidationsOmitZero(h.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(h.SimpleSchema) + x.Extensions = internal.SanitizeExtensions(h.Extensions) + x.HeaderProps = h.HeaderProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON unmarshals this header from JSON func (h *Header) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -94,12 +111,8 @@ func (h *Header) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Dec h.CommonValidations = x.CommonValidations h.SimpleSchema = x.SimpleSchema - h.Extensions = x.Extensions + h.Extensions = internal.SanitizeExtensions(x.Extensions) h.HeaderProps = x.HeaderProps - h.Extensions.sanitize() - if len(h.Extensions) == 0 { - h.Extensions = nil - } return nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go index 395ececa..d667b705 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/info.go @@ -89,17 +89,9 @@ func (e Extensions) GetObject(key string, out interface{}) error { return nil } -func (e Extensions) sanitize() { - for k := range e { - if !isExtensionKey(k) { - delete(e, k) - } - } 
-} - func (e Extensions) sanitizeWithExtra() (extra map[string]any) { for k, v := range e { - if !isExtensionKey(k) { + if !internal.IsExtensionKey(k) { if extra == nil { extra = make(map[string]any) } @@ -110,10 +102,6 @@ func (e Extensions) sanitizeWithExtra() (extra map[string]any) { return extra } -func isExtensionKey(k string) bool { - return len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-' -} - // VendorExtensible composition block. type VendorExtensible struct { Extensions Extensions @@ -181,6 +169,9 @@ type Info struct { // MarshalJSON marshal this to JSON func (i Info) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(i) + } b1, err := json.Marshal(i.InfoProps) if err != nil { return nil, err @@ -192,6 +183,16 @@ func (i Info) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (i Info) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + InfoProps + } + x.Extensions = i.Extensions + x.InfoProps = i.InfoProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (i *Info) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -212,11 +213,7 @@ func (i *Info) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decod if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } - i.VendorExtensible.Extensions = x.Extensions + i.Extensions = internal.SanitizeExtensions(x.Extensions) i.InfoProps = x.InfoProps return nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go index 374f90d2..4132467d 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/items.go @@ -37,6 +37,18 @@ type SimpleSchema struct { Example interface{} `json:"example,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type simpleSchemaOmitZero struct { + Type string `json:"type,omitempty"` + Nullable bool `json:"nullable,omitzero"` + Format string `json:"format,omitempty"` + Items *Items `json:"items,omitzero"` + CollectionFormat string `json:"collectionFormat,omitempty"` + Default interface{} `json:"default,omitempty"` + Example interface{} `json:"example,omitempty"` +} + // CommonValidations describe common JSON-schema validations type CommonValidations struct { Maximum *float64 `json:"maximum,omitempty"` @@ -53,6 +65,23 @@ type CommonValidations struct { Enum []interface{} `json:"enum,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type commonValidationsOmitZero struct { + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitzero"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` +} + // Items a limited subset of JSON-Schema's items object. 
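The simpleSchemaOmitZero and commonValidationsOmitZero structs above are marshaling-only mirrors: identical field lists with different JSON tags, so the vendored jsonv2 marshaler can apply omitzero semantics while a plain struct conversion keeps the two definitions from drifting (the conversion stops compiling if they diverge). A standalone sketch of that mirror-struct trick, shown here with encoding/json and omitempty since the jsonv2 experiment is internal to this vendor tree:

```
package main

import (
	"encoding/json"
	"fmt"
)

// Settings is the struct the rest of the code works with.
type Settings struct {
	Name    string
	Retries int
	Debug   bool
}

// settingsWire mirrors Settings field for field, adding only the wire
// tags. Because names, types, and order match exactly, the direct
// conversion below is legal, and any drift between the two structs
// becomes a compile error.
type settingsWire struct {
	Name    string `json:"name,omitempty"`
	Retries int    `json:"retries,omitempty"`
	Debug   bool   `json:"debug,omitempty"`
}

func main() {
	s := Settings{Name: "demo"}
	b, _ := json.Marshal(settingsWire(s)) // struct-to-struct conversion
	fmt.Println(string(b))                // {"name":"demo"}
}
```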
// It is used by parameter definitions that are not located in "body". // @@ -105,18 +134,18 @@ func (i *Items) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco if err := i.Refable.Ref.fromMap(x.Extensions); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } + i.CommonValidations = x.CommonValidations i.SimpleSchema = x.SimpleSchema - i.VendorExtensible.Extensions = x.Extensions + i.Extensions = internal.SanitizeExtensions(x.Extensions) return nil } // MarshalJSON converts this items object to JSON func (i Items) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(i) + } b1, err := json.Marshal(i.CommonValidations) if err != nil { return nil, err @@ -135,3 +164,17 @@ func (i Items) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b4, b3, b1, b2), nil } + +func (i Items) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + Ref string `json:"$ref,omitempty"` + Extensions + } + x.CommonValidations = commonValidationsOmitZero(i.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(i.SimpleSchema) + x.Ref = i.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(i.Extensions) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go index 923769ae..63eed346 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/operation.go @@ -42,6 +42,23 @@ type OperationProps struct { Responses *Responses `json:"responses,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type operationPropsOmitZero struct { + Description string `json:"description,omitempty"` + Consumes []string `json:"consumes,omitempty"` + Produces []string `json:"produces,omitempty"` + Schemes []string `json:"schemes,omitempty"` + Tags []string `json:"tags,omitempty"` + Summary string `json:"summary,omitempty"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + ID string `json:"operationId,omitempty"` + Deprecated bool `json:"deprecated,omitempty,omitzero"` + Security []map[string][]string `json:"security,omitempty"` + Parameters []Parameter `json:"parameters,omitempty"` + Responses *Responses `json:"responses,omitzero"` +} + // MarshalJSON takes care of serializing operation properties to JSON // // We use a custom marhaller here to handle a special cases related to @@ -96,17 +113,16 @@ func (o *Operation) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2. 
if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } - o.VendorExtensible.Extensions = x.Extensions + o.Extensions = internal.SanitizeExtensions(x.Extensions) o.OperationProps = OperationProps(x.OperationPropsNoMethods) return nil } // MarshalJSON converts this items object to JSON func (o Operation) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(o) + } b1, err := json.Marshal(o.OperationProps) if err != nil { return nil, err @@ -118,3 +134,13 @@ func (o Operation) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b1, b2) return concated, nil } + +func (o Operation) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + OperationProps operationPropsOmitZero `json:",inline"` + } + x.Extensions = internal.SanitizeExtensions(o.Extensions) + x.OperationProps = operationPropsOmitZero(o.OperationProps) + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go index 7cb229ac..53d1e0aa 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/parameter.go @@ -36,6 +36,17 @@ type ParamProps struct { AllowEmptyValue bool `json:"allowEmptyValue,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type paramPropsOmitZero struct { + Description string `json:"description,omitempty"` + Name string `json:"name,omitempty"` + In string `json:"in,omitempty"` + Required bool `json:"required,omitzero"` + Schema *Schema `json:"schema,omitzero"` + AllowEmptyValue bool `json:"allowEmptyValue,omitzero"` +} + // Parameter a unique parameter is defined by a combination of a [name](#parameterName) and [location](#parameterIn). // // There are five possible parameter types. @@ -109,19 +120,18 @@ func (p *Parameter) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2. 
if err := p.Refable.Ref.fromMap(x.Extensions); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } p.CommonValidations = x.CommonValidations p.SimpleSchema = x.SimpleSchema - p.VendorExtensible.Extensions = x.Extensions + p.Extensions = internal.SanitizeExtensions(x.Extensions) p.ParamProps = x.ParamProps return nil } // MarshalJSON converts this items object to JSON func (p Parameter) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.CommonValidations) if err != nil { return nil, err @@ -144,3 +154,19 @@ func (p Parameter) MarshalJSON() ([]byte, error) { } return swag.ConcatJSON(b3, b1, b2, b4, b5), nil } + +func (p Parameter) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + CommonValidations commonValidationsOmitZero `json:",inline"` + SimpleSchema simpleSchemaOmitZero `json:",inline"` + ParamProps paramPropsOmitZero `json:",inline"` + Ref string `json:"$ref,omitempty"` + Extensions + } + x.CommonValidations = commonValidationsOmitZero(p.CommonValidations) + x.SimpleSchema = simpleSchemaOmitZero(p.SimpleSchema) + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.ParamProps = paramPropsOmitZero(p.ParamProps) + x.Ref = p.Refable.Ref.String() + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go index 03741fcf..1d1588cb 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/path_item.go @@ -70,24 +70,20 @@ func (p *PathItem) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.D if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - - p.Extensions = x.Extensions - p.PathItemProps = x.PathItemProps - - if err := p.Refable.Ref.fromMap(p.Extensions); err != nil { + if err := p.Refable.Ref.fromMap(x.Extensions); err != nil { return err } - - p.Extensions.sanitize() - if len(p.Extensions) == 0 { - p.Extensions = nil - } + p.Extensions = internal.SanitizeExtensions(x.Extensions) + p.PathItemProps = x.PathItemProps return nil } // MarshalJSON converts this items object to JSON func (p PathItem) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b3, err := json.Marshal(p.Refable) if err != nil { return nil, err @@ -103,3 +99,15 @@ func (p PathItem) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b3, b4, b5) return concated, nil } + +func (p PathItem) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + Extensions + PathItemProps + } + x.Ref = p.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(p.Extensions) + x.PathItemProps = p.PathItemProps + return opts.MarshalNext(enc, x) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go index 7c63d440..18f6a9f4 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/paths.go @@ -92,7 +92,7 @@ func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco } switch k := tok.String(); { - case isExtensionKey(k): + case internal.IsExtensionKey(k): ext = nil if err := opts.UnmarshalNext(dec, &ext); err != nil { return err @@ 
-114,7 +114,9 @@ func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco p.Paths[k] = pi default: _, err := dec.ReadValue() // skip value - return err + if err != nil { + return err + } } } default: @@ -124,6 +126,9 @@ func (p *Paths) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Deco // MarshalJSON converts this items object to JSON func (p Paths) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(p) + } b1, err := json.Marshal(p.VendorExtensible) if err != nil { return nil, err @@ -142,3 +147,18 @@ func (p Paths) MarshalJSON() ([]byte, error) { concated := swag.ConcatJSON(b1, b2) return concated, nil } + +func (p Paths) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + m := make(map[string]any, len(p.Extensions)+len(p.Paths)) + for k, v := range p.Extensions { + if internal.IsExtensionKey(k) { + m[k] = v + } + } + for k, v := range p.Paths { + if strings.HasPrefix(k, "/") { + m[k] = v + } + } + return opts.MarshalNext(enc, m) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go index 1405bfd8..775b3b0c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/ref.go @@ -21,6 +21,8 @@ import ( "path/filepath" "github.com/go-openapi/jsonreference" + + "k8s.io/kube-openapi/pkg/internal" ) // Refable is a struct for things that accept a $ref property @@ -149,19 +151,5 @@ func (r *Ref) UnmarshalJSON(d []byte) error { } func (r *Ref) fromMap(v map[string]interface{}) error { - if v == nil { - return nil - } - - if vv, ok := v["$ref"]; ok { - if str, ok := vv.(string); ok { - ref, err := jsonreference.New(str) - if err != nil { - return err - } - *r = Ref{Ref: ref} - } - } - - return nil + return internal.JSONRefFromMap(&r.Ref, v) } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go index f01364b7..3ff1fe13 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/response.go @@ -30,6 +30,15 @@ type ResponseProps struct { Examples map[string]interface{} `json:"examples,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type responsePropsOmitZero struct { + Description string `json:"description,omitempty"` + Schema *Schema `json:"schema,omitzero"` + Headers map[string]Header `json:"headers,omitempty"` + Examples map[string]interface{} `json:"examples,omitempty"` +} + // Response describes a single response from an API Operation. 
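Paths.MarshalNextJSON above flattens two key spaces, vendor extensions and slash-prefixed path items, into one JSON object and silently drops anything else. A standalone sketch of that shape, with plain maps standing in for the spec structs:

```
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// marshalPaths merges "x-" extension keys and "/" path keys into a
// single flat object, mirroring the new Paths.MarshalNextJSON.
func marshalPaths(extensions, paths map[string]any) ([]byte, error) {
	m := make(map[string]any, len(extensions)+len(paths))
	for k, v := range extensions {
		// Same test as internal.IsExtensionKey: an "x-" or "X-" prefix.
		if len(k) > 1 && (k[0] == 'x' || k[0] == 'X') && k[1] == '-' {
			m[k] = v
		}
	}
	for k, v := range paths {
		if strings.HasPrefix(k, "/") {
			m[k] = v
		}
	}
	return json.Marshal(m)
}

func main() {
	b, _ := marshalPaths(
		map[string]any{"x-order": 1, "ignored": true},
		map[string]any{"/pets": map[string]any{"get": map[string]any{}}},
	)
	fmt.Println(string(b)) // {"/pets":{"get":{}},"x-order":1}
}
```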
// // For more information: http://goo.gl/8us55a#responseObject @@ -68,23 +77,20 @@ func (r *Response) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.D return err } - r.Extensions = x.Extensions - r.ResponseProps = x.ResponseProps - - if err := r.Refable.Ref.fromMap(r.Extensions); err != nil { + if err := r.Refable.Ref.fromMap(x.Extensions); err != nil { return err } - - r.Extensions.sanitize() - if len(r.Extensions) == 0 { - r.Extensions = nil - } + r.Extensions = internal.SanitizeExtensions(x.Extensions) + r.ResponseProps = x.ResponseProps return nil } // MarshalJSON converts this items object to JSON func (r Response) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponseProps) if err != nil { return nil, err @@ -100,6 +106,18 @@ func (r Response) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3), nil } +func (r Response) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Ref string `json:"$ref,omitempty"` + Extensions + ResponseProps responsePropsOmitZero `json:",inline"` + } + x.Ref = r.Refable.Ref.String() + x.Extensions = internal.SanitizeExtensions(r.Extensions) + x.ResponseProps = responsePropsOmitZero(r.ResponseProps) + return opts.MarshalNext(enc, x) +} + // NewResponse creates a new response instance func NewResponse() *Response { return new(Response) diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go index c3fa6819..d9ad760a 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/responses.go @@ -63,6 +63,9 @@ func (r *Responses) UnmarshalJSON(data []byte) error { // MarshalJSON converts this items object to JSON func (r Responses) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(r) + } b1, err := json.Marshal(r.ResponsesProps) if err != nil { return nil, err @@ -75,6 +78,25 @@ func (r Responses) MarshalJSON() ([]byte, error) { return concated, nil } +func (r Responses) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + Default *Response `json:"default,omitempty"` + } + x.ArbitraryKeys = make(map[string]any, len(r.Extensions)+len(r.StatusCodeResponses)) + for k, v := range r.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range r.StatusCodeResponses { + x.ArbitraryKeys[strconv.Itoa(k)] = v + } + x.Default = r.Default + return opts.MarshalNext(enc, x) +} + // ResponsesProps describes all responses for an operation. // It tells what is the default response and maps all responses with a // HTTP status code. @@ -148,7 +170,7 @@ func (r *Responses) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2. 
return nil } switch k := tok.String(); { - case isExtensionKey(k): + case internal.IsExtensionKey(k): ext = nil if err := opts.UnmarshalNext(dec, &ext); err != nil { return err diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go index 9add0c16..e28b5848 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/schema.go @@ -196,6 +196,46 @@ type SchemaProps struct { Definitions Definitions `json:"definitions,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type schemaPropsOmitZero struct { + ID string `json:"id,omitempty"` + Ref Ref `json:"-"` + Schema SchemaURL `json:"-"` + Description string `json:"description,omitempty"` + Type StringOrArray `json:"type,omitzero"` + Nullable bool `json:"nullable,omitzero"` + Format string `json:"format,omitempty"` + Title string `json:"title,omitempty"` + Default interface{} `json:"default,omitzero"` + Maximum *float64 `json:"maximum,omitempty"` + ExclusiveMaximum bool `json:"exclusiveMaximum,omitzero"` + Minimum *float64 `json:"minimum,omitempty"` + ExclusiveMinimum bool `json:"exclusiveMinimum,omitzero"` + MaxLength *int64 `json:"maxLength,omitempty"` + MinLength *int64 `json:"minLength,omitempty"` + Pattern string `json:"pattern,omitempty"` + MaxItems *int64 `json:"maxItems,omitempty"` + MinItems *int64 `json:"minItems,omitempty"` + UniqueItems bool `json:"uniqueItems,omitzero"` + MultipleOf *float64 `json:"multipleOf,omitempty"` + Enum []interface{} `json:"enum,omitempty"` + MaxProperties *int64 `json:"maxProperties,omitempty"` + MinProperties *int64 `json:"minProperties,omitempty"` + Required []string `json:"required,omitempty"` + Items *SchemaOrArray `json:"items,omitzero"` + AllOf []Schema `json:"allOf,omitempty"` + OneOf []Schema `json:"oneOf,omitempty"` + AnyOf []Schema `json:"anyOf,omitempty"` + Not *Schema `json:"not,omitzero"` + Properties map[string]Schema `json:"properties,omitempty"` + AdditionalProperties *SchemaOrBool `json:"additionalProperties,omitzero"` + PatternProperties map[string]Schema `json:"patternProperties,omitempty"` + Dependencies Dependencies `json:"dependencies,omitempty"` + AdditionalItems *SchemaOrBool `json:"additionalItems,omitzero"` + Definitions Definitions `json:"definitions,omitempty"` +} + // SwaggerSchemaProps are additional properties supported by swagger schemas, but not JSON-schema (draft 4) type SwaggerSchemaProps struct { Discriminator string `json:"discriminator,omitempty"` @@ -204,6 +244,15 @@ type SwaggerSchemaProps struct { Example interface{} `json:"example,omitempty"` } +// Marshaling structure only, always edit along with corresponding +// struct (or compilation will fail). +type swaggerSchemaPropsOmitZero struct { + Discriminator string `json:"discriminator,omitempty"` + ReadOnly bool `json:"readOnly,omitzero"` + ExternalDocs *ExternalDocumentation `json:"externalDocs,omitzero"` + Example interface{} `json:"example,omitempty"` +} + // Schema the schema object allows the definition of input and output data types. // These types can be objects, but also primitives and arrays. 
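Responses.MarshalNextJSON in the hunk above uses the same flat-object technique: an embedded ArbitraryKeys map carries extensions and stringified status codes next to an optional default entry. A simplified standalone version:

```
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// Response is a minimal stand-in for the spec Response type.
type Response struct {
	Description string `json:"description,omitempty"`
}

// marshalResponses turns integer status codes into string keys and
// places them beside "default" in one flat object, as the new
// Responses.MarshalNextJSON does.
func marshalResponses(status map[int]*Response, def *Response) ([]byte, error) {
	m := make(map[string]any, len(status)+1)
	for code, r := range status {
		m[strconv.Itoa(code)] = r
	}
	if def != nil {
		m["default"] = def
	}
	return json.Marshal(m)
}

func main() {
	b, _ := marshalResponses(map[int]*Response{
		200: {Description: "OK"},
		404: {Description: "not found"},
	}, &Response{Description: "fallback"})
	fmt.Println(string(b))
}
```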
// This object is based on the [JSON Schema Specification Draft 4](http://json-schema.org/) @@ -434,6 +483,9 @@ func (s *Schema) WithExternalDocs(description, url string) *Schema { // MarshalJSON marshal this to JSON func (s Schema) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SchemaProps) if err != nil { return nil, fmt.Errorf("schema props %v", err) @@ -465,6 +517,29 @@ func (s Schema) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2, b3, b4, b5, b6), nil } +func (s Schema) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + type ArbitraryKeys map[string]interface{} + var x struct { + ArbitraryKeys + SchemaProps schemaPropsOmitZero `json:",inline"` + SwaggerSchemaProps swaggerSchemaPropsOmitZero `json:",inline"` + Ref string `json:"$ref,omitempty"` + } + x.ArbitraryKeys = make(map[string]any, len(s.Extensions)+len(s.ExtraProps)) + for k, v := range s.Extensions { + if internal.IsExtensionKey(k) { + x.ArbitraryKeys[k] = v + } + } + for k, v := range s.ExtraProps { + x.ArbitraryKeys[k] = v + } + x.SchemaProps = schemaPropsOmitZero(s.SchemaProps) + x.SwaggerSchemaProps = swaggerSchemaPropsOmitZero(s.SwaggerSchemaProps) + x.Ref = s.Ref.String() + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (s *Schema) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go index 34723fb7..e2b7da14 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/security_scheme.go @@ -18,6 +18,7 @@ import ( "encoding/json" "github.com/go-openapi/swag" + "k8s.io/kube-openapi/pkg/internal" jsonv2 "k8s.io/kube-openapi/pkg/internal/third_party/go-json-experiment/json" ) @@ -45,6 +46,9 @@ type SecurityScheme struct { // MarshalJSON marshal this to JSON func (s SecurityScheme) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SecuritySchemeProps) if err != nil { return nil, err @@ -56,6 +60,16 @@ func (s SecurityScheme) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (s SecurityScheme) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + SecuritySchemeProps + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SecuritySchemeProps = s.SecuritySchemeProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (s *SecurityScheme) UnmarshalJSON(data []byte) error { if err := json.Unmarshal(data, &s.SecuritySchemeProps); err != nil { @@ -72,11 +86,7 @@ func (s *SecurityScheme) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *js if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } - s.VendorExtensible.Extensions = x.Extensions + s.Extensions = internal.SanitizeExtensions(x.Extensions) s.SecuritySchemeProps = x.SecuritySchemeProps return nil } diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go index f6cb7da3..565d172a 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go +++ 
b/vendor/k8s.io/kube-openapi/pkg/validation/spec/swagger.go @@ -35,6 +35,9 @@ type Swagger struct { // MarshalJSON marshals this swagger structure to json func (s Swagger) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } b1, err := json.Marshal(s.SwaggerProps) if err != nil { return nil, err @@ -46,12 +49,22 @@ func (s Swagger) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +// MarshalJSON marshals this swagger structure to json +func (s Swagger) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + SwaggerProps + } + x.Extensions = internal.SanitizeExtensions(s.Extensions) + x.SwaggerProps = s.SwaggerProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON unmarshals a swagger spec from json func (s *Swagger) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { return jsonv2.Unmarshal(data, s) } - var sw Swagger if err := json.Unmarshal(data, &sw.SwaggerProps); err != nil { return err @@ -75,15 +88,8 @@ func (s *Swagger) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.De if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - - s.Extensions = x.Extensions + s.Extensions = internal.SanitizeExtensions(x.Extensions) s.SwaggerProps = x.SwaggerProps - - s.Extensions.sanitize() - if len(s.Extensions) == 0 { - s.Extensions = nil - } - return nil } @@ -126,6 +132,9 @@ var jsFalse = []byte("false") // MarshalJSON convert this object to JSON func (s SchemaOrBool) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } if s.Schema != nil { return json.Marshal(s.Schema) } @@ -136,6 +145,18 @@ func (s SchemaOrBool) MarshalJSON() ([]byte, error) { return jsTrue, nil } +// MarshalJSON convert this object to JSON +func (s SchemaOrBool) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if s.Schema != nil { + return opts.MarshalNext(enc, s.Schema) + } + + if s.Schema == nil && !s.Allows { + return enc.WriteToken(jsonv2.False) + } + return enc.WriteToken(jsonv2.True) +} + // UnmarshalJSON converts this bool or schema object from a JSON structure func (s *SchemaOrBool) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -185,6 +206,9 @@ type SchemaOrStringArray struct { // MarshalJSON converts this schema object or array into JSON structure func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } if len(s.Property) > 0 { return json.Marshal(s.Property) } @@ -194,6 +218,17 @@ func (s SchemaOrStringArray) MarshalJSON() ([]byte, error) { return []byte("null"), nil } +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrStringArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if len(s.Property) > 0 { + return opts.MarshalNext(enc, s.Property) + } + if s.Schema != nil { + return opts.MarshalNext(enc, s.Schema) + } + return enc.WriteToken(jsonv2.Null) +} + // UnmarshalJSON converts this schema object or array from a JSON structure func (s *SchemaOrStringArray) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -347,12 +382,23 @@ func (s *SchemaOrArray) ContainsType(name string) bool { // MarshalJSON converts this schema object or array into JSON structure func (s SchemaOrArray) MarshalJSON() ([]byte, error) { + if 
internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(s) + } if len(s.Schemas) > 0 { return json.Marshal(s.Schemas) } return json.Marshal(s.Schema) } +// MarshalJSON converts this schema object or array into JSON structure +func (s SchemaOrArray) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + if len(s.Schemas) > 0 { + return opts.MarshalNext(enc, s.Schemas) + } + return opts.MarshalNext(enc, s.Schema) +} + // UnmarshalJSON converts this schema object or array from a JSON structure func (s *SchemaOrArray) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { diff --git a/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go index 69e93b60..d105d52c 100644 --- a/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go +++ b/vendor/k8s.io/kube-openapi/pkg/validation/spec/tag.go @@ -41,6 +41,9 @@ type Tag struct { // MarshalJSON marshal this to JSON func (t Tag) MarshalJSON() ([]byte, error) { + if internal.UseOptimizedJSONMarshaling { + return internal.DeterministicMarshal(t) + } b1, err := json.Marshal(t.TagProps) if err != nil { return nil, err @@ -52,6 +55,16 @@ func (t Tag) MarshalJSON() ([]byte, error) { return swag.ConcatJSON(b1, b2), nil } +func (t Tag) MarshalNextJSON(opts jsonv2.MarshalOptions, enc *jsonv2.Encoder) error { + var x struct { + Extensions + TagProps + } + x.Extensions = internal.SanitizeExtensions(t.Extensions) + x.TagProps = t.TagProps + return opts.MarshalNext(enc, x) +} + // UnmarshalJSON marshal this from JSON func (t *Tag) UnmarshalJSON(data []byte) error { if internal.UseOptimizedJSONUnmarshaling { @@ -72,11 +85,7 @@ func (t *Tag) UnmarshalNextJSON(opts jsonv2.UnmarshalOptions, dec *jsonv2.Decode if err := opts.UnmarshalNext(dec, &x); err != nil { return err } - x.Extensions.sanitize() - if len(x.Extensions) == 0 { - x.Extensions = nil - } - t.VendorExtensible.Extensions = x.Extensions + t.Extensions = internal.SanitizeExtensions(x.Extensions) t.TagProps = x.TagProps return nil } diff --git a/vendor/k8s.io/utils/buffer/ring_growing.go b/vendor/k8s.io/utils/buffer/ring_growing.go new file mode 100644 index 00000000..86965a51 --- /dev/null +++ b/vendor/k8s.io/utils/buffer/ring_growing.go @@ -0,0 +1,72 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package buffer + +// RingGrowing is a growing ring buffer. +// Not thread safe. +type RingGrowing struct { + data []interface{} + n int // Size of Data + beg int // First available element + readable int // Number of data items available +} + +// NewRingGrowing constructs a new RingGrowing instance with provided parameters. +func NewRingGrowing(initialSize int) *RingGrowing { + return &RingGrowing{ + data: make([]interface{}, initialSize), + n: initialSize, + } +} + +// ReadOne reads (consumes) first item from the buffer if it is available, otherwise returns false. 
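RingGrowing above is a plain FIFO ring that doubles its backing slice when full and nils out consumed slots to help the GC. A short usage sketch, assuming k8s.io/utils is importable as it is in this vendor tree:

```
package main

import (
	"fmt"

	"k8s.io/utils/buffer"
)

func main() {
	// Start at capacity 1 to force the doubling path in WriteOne.
	ring := buffer.NewRingGrowing(1)
	for i := 0; i < 3; i++ {
		ring.WriteOne(i)
	}
	for {
		v, ok := ring.ReadOne() // FIFO: 0, 1, 2, then ok == false
		if !ok {
			break
		}
		fmt.Println(v)
	}
}
```

Note the doc comment above: the type is not thread safe, so callers must supply their own locking.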
+func (r *RingGrowing) ReadOne() (data interface{}, ok bool) {
+	if r.readable == 0 {
+		return nil, false
+	}
+	r.readable--
+	element := r.data[r.beg]
+	r.data[r.beg] = nil // Remove reference to the object to help GC
+	if r.beg == r.n-1 {
+		// Was the last element
+		r.beg = 0
+	} else {
+		r.beg++
+	}
+	return element, true
+}
+
+// WriteOne adds an item to the end of the buffer, growing it if it is full.
+func (r *RingGrowing) WriteOne(data interface{}) {
+	if r.readable == r.n {
+		// Time to grow
+		newN := r.n * 2
+		newData := make([]interface{}, newN)
+		to := r.beg + r.readable
+		if to <= r.n {
+			copy(newData, r.data[r.beg:to])
+		} else {
+			copied := copy(newData, r.data[r.beg:])
+			copy(newData[copied:], r.data[:(to%r.n)])
+		}
+		r.beg = 0
+		r.data = newData
+		r.n = newN
+	}
+	r.data[(r.readable+r.beg)%r.n] = data
+	r.readable++
+}
diff --git a/vendor/k8s.io/utils/net/ipfamily.go b/vendor/k8s.io/utils/net/ipfamily.go
new file mode 100644
index 00000000..1a51fa39
--- /dev/null
+++ b/vendor/k8s.io/utils/net/ipfamily.go
@@ -0,0 +1,181 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package net
+
+import (
+	"fmt"
+	"net"
+)
+
+// IPFamily refers to a specific family if not empty, i.e. "4" or "6".
+type IPFamily string
+
+// Constants for valid IPFamilys:
+const (
+	IPFamilyUnknown IPFamily = ""
+
+	IPv4 IPFamily = "4"
+	IPv6 IPFamily = "6"
+)
+
+// IsDualStackIPs returns true if:
+// - all elements of ips are valid
+// - at least one IP from each family (v4 and v6) is present
+func IsDualStackIPs(ips []net.IP) (bool, error) {
+	v4Found := false
+	v6Found := false
+	for i, ip := range ips {
+		switch IPFamilyOf(ip) {
+		case IPv4:
+			v4Found = true
+		case IPv6:
+			v6Found = true
+		default:
+			return false, fmt.Errorf("invalid IP[%d]: %v", i, ip)
+		}
+	}
+
+	return (v4Found && v6Found), nil
+}
+
+// IsDualStackIPStrings returns true if:
+// - all elements of ips can be parsed as IPs
+// - at least one IP from each family (v4 and v6) is present
+func IsDualStackIPStrings(ips []string) (bool, error) {
+	parsedIPs := make([]net.IP, 0, len(ips))
+	for i, ip := range ips {
+		parsedIP := ParseIPSloppy(ip)
+		if parsedIP == nil {
+			return false, fmt.Errorf("invalid IP[%d]: %v", i, ip)
+		}
+		parsedIPs = append(parsedIPs, parsedIP)
+	}
+	return IsDualStackIPs(parsedIPs)
+}
+
+// IsDualStackCIDRs returns true if:
+// - all elements of cidrs are non-nil
+// - at least one CIDR from each family (v4 and v6) is present
+func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) {
+	v4Found := false
+	v6Found := false
+	for i, cidr := range cidrs {
+		switch IPFamilyOfCIDR(cidr) {
+		case IPv4:
+			v4Found = true
+		case IPv6:
+			v6Found = true
+		default:
+			return false, fmt.Errorf("invalid CIDR[%d]: %v", i, cidr)
+		}
+	}
+
+	return (v4Found && v6Found), nil
+}
+
+// IsDualStackCIDRStrings returns if
+// - all elements of cidrs can be parsed as CIDRs
+// - at least one CIDR from each family (v4 and v6) is present
+func IsDualStackCIDRStrings(cidrs []string) (bool, error) {
+	parsedCIDRs, err := ParseCIDRs(cidrs)
+	if err != nil {
+		return false, err
+	}
+	return IsDualStackCIDRs(parsedCIDRs)
+}
+
+// IPFamilyOf returns the IP family of ip, or IPFamilyUnknown if it is invalid.
+func IPFamilyOf(ip net.IP) IPFamily {
+	switch {
+	case ip.To4() != nil:
+		return IPv4
+	case ip.To16() != nil:
+		return IPv6
+	default:
+		return IPFamilyUnknown
+	}
+}
+
+// IPFamilyOfString returns the IP family of ip, or IPFamilyUnknown if ip cannot
+// be parsed as an IP.
+func IPFamilyOfString(ip string) IPFamily {
+	return IPFamilyOf(ParseIPSloppy(ip))
+}
+
+// IPFamilyOfCIDR returns the IP family of cidr.
+func IPFamilyOfCIDR(cidr *net.IPNet) IPFamily {
+	if cidr == nil {
+		return IPFamilyUnknown
+	}
+	return IPFamilyOf(cidr.IP)
+}
+
+// IPFamilyOfCIDRString returns the IP family of cidr.
+func IPFamilyOfCIDRString(cidr string) IPFamily {
+	ip, _, _ := ParseCIDRSloppy(cidr)
+	return IPFamilyOf(ip)
+}
+
+// IsIPv6 returns true if netIP is IPv6 (and false if it is IPv4, nil, or invalid).
+func IsIPv6(netIP net.IP) bool {
+	return IPFamilyOf(netIP) == IPv6
+}
+
+// IsIPv6String returns true if ip contains a single IPv6 address and nothing else. It
+// returns false if ip is an empty string, an IPv4 address, or anything else that is not a
+// single IPv6 address.
+func IsIPv6String(ip string) bool {
+	return IPFamilyOfString(ip) == IPv6
+}
+
+// IsIPv6CIDR returns true if a cidr is a valid IPv6 CIDR. It returns false if cidr is
+// nil or an IPv4 CIDR. Its behavior is not defined if cidr is invalid.
+func IsIPv6CIDR(cidr *net.IPNet) bool {
+	return IPFamilyOfCIDR(cidr) == IPv6
+}
+
+// IsIPv6CIDRString returns true if cidr contains a single IPv6 CIDR and nothing else. It
+// returns false if cidr is an empty string, an IPv4 CIDR, or anything else that is not a
+// single valid IPv6 CIDR.
+func IsIPv6CIDRString(cidr string) bool {
+	return IPFamilyOfCIDRString(cidr) == IPv6
+}
+
+// IsIPv4 returns true if netIP is IPv4 (and false if it is IPv6, nil, or invalid).
+func IsIPv4(netIP net.IP) bool {
+	return IPFamilyOf(netIP) == IPv4
+}
+
+// IsIPv4String returns true if ip contains a single IPv4 address and nothing else. It
+// returns false if ip is an empty string, an IPv6 address, or anything else that is not a
+// single IPv4 address.
+func IsIPv4String(ip string) bool {
+	return IPFamilyOfString(ip) == IPv4
+}
+
+// IsIPv4CIDR returns true if cidr is a valid IPv4 CIDR. It returns false if cidr is nil
+// or an IPv6 CIDR. Its behavior is not defined if cidr is invalid.
+func IsIPv4CIDR(cidr *net.IPNet) bool {
+	return IPFamilyOfCIDR(cidr) == IPv4
+}
+
+// IsIPv4CIDRString returns true if cidr contains a single IPv4 CIDR and nothing else. It
+// returns false if cidr is an empty string, an IPv6 CIDR, or anything else that is not a
+// single valid IPv4 CIDR.
+func IsIPv4CIDRString(cidr string) bool {
+	return IPFamilyOfCIDRString(cidr) == IPv4
+}
diff --git a/vendor/k8s.io/utils/net/net.go b/vendor/k8s.io/utils/net/net.go
index b7c08e2e..704c1f23 100644
--- a/vendor/k8s.io/utils/net/net.go
+++ b/vendor/k8s.io/utils/net/net.go
@@ -29,138 +29,16 @@ import (
 // order is maintained
 func ParseCIDRs(cidrsString []string) ([]*net.IPNet, error) {
 	cidrs := make([]*net.IPNet, 0, len(cidrsString))
-	for _, cidrString := range cidrsString {
+	for i, cidrString := range cidrsString {
 		_, cidr, err := ParseCIDRSloppy(cidrString)
 		if err != nil {
-			return nil, fmt.Errorf("failed to parse cidr value:%q with error:%v", cidrString, err)
+			return nil, fmt.Errorf("invalid CIDR[%d]: %v (%v)", i, cidrString, err)
 		}
 		cidrs = append(cidrs, cidr)
 	}
 	return cidrs, nil
 }
 
-// IsDualStackIPs returns if a slice of ips is:
-// - all are valid ips
-// - at least one ip from each family (v4 or v6)
-func IsDualStackIPs(ips []net.IP) (bool, error) {
-	v4Found := false
-	v6Found := false
-	for _, ip := range ips {
-		if ip == nil {
-			return false, fmt.Errorf("ip %v is invalid", ip)
-		}
-
-		if v4Found && v6Found {
-			continue
-		}
-
-		if IsIPv6(ip) {
-			v6Found = true
-			continue
-		}
-
-		v4Found = true
-	}
-
-	return (v4Found && v6Found), nil
-}
-
-// IsDualStackIPStrings returns if
-// - all are valid ips
-// - at least one ip from each family (v4 or v6)
-func IsDualStackIPStrings(ips []string) (bool, error) {
-	parsedIPs := make([]net.IP, 0, len(ips))
-	for _, ip := range ips {
-		parsedIP := ParseIPSloppy(ip)
-		parsedIPs = append(parsedIPs, parsedIP)
-	}
-	return IsDualStackIPs(parsedIPs)
-}
-
-// IsDualStackCIDRs returns if
-// - all are valid cidrs
-// - at least one cidr from each family (v4 or v6)
-func IsDualStackCIDRs(cidrs []*net.IPNet) (bool, error) {
-	v4Found := false
-	v6Found := false
-	for _, cidr := range cidrs {
-		if cidr == nil {
-			return false, fmt.Errorf("cidr %v is invalid", cidr)
-		}
-
-		if v4Found && v6Found {
-			continue
-		}
-
-		if IsIPv6(cidr.IP) {
-			v6Found = true
-			continue
-		}
-		v4Found = true
-	}
-
-	return v4Found && v6Found, nil
-}
-
-// IsDualStackCIDRStrings returns if
-// - all are valid cidrs
-// - at least one cidr from each family (v4 or v6)
-func IsDualStackCIDRStrings(cidrs []string) (bool, error) {
-	parsedCIDRs, err := ParseCIDRs(cidrs)
-	if err != nil {
-		return false, err
-	}
-	return IsDualStackCIDRs(parsedCIDRs)
-}
-
-// IsIPv6 returns if netIP is IPv6.
-func IsIPv6(netIP net.IP) bool {
-	return netIP != nil && netIP.To4() == nil
-}
-
-// IsIPv6String returns if ip is IPv6.
-func IsIPv6String(ip string) bool {
-	netIP := ParseIPSloppy(ip)
-	return IsIPv6(netIP)
-}
-
-// IsIPv6CIDRString returns if cidr is IPv6.
-// This assumes cidr is a valid CIDR.
-func IsIPv6CIDRString(cidr string) bool {
-	ip, _, _ := ParseCIDRSloppy(cidr)
-	return IsIPv6(ip)
-}
-
-// IsIPv6CIDR returns if a cidr is ipv6
-func IsIPv6CIDR(cidr *net.IPNet) bool {
-	ip := cidr.IP
-	return IsIPv6(ip)
-}
-
-// IsIPv4 returns if netIP is IPv4.
-func IsIPv4(netIP net.IP) bool {
-	return netIP != nil && netIP.To4() != nil
-}
-
-// IsIPv4String returns if ip is IPv4.
-func IsIPv4String(ip string) bool {
-	netIP := ParseIPSloppy(ip)
-	return IsIPv4(netIP)
-}
-
-// IsIPv4CIDR returns if a cidr is ipv4
-func IsIPv4CIDR(cidr *net.IPNet) bool {
-	ip := cidr.IP
-	return IsIPv4(ip)
-}
-
-// IsIPv4CIDRString returns if cidr is IPv4.
-// This assumes cidr is a valid CIDR.
-func IsIPv4CIDRString(cidr string) bool {
-	ip, _, _ := ParseCIDRSloppy(cidr)
-	return IsIPv4(ip)
-}
-
 // ParsePort parses a string representing an IP port. If the string is not a
 // valid port number, this returns an error.
 func ParsePort(port string, allowZero bool) (int, error) {
diff --git a/vendor/k8s.io/utils/net/port.go b/vendor/k8s.io/utils/net/port.go
index 7ac04f0d..c6a53fa0 100644
--- a/vendor/k8s.io/utils/net/port.go
+++ b/vendor/k8s.io/utils/net/port.go
@@ -23,15 +23,6 @@ import (
 	"strings"
 )
 
-// IPFamily refers to a specific family if not empty, i.e. "4" or "6".
-type IPFamily string
-
-// Constants for valid IPFamilys:
-const (
-	IPv4 IPFamily = "4"
-	IPv6 = "6"
-)
-
 // Protocol is a network protocol support by LocalPort.
 type Protocol string
 
@@ -67,7 +58,7 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco
 	if protocol != TCP && protocol != UDP {
 		return nil, fmt.Errorf("Unsupported protocol %s", protocol)
 	}
-	if ipFamily != "" && ipFamily != "4" && ipFamily != "6" {
+	if ipFamily != IPFamilyUnknown && ipFamily != IPv4 && ipFamily != IPv6 {
 		return nil, fmt.Errorf("Invalid IP family %s", ipFamily)
 	}
 	if ip != "" {
@@ -75,9 +66,10 @@ func NewLocalPort(desc, ip string, ipFamily IPFamily, port int, protocol Protoco
 		if parsedIP == nil {
 			return nil, fmt.Errorf("invalid ip address %s", ip)
 		}
-		asIPv4 := parsedIP.To4()
-		if asIPv4 == nil && ipFamily == IPv4 || asIPv4 != nil && ipFamily == IPv6 {
-			return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily)
+		if ipFamily != IPFamilyUnknown {
+			if IPFamilyOf(parsedIP) != ipFamily {
+				return nil, fmt.Errorf("ip address and family mismatch %s, %s", ip, ipFamily)
+			}
 		}
 	}
 	return &LocalPort{Description: desc, IP: ip, IPFamily: ipFamily, Port: port, Protocol: protocol}, nil
diff --git a/vendor/k8s.io/utils/trace/README.md b/vendor/k8s.io/utils/trace/README.md
new file mode 100644
index 00000000..1e9c6938
--- /dev/null
+++ b/vendor/k8s.io/utils/trace/README.md
@@ -0,0 +1,67 @@
+# Trace
+
+This package provides an interface for recording the latency of operations and logging details
+about all operations where the latency exceeds a limit.
+
+## Usage
+
+To create a trace:
+
+```go
+func doSomething() {
+    opTrace := trace.New("operation", Field{Key: "fieldKey1", Value: "fieldValue1"})
+    defer opTrace.LogIfLong(100 * time.Millisecond)
+    // do something
+}
+```
+
+To split a trace into multiple steps:
+
+```go
+func doSomething() {
+    opTrace := trace.New("operation")
+    defer opTrace.LogIfLong(100 * time.Millisecond)
+    // do step 1
+    opTrace.Step("step1", Field{Key: "stepFieldKey1", Value: "stepFieldValue1"})
+    // do step 2
+    opTrace.Step("step2")
+}
+```
+
+To nest traces:
+
+```go
+func doSomething() {
+    rootTrace := trace.New("rootOperation")
+    defer rootTrace.LogIfLong(100 * time.Millisecond)
+
+    func() {
+        nestedTrace := rootTrace.Nest("nested", Field{Key: "nestedFieldKey1", Value: "nestedFieldValue1"})
+        defer nestedTrace.LogIfLong(50 * time.Millisecond)
+        // do nested operation
+    }()
+}
+```
+
+Traces can also be logged unconditionally or introspected:
+
+```go
+opTrace.TotalTime() // Duration since the Trace was created
+opTrace.Log() // unconditionally log the trace
+```
+
+### Using context.Context to nest traces
+
+`context.Context` can be used to manage nested traces. Create traces by calling `trace.FromContext(ctx).Nest`.
+This is safe even if there is no parent trace already in the context because `((*Trace)(nil)).Nest()` returns
+a top level trace.
+
+```go
+func doSomething(ctx context.Context) {
+    opTrace := trace.FromContext(ctx).Nest("operation") // create a trace, possibly nested
+    ctx = trace.ContextWithTrace(ctx, opTrace) // make this trace the parent trace of the context
+    defer opTrace.LogIfLong(50 * time.Millisecond)
+
+    doSomethingElse(ctx)
+}
+```
\ No newline at end of file
diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go
new file mode 100644
index 00000000..a0b07a6d
--- /dev/null
+++ b/vendor/k8s.io/utils/trace/trace.go
@@ -0,0 +1,300 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package trace
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"math/rand"
+	"sync"
+	"time"
+
+	"k8s.io/klog/v2"
+)
+
+var klogV = func(lvl klog.Level) bool {
+	return klog.V(lvl).Enabled()
+}
+
+// Field is a key value pair that provides additional details about the trace.
+type Field struct {
+	Key   string
+	Value interface{}
+}
+
+func (f Field) format() string {
+	return fmt.Sprintf("%s:%v", f.Key, f.Value)
+}
+
+func writeFields(b *bytes.Buffer, l []Field) {
+	for i, f := range l {
+		b.WriteString(f.format())
+		if i < len(l)-1 {
+			b.WriteString(",")
+		}
+	}
+}
+
+func writeTraceItemSummary(b *bytes.Buffer, msg string, totalTime time.Duration, startTime time.Time, fields []Field) {
+	b.WriteString(fmt.Sprintf("%q ", msg))
+	if len(fields) > 0 {
+		writeFields(b, fields)
+		b.WriteString(" ")
+	}
+
+	b.WriteString(fmt.Sprintf("%vms (%v)", durationToMilliseconds(totalTime), startTime.Format("15:04:05.000")))
+}
+
+func durationToMilliseconds(timeDuration time.Duration) int64 {
+	return timeDuration.Nanoseconds() / 1e6
+}
+
+type traceItem interface {
+	// time returns when the trace was recorded as completed.
+	time() time.Time
+	// writeItem outputs the traceItem to the buffer. If stepThreshold is non-nil, only output the
+	// traceItem if its duration exceeds the stepThreshold.
+	// Each line of output is prefixed by formatter to visually indent nested items.
+	writeItem(b *bytes.Buffer, formatter string, startTime time.Time, stepThreshold *time.Duration)
+}
+
+type traceStep struct {
+	stepTime time.Time
+	msg      string
+	fields   []Field
+}
+
+func (s traceStep) time() time.Time {
+	return s.stepTime
+}
+
+func (s traceStep) writeItem(b *bytes.Buffer, formatter string, startTime time.Time, stepThreshold *time.Duration) {
+	stepDuration := s.stepTime.Sub(startTime)
+	if stepThreshold == nil || *stepThreshold == 0 || stepDuration >= *stepThreshold || klogV(4) {
+		b.WriteString(fmt.Sprintf("%s---", formatter))
+		writeTraceItemSummary(b, s.msg, stepDuration, s.stepTime, s.fields)
+	}
+}
+
+// Trace keeps track of a set of "steps" and allows us to log a specific
+// step if it took longer than its share of the total allowed time
+type Trace struct {
+	// constant fields
+	name        string
+	fields      []Field
+	startTime   time.Time
+	parentTrace *Trace
+	// fields guarded by a lock
+	lock       sync.RWMutex
+	threshold  *time.Duration
+	endTime    *time.Time
+	traceItems []traceItem
+}
+
+func (t *Trace) time() time.Time {
+	if t.endTime != nil {
+		return *t.endTime
+	}
+	return t.startTime // if the trace is incomplete, don't assume an end time
+}
+
+func (t *Trace) writeItem(b *bytes.Buffer, formatter string, startTime time.Time, stepThreshold *time.Duration) {
+	if t.durationIsWithinThreshold() || klogV(4) {
+		b.WriteString(fmt.Sprintf("%v[", formatter))
+		writeTraceItemSummary(b, t.name, t.TotalTime(), t.startTime, t.fields)
+		if st := t.calculateStepThreshold(); st != nil {
+			stepThreshold = st
+		}
+		t.writeTraceSteps(b, formatter+" ", stepThreshold)
+		b.WriteString("]")
+		return
+	}
+	// If the trace should not be written, still check for nested traces that should be written
+	for _, s := range t.traceItems {
+		if nestedTrace, ok := s.(*Trace); ok {
+			nestedTrace.writeItem(b, formatter, startTime, stepThreshold)
+		}
+	}
+}
+
+// New creates a Trace with the specified name. The name identifies the operation to be traced. The
+// Fields add key value pairs to provide additional details about the trace, such as operation inputs.
+func New(name string, fields ...Field) *Trace {
+	return &Trace{name: name, startTime: time.Now(), fields: fields}
+}
+
+// Step adds a new step with a specific message. Call this at the end of an execution step to record
+// how long it took. The Fields add key value pairs to provide additional details about the trace
+// step.
+func (t *Trace) Step(msg string, fields ...Field) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+	if t.traceItems == nil {
+		// traces almost always have less than 6 steps, do this to avoid more than a single allocation
+		t.traceItems = make([]traceItem, 0, 6)
+	}
+	t.traceItems = append(t.traceItems, traceStep{stepTime: time.Now(), msg: msg, fields: fields})
+}
+
+// Nest adds a nested trace with the given message and fields and returns it.
+// As a convenience, if the receiver is nil, returns a top level trace. This allows
+// one to call FromContext(ctx).Nest without having to check if the trace
+// in the context is nil.
+func (t *Trace) Nest(msg string, fields ...Field) *Trace {
+	newTrace := New(msg, fields...)
+	if t != nil {
+		newTrace.parentTrace = t
+		t.lock.Lock()
+		t.traceItems = append(t.traceItems, newTrace)
+		t.lock.Unlock()
+	}
+	return newTrace
+}
+
+// Log is used to dump all the steps in the Trace. It also logs the nested trace messages using indentation.
+// If the Trace is nested it is not immediately logged. Instead, it is logged when the trace it is nested within
+// is logged.
+func (t *Trace) Log() {
+	endTime := time.Now()
+	t.lock.Lock()
+	t.endTime = &endTime
+	t.lock.Unlock()
+	// an explicit logging request should dump all the steps out at the higher level
+	if t.parentTrace == nil { // We don't start logging until Log or LogIfLong is called on the root trace
+		t.logTrace()
+	}
+}
+
+// LogIfLong only logs the trace if the duration of the trace exceeds the threshold.
+// Only steps that took longer than their share or the given threshold are logged.
+// If klog is at verbosity level 4 or higher and the trace took longer than the threshold,
+// all substeps and subtraces are logged. Otherwise, only those which took longer than
+// their own threshold.
+// If the Trace is nested it is not immediately logged. Instead, it is logged when the trace it
+// is nested within is logged.
+func (t *Trace) LogIfLong(threshold time.Duration) {
+	t.lock.Lock()
+	t.threshold = &threshold
+	t.lock.Unlock()
+	t.Log()
+}
+
+// logTrace finds all traces in a hierarchy of nested traces that should be logged but do not have any
+// parents that will be logged, due to threshold limits, and logs them as top level traces.
+func (t *Trace) logTrace() {
+	t.lock.RLock()
+	defer t.lock.RUnlock()
+	if t.durationIsWithinThreshold() {
+		var buffer bytes.Buffer
+		traceNum := rand.Int31()
+
+		totalTime := t.endTime.Sub(t.startTime)
+		buffer.WriteString(fmt.Sprintf("Trace[%d]: %q ", traceNum, t.name))
+		if len(t.fields) > 0 {
+			writeFields(&buffer, t.fields)
+			buffer.WriteString(" ")
+		}
+
+		// if any step took more than its share of the total allowed time, it deserves a higher log level
+		buffer.WriteString(fmt.Sprintf("(%v) (total time: %vms):", t.startTime.Format("02-Jan-2006 15:04:05.000"), totalTime.Milliseconds()))
+		stepThreshold := t.calculateStepThreshold()
+		t.writeTraceSteps(&buffer, fmt.Sprintf("\nTrace[%d]: ", traceNum), stepThreshold)
+		buffer.WriteString(fmt.Sprintf("\nTrace[%d]: [%v] [%v] END\n", traceNum, t.endTime.Sub(t.startTime), totalTime))
+
+		klog.Info(buffer.String())
+		return
+	}
+
+	// If the trace should not be logged, still check if nested traces should be logged
+	for _, s := range t.traceItems {
+		if nestedTrace, ok := s.(*Trace); ok {
+			nestedTrace.logTrace()
+		}
+	}
+}
+
+func (t *Trace) writeTraceSteps(b *bytes.Buffer, formatter string, stepThreshold *time.Duration) {
+	lastStepTime := t.startTime
+	for _, stepOrTrace := range t.traceItems {
+		stepOrTrace.writeItem(b, formatter, lastStepTime, stepThreshold)
+		lastStepTime = stepOrTrace.time()
+	}
+}
+
+func (t *Trace) durationIsWithinThreshold() bool {
+	if t.endTime == nil { // we don't assume incomplete traces meet the threshold
+		return false
+	}
+	return t.threshold == nil || *t.threshold == 0 || t.endTime.Sub(t.startTime) >= *t.threshold
+}
+
+// TotalTime can be used to figure out how long it took since the Trace was created
+func (t *Trace) TotalTime() time.Duration {
+	return time.Since(t.startTime)
+}
+
+// calculateStepThreshold returns a threshold for the individual steps of a trace, or nil if there is no threshold and
+// all steps should be written.
+func (t *Trace) calculateStepThreshold() *time.Duration {
+	if t.threshold == nil {
+		return nil
+	}
+	lenTrace := len(t.traceItems) + 1
+	traceThreshold := *t.threshold
+	for _, s := range t.traceItems {
+		nestedTrace, ok := s.(*Trace)
+		if ok {
+			nestedTrace.lock.RLock()
+			if nestedTrace.threshold != nil {
+				traceThreshold = traceThreshold - *nestedTrace.threshold
+				lenTrace--
+			}
+			nestedTrace.lock.RUnlock()
+		}
+	}
+
+	// the limit threshold is used when the threshold (remaining after subtracting
+	// that of the child trace) is getting very close to zero to prevent unnecessary logging
+	limitThreshold := *t.threshold / 4
+	if traceThreshold < limitThreshold {
+		traceThreshold = limitThreshold
+		lenTrace = len(t.traceItems) + 1
+	}
+
+	stepThreshold := traceThreshold / time.Duration(lenTrace)
+	return &stepThreshold
+}
+
+// ContextTraceKey provides a common key for traces in context.Context values.
+type ContextTraceKey struct{}
+
+// FromContext returns the trace keyed by ContextTraceKey in the context values, if one
+// is present, or nil if there is no trace in the Context.
+// It is safe to call Nest() on the returned value even if it is nil because ((*Trace)(nil)).Nest returns a top level
+// trace.
+func FromContext(ctx context.Context) *Trace {
+	if v, ok := ctx.Value(ContextTraceKey{}).(*Trace); ok {
+		return v
+	}
+	return nil
+}
+
+// ContextWithTrace returns a context with trace included in the context values, keyed by ContextTraceKey.
+func ContextWithTrace(ctx context.Context, trace *Trace) context.Context {
+	return context.WithValue(ctx, ContextTraceKey{}, trace)
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 67646fc8..a45d513c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,22 +1,49 @@
+# github.com/crossplane/crossplane-runtime v0.19.1
+## explicit; go 1.18
+github.com/crossplane/crossplane-runtime/apis/common/v1
+github.com/crossplane/crossplane-runtime/pkg/errors
+github.com/crossplane/crossplane-runtime/pkg/meta
+github.com/crossplane/crossplane-runtime/pkg/resource
 # github.com/davecgh/go-spew v1.1.1
 ## explicit
 github.com/davecgh/go-spew/spew
-# github.com/emicklei/go-restful/v3 v3.9.0
+# github.com/edgefarm/provider-nats v0.2.0
+## explicit; go 1.19
+github.com/edgefarm/provider-nats/apis/consumer/v1alpha1
+github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer
+github.com/edgefarm/provider-nats/apis/consumer/v1alpha1/consumer/errors
+github.com/edgefarm/provider-nats/apis/stream/v1alpha1
+github.com/edgefarm/provider-nats/apis/stream/v1alpha1/stream
+github.com/edgefarm/provider-nats/apis/v1alpha1
+github.com/edgefarm/provider-nats/internal/convert
+# github.com/edgefarm/provider-natssecrets v0.2.2
+## explicit; go 1.20
+github.com/edgefarm/provider-natssecrets/apis/account/v1alpha1
+github.com/edgefarm/provider-natssecrets/apis/user/v1alpha1
+# github.com/edgefarm/vault-plugin-secrets-nats v1.1.0
+## explicit; go 1.18
+github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/account/v1alpha1
+github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/common
+github.com/edgefarm/vault-plugin-secrets-nats/pkg/claims/user/v1alpha1
+# github.com/emicklei/go-restful/v3 v3.10.1
 ## explicit; go 1.13
 github.com/emicklei/go-restful/v3
 github.com/emicklei/go-restful/v3/log
+# github.com/evanphx/json-patch/v5 v5.6.0
+## explicit; go 1.12
+github.com/evanphx/json-patch/v5
 # github.com/go-logr/logr v1.2.3
 ## explicit; go 1.16
 github.com/go-logr/logr
-# github.com/go-openapi/jsonpointer v0.19.5
+# github.com/go-openapi/jsonpointer v0.19.6
 ## explicit; go 1.13
 github.com/go-openapi/jsonpointer
-# github.com/go-openapi/jsonreference v0.20.0
+# github.com/go-openapi/jsonreference v0.20.2
 ## explicit; go 1.13
 github.com/go-openapi/jsonreference
 github.com/go-openapi/jsonreference/internal
-# github.com/go-openapi/swag v0.19.14
-## explicit; go 1.11
+# github.com/go-openapi/swag v0.22.3
+## explicit; go 1.18
 github.com/go-openapi/swag
 # github.com/gogo/protobuf v1.3.2
 ## explicit; go 1.15
@@ -29,7 +56,7 @@ github.com/golang/protobuf/ptypes
 github.com/golang/protobuf/ptypes/any
 github.com/golang/protobuf/ptypes/duration
 github.com/golang/protobuf/ptypes/timestamp
-# github.com/google/gnostic v0.5.7-v3refs
+# github.com/google/gnostic v0.6.9
 ## explicit; go 1.12
 github.com/google/gnostic/compiler
 github.com/google/gnostic/extensions
@@ -43,11 +70,12 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/gofuzz v1.1.0
+# github.com/google/gofuzz v1.2.0
 ## explicit; go 1.12
 github.com/google/gofuzz
-# github.com/imdario/mergo v0.3.6
-## explicit
+github.com/google/gofuzz/bytesource
+# github.com/imdario/mergo v0.3.13
+## explicit; go 1.13
 github.com/imdario/mergo
 # github.com/josharian/intern v1.0.0
 ## explicit; go 1.5
@@ -55,7 +83,7 @@ github.com/josharian/intern
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/mailru/easyjson v0.7.6
+# github.com/mailru/easyjson v0.7.7
 ## explicit; go 1.12
 github.com/mailru/easyjson/buffer
 github.com/mailru/easyjson/jlexer
@@ -69,25 +97,53 @@ github.com/modern-go/reflect2
 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 ## explicit
 github.com/munnerz/goautoneg
+# github.com/nats-io/jwt/v2 v2.3.0
+## explicit; go 1.16
+github.com/nats-io/jwt/v2
+# github.com/nats-io/nats.go v1.24.0
+## explicit; go 1.19
+github.com/nats-io/nats.go
+github.com/nats-io/nats.go/encoders/builtin
+github.com/nats-io/nats.go/util
+# github.com/nats-io/nkeys v0.3.0
+## explicit; go 1.16
+github.com/nats-io/nkeys
+# github.com/nats-io/nuid v1.0.1
+## explicit
+github.com/nats-io/nuid
+# github.com/pkg/errors v0.9.1
+## explicit
+github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.0
 ## explicit
 github.com/pmezard/go-difflib/difflib
+# github.com/spf13/afero v1.9.4
+## explicit; go 1.16
+github.com/spf13/afero
+github.com/spf13/afero/internal/common
+github.com/spf13/afero/mem
 # github.com/spf13/pflag v1.0.5
 ## explicit; go 1.12
 github.com/spf13/pflag
 # github.com/stretchr/testify v1.8.2
 ## explicit; go 1.13
 github.com/stretchr/testify/assert
+# golang.org/x/crypto v0.6.0
+## explicit; go 1.17
+golang.org/x/crypto/ed25519
+# golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2
+## explicit; go 1.18
+golang.org/x/exp/constraints
+golang.org/x/exp/slices
 # golang.org/x/net v0.7.0
 ## explicit; go 1.17
 golang.org/x/net/context
-golang.org/x/net/context/ctxhttp
 golang.org/x/net/http/httpguts
 golang.org/x/net/http2
 golang.org/x/net/http2/hpack
 golang.org/x/net/idna
-# golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2
-## explicit; go 1.15
+# golang.org/x/oauth2 v0.5.0
+## explicit; go 1.17
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
 # golang.org/x/sys v0.5.0
@@ -101,11 +157,12 @@ golang.org/x/sys/windows
 golang.org/x/term
 # golang.org/x/text v0.7.0
 ## explicit; go 1.17
+golang.org/x/text/runes
 golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm -# golang.org/x/time v0.0.0-20220411224347-583f2d630306 +# golang.org/x/time v0.3.0 ## explicit golang.org/x/time/rate # google.golang.org/appengine v1.6.7 @@ -217,8 +274,11 @@ k8s.io/api/storage/v1beta1 k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource +k8s.io/apimachinery/pkg/apis/meta/internalversion +k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/fields @@ -233,21 +293,26 @@ k8s.io/apimachinery/pkg/runtime/serializer/streaming k8s.io/apimachinery/pkg/runtime/serializer/versioning k8s.io/apimachinery/pkg/selection k8s.io/apimachinery/pkg/types +k8s.io/apimachinery/pkg/util/cache +k8s.io/apimachinery/pkg/util/diff k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json k8s.io/apimachinery/pkg/util/managedfields +k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net k8s.io/apimachinery/pkg/util/runtime k8s.io/apimachinery/pkg/util/sets +k8s.io/apimachinery/pkg/util/strategicpatch k8s.io/apimachinery/pkg/util/validation k8s.io/apimachinery/pkg/util/validation/field k8s.io/apimachinery/pkg/util/wait k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch +k8s.io/apimachinery/third_party/forked/golang/json k8s.io/apimachinery/third_party/forked/golang/reflect # k8s.io/client-go v0.26.3 ## explicit; go 1.19 @@ -352,6 +417,7 @@ k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 k8s.io/client-go/kubernetes/typed/storage/v1 k8s.io/client-go/kubernetes/typed/storage/v1alpha1 k8s.io/client-go/kubernetes/typed/storage/v1beta1 +k8s.io/client-go/metadata k8s.io/client-go/openapi k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/install @@ -361,12 +427,15 @@ k8s.io/client-go/pkg/version k8s.io/client-go/plugin/pkg/client/auth/exec k8s.io/client-go/rest k8s.io/client-go/rest/watch +k8s.io/client-go/restmapper k8s.io/client-go/tools/auth +k8s.io/client-go/tools/cache k8s.io/client-go/tools/clientcmd k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/clientcmd/api/latest k8s.io/client-go/tools/clientcmd/api/v1 k8s.io/client-go/tools/metrics +k8s.io/client-go/tools/pager k8s.io/client-go/tools/reference k8s.io/client-go/transport k8s.io/client-go/util/cert @@ -374,8 +443,9 @@ k8s.io/client-go/util/connrotation k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil +k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/klog/v2 v2.80.1 +# k8s.io/klog/v2 v2.90.0 ## explicit; go 1.13 k8s.io/klog/v2 k8s.io/klog/v2/internal/buffer @@ -383,7 +453,7 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 +# k8s.io/kube-openapi v0.0.0-20230224204730-66828de6f33b ## explicit; go 1.18 k8s.io/kube-openapi/pkg/builder3/util k8s.io/kube-openapi/pkg/common @@ -397,15 +467,32 @@ k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util/proto k8s.io/kube-openapi/pkg/validation/spec -# k8s.io/utils v0.0.0-20221107191617-1a15be271d1d +# 
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 ## explicit; go 1.18 +k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/clock/testing k8s.io/utils/integer k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/net k8s.io/utils/strings/slices -# sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 +k8s.io/utils/trace +# sigs.k8s.io/controller-runtime v0.14.4 +## explicit; go 1.19 +sigs.k8s.io/controller-runtime/pkg/cache +sigs.k8s.io/controller-runtime/pkg/cache/internal +sigs.k8s.io/controller-runtime/pkg/client +sigs.k8s.io/controller-runtime/pkg/client/apiutil +sigs.k8s.io/controller-runtime/pkg/event +sigs.k8s.io/controller-runtime/pkg/internal/field/selector +sigs.k8s.io/controller-runtime/pkg/internal/log +sigs.k8s.io/controller-runtime/pkg/internal/objectutil +sigs.k8s.io/controller-runtime/pkg/log +sigs.k8s.io/controller-runtime/pkg/predicate +sigs.k8s.io/controller-runtime/pkg/reconcile +sigs.k8s.io/controller-runtime/pkg/runtime/inject +sigs.k8s.io/controller-runtime/pkg/scheme +# sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd ## explicit; go 1.18 sigs.k8s.io/json sigs.k8s.io/json/internal/golang/encoding/json diff --git a/vendor/sigs.k8s.io/controller-runtime/LICENSE b/vendor/sigs.k8s.io/controller-runtime/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go new file mode 100644 index 00000000..9827ea02 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/cache.go @@ -0,0 +1,516 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + "fmt" + "reflect" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + toolscache "k8s.io/client-go/tools/cache" + + "sigs.k8s.io/controller-runtime/pkg/cache/internal" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" +) + +var log = logf.RuntimeLog.WithName("object-cache") + +// Cache knows how to load Kubernetes objects, fetch informers to request +// to receive events for Kubernetes objects (at a low-level), +// and add indices to fields on the objects stored in the cache. +type Cache interface { + // Cache acts as a client to objects stored in the cache. + client.Reader + + // Cache loads informers and adds field indices. + Informers +} + +// Informers knows how to create or fetch informers for different +// group-version-kinds, and add indices to those informers. It's safe to call +// GetInformer from multiple threads. +type Informers interface { + // GetInformer fetches or constructs an informer for the given object that corresponds to a single + // API kind and resource. + GetInformer(ctx context.Context, obj client.Object) (Informer, error) + + // GetInformerForKind is similar to GetInformer, except that it takes a group-version-kind, instead + // of the underlying object. + GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) + + // Start runs all the informers known to this cache until the context is closed. + // It blocks. + Start(ctx context.Context) error + + // WaitForCacheSync waits for all the caches to sync. Returns false if it could not sync a cache. + WaitForCacheSync(ctx context.Context) bool + + // Informers knows how to add indices to the caches (informers) that it manages. + client.FieldIndexer +} + +// Informer - informer allows you interact with the underlying informer. +type Informer interface { + // AddEventHandler adds an event handler to the shared informer using the shared informer's resync + // period. Events to a single handler are delivered sequentially, but there is no coordination + // between different handlers. + // It returns a registration handle for the handler that can be used to remove + // the handler again. + AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) + // AddEventHandlerWithResyncPeriod adds an event handler to the shared informer using the + // specified resync period. Events to a single handler are delivered sequentially, but there is + // no coordination between different handlers. + // It returns a registration handle for the handler that can be used to remove + // the handler again and an error if the handler cannot be added. + AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error) + // RemoveEventHandler removes a formerly added event handler given by + // its registration handle. + // This function is guaranteed to be idempotent, and thread-safe. + RemoveEventHandler(handle toolscache.ResourceEventHandlerRegistration) error + // AddIndexers adds more indexers to this store. If you call this after you already have data + // in the store, the results are undefined. 
+ AddIndexers(indexers toolscache.Indexers) error + // HasSynced return true if the informers underlying store has synced. + HasSynced() bool +} + +// ObjectSelector is an alias name of internal.Selector. +type ObjectSelector internal.Selector + +// SelectorsByObject associate a client.Object's GVK to a field/label selector. +// There is also `DefaultSelector` to set a global default (which will be overridden by +// a more specific setting here, if any). +type SelectorsByObject map[client.Object]ObjectSelector + +// Options are the optional arguments for creating a new InformersMap object. +type Options struct { + // Scheme is the scheme to use for mapping objects to GroupVersionKinds + Scheme *runtime.Scheme + + // Mapper is the RESTMapper to use for mapping GroupVersionKinds to Resources + Mapper meta.RESTMapper + + // Resync is the base frequency the informers are resynced. + // Defaults to defaultResyncTime. + // A 10 percent jitter will be added to the Resync period between informers + // So that all informers will not send list requests simultaneously. + Resync *time.Duration + + // Namespace restricts the cache's ListWatch to the desired namespace + // Default watches all namespaces + Namespace string + + // SelectorsByObject restricts the cache's ListWatch to the desired + // fields per GVK at the specified object, the map's value must implement + // Selector [1] using for example a Set [2] + // [1] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Selector + // [2] https://pkg.go.dev/k8s.io/apimachinery/pkg/fields#Set + SelectorsByObject SelectorsByObject + + // DefaultSelector will be used as selectors for all object types + // that do not have a selector in SelectorsByObject defined. + DefaultSelector ObjectSelector + + // UnsafeDisableDeepCopyByObject indicates not to deep copy objects during get or + // list objects per GVK at the specified object. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + UnsafeDisableDeepCopyByObject DisableDeepCopyByObject + + // TransformByObject is a map from GVKs to transformer functions which + // get applied when objects of the transformation are about to be committed + // to cache. + // + // This function is called both for new objects to enter the cache, + // and for updated objects. + TransformByObject TransformByObject + + // DefaultTransform is the transform used for all GVKs which do + // not have an explicit transform func set in TransformByObject + DefaultTransform toolscache.TransformFunc +} + +var defaultResyncTime = 10 * time.Hour + +// New initializes and returns a new Cache. 
+func New(config *rest.Config, opts Options) (Cache, error) { + opts, err := defaultOpts(config, opts) + if err != nil { + return nil, err + } + selectorsByGVK, err := convertToByGVK(opts.SelectorsByObject, opts.DefaultSelector, opts.Scheme) + if err != nil { + return nil, err + } + disableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(opts.UnsafeDisableDeepCopyByObject, opts.Scheme) + if err != nil { + return nil, err + } + transformByGVK, err := convertToByGVK(opts.TransformByObject, opts.DefaultTransform, opts.Scheme) + if err != nil { + return nil, err + } + transformByObj := internal.TransformFuncByObjectFromMap(transformByGVK) + + internalSelectorsByGVK := internal.SelectorsByGVK{} + for gvk, selector := range selectorsByGVK { + internalSelectorsByGVK[gvk] = internal.Selector(selector) + } + + im := internal.NewInformersMap(config, opts.Scheme, opts.Mapper, *opts.Resync, opts.Namespace, internalSelectorsByGVK, disableDeepCopyByGVK, transformByObj) + return &informerCache{InformersMap: im}, nil +} + +// BuilderWithOptions returns a Cache constructor that will build a cache +// honoring the options argument, this is useful to specify options like +// SelectorsByObject +// WARNING: If SelectorsByObject is specified, filtered out resources are not +// returned. +// WARNING: If UnsafeDisableDeepCopy is enabled, you must DeepCopy any object +// returned from cache get/list before mutating it. +func BuilderWithOptions(options Options) NewCacheFunc { + return func(config *rest.Config, inherited Options) (Cache, error) { + var err error + inherited, err = defaultOpts(config, inherited) + if err != nil { + return nil, err + } + options, err = defaultOpts(config, options) + if err != nil { + return nil, err + } + combined, err := options.inheritFrom(inherited) + if err != nil { + return nil, err + } + return New(config, *combined) + } +} + +func (options Options) inheritFrom(inherited Options) (*Options, error) { + var ( + combined Options + err error + ) + combined.Scheme = combineScheme(inherited.Scheme, options.Scheme) + combined.Mapper = selectMapper(inherited.Mapper, options.Mapper) + combined.Resync = selectResync(inherited.Resync, options.Resync) + combined.Namespace = selectNamespace(inherited.Namespace, options.Namespace) + combined.SelectorsByObject, combined.DefaultSelector, err = combineSelectors(inherited, options, combined.Scheme) + if err != nil { + return nil, err + } + combined.UnsafeDisableDeepCopyByObject, err = combineUnsafeDeepCopy(inherited, options, combined.Scheme) + if err != nil { + return nil, err + } + combined.TransformByObject, combined.DefaultTransform, err = combineTransforms(inherited, options, combined.Scheme) + if err != nil { + return nil, err + } + return &combined, nil +} + +func combineScheme(schemes ...*runtime.Scheme) *runtime.Scheme { + var out *runtime.Scheme + for _, sch := range schemes { + if sch == nil { + continue + } + for gvk, t := range sch.AllKnownTypes() { + if out == nil { + out = runtime.NewScheme() + } + out.AddKnownTypeWithName(gvk, reflect.New(t).Interface().(runtime.Object)) + } + } + return out +} + +func selectMapper(def, override meta.RESTMapper) meta.RESTMapper { + if override != nil { + return override + } + return def +} + +func selectResync(def, override *time.Duration) *time.Duration { + if override != nil { + return override + } + return def +} + +func selectNamespace(def, override string) string { + if override != "" { + return override + } + return def +} + +func combineSelectors(inherited, options Options, scheme 
*runtime.Scheme) (SelectorsByObject, ObjectSelector, error) { + // Selectors are combined via logical AND. + // - Combined label selector is a union of the selectors requirements from both sets of options. + // - Combined field selector uses fields.AndSelectors with the combined list of non-nil field selectors + // defined in both sets of options. + // + // There is a bunch of complexity here because we need to convert to SelectorsByGVK + // to be able to match keys between options and inherited and then convert back to SelectorsByObject + optionsSelectorsByGVK, err := convertToByGVK(options.SelectorsByObject, options.DefaultSelector, scheme) + if err != nil { + return nil, ObjectSelector{}, err + } + inheritedSelectorsByGVK, err := convertToByGVK(inherited.SelectorsByObject, inherited.DefaultSelector, inherited.Scheme) + if err != nil { + return nil, ObjectSelector{}, err + } + + for gvk, inheritedSelector := range inheritedSelectorsByGVK { + optionsSelectorsByGVK[gvk] = combineSelector(inheritedSelector, optionsSelectorsByGVK[gvk]) + } + return convertToByObject(optionsSelectorsByGVK, scheme) +} + +func combineSelector(selectors ...ObjectSelector) ObjectSelector { + ls := make([]labels.Selector, 0, len(selectors)) + fs := make([]fields.Selector, 0, len(selectors)) + for _, s := range selectors { + ls = append(ls, s.Label) + fs = append(fs, s.Field) + } + return ObjectSelector{ + Label: combineLabelSelectors(ls...), + Field: combineFieldSelectors(fs...), + } +} + +func combineLabelSelectors(ls ...labels.Selector) labels.Selector { + var combined labels.Selector + for _, l := range ls { + if l == nil { + continue + } + if combined == nil { + combined = labels.NewSelector() + } + reqs, _ := l.Requirements() + combined = combined.Add(reqs...) + } + return combined +} + +func combineFieldSelectors(fs ...fields.Selector) fields.Selector { + nonNil := fs[:0] + for _, f := range fs { + if f == nil { + continue + } + nonNil = append(nonNil, f) + } + if len(nonNil) == 0 { + return nil + } + if len(nonNil) == 1 { + return nonNil[0] + } + return fields.AndSelectors(nonNil...) +} + +func combineUnsafeDeepCopy(inherited, options Options, scheme *runtime.Scheme) (DisableDeepCopyByObject, error) { + // UnsafeDisableDeepCopyByObject is combined via precedence. Only if a value for a particular GVK is unset + // in options will a value from inherited be used. + optionsDisableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(options.UnsafeDisableDeepCopyByObject, options.Scheme) + if err != nil { + return nil, err + } + inheritedDisableDeepCopyByGVK, err := convertToDisableDeepCopyByGVK(inherited.UnsafeDisableDeepCopyByObject, inherited.Scheme) + if err != nil { + return nil, err + } + + for gvk, inheritedDeepCopy := range inheritedDisableDeepCopyByGVK { + if _, ok := optionsDisableDeepCopyByGVK[gvk]; !ok { + if optionsDisableDeepCopyByGVK == nil { + optionsDisableDeepCopyByGVK = map[schema.GroupVersionKind]bool{} + } + optionsDisableDeepCopyByGVK[gvk] = inheritedDeepCopy + } + } + return convertToDisableDeepCopyByObject(optionsDisableDeepCopyByGVK, scheme) +} + +func combineTransforms(inherited, options Options, scheme *runtime.Scheme) (TransformByObject, toolscache.TransformFunc, error) { + // Transform functions are combined via chaining. If both inherited and options define a transform + // function, the transform function from inherited will be called first, and the transform function from + // options will be called second. 
+ optionsTransformByGVK, err := convertToByGVK(options.TransformByObject, options.DefaultTransform, options.Scheme) + if err != nil { + return nil, nil, err + } + inheritedTransformByGVK, err := convertToByGVK(inherited.TransformByObject, inherited.DefaultTransform, inherited.Scheme) + if err != nil { + return nil, nil, err + } + + for gvk, inheritedTransform := range inheritedTransformByGVK { + if optionsTransformByGVK == nil { + optionsTransformByGVK = map[schema.GroupVersionKind]toolscache.TransformFunc{} + } + optionsTransformByGVK[gvk] = combineTransform(inheritedTransform, optionsTransformByGVK[gvk]) + } + return convertToByObject(optionsTransformByGVK, scheme) +} + +func combineTransform(inherited, current toolscache.TransformFunc) toolscache.TransformFunc { + if inherited == nil { + return current + } + if current == nil { + return inherited + } + return func(in interface{}) (interface{}, error) { + mid, err := inherited(in) + if err != nil { + return nil, err + } + return current(mid) + } +} + +func defaultOpts(config *rest.Config, opts Options) (Options, error) { + // Use the default Kubernetes Scheme if unset + if opts.Scheme == nil { + opts.Scheme = scheme.Scheme + } + + // Construct a new Mapper if unset + if opts.Mapper == nil { + var err error + opts.Mapper, err = apiutil.NewDiscoveryRESTMapper(config) + if err != nil { + log.WithName("setup").Error(err, "Failed to get API Group-Resources") + return opts, fmt.Errorf("could not create RESTMapper from config") + } + } + + // Default the resync period to 10 hours if unset + if opts.Resync == nil { + opts.Resync = &defaultResyncTime + } + return opts, nil +} + +func convertToByGVK[T any](byObject map[client.Object]T, def T, scheme *runtime.Scheme) (map[schema.GroupVersionKind]T, error) { + byGVK := map[schema.GroupVersionKind]T{} + for object, value := range byObject { + gvk, err := apiutil.GVKForObject(object, scheme) + if err != nil { + return nil, err + } + byGVK[gvk] = value + } + byGVK[schema.GroupVersionKind{}] = def + return byGVK, nil +} + +func convertToByObject[T any](byGVK map[schema.GroupVersionKind]T, scheme *runtime.Scheme) (map[client.Object]T, T, error) { + var byObject map[client.Object]T + def := byGVK[schema.GroupVersionKind{}] + for gvk, value := range byGVK { + if gvk == (schema.GroupVersionKind{}) { + continue + } + obj, err := scheme.New(gvk) + if err != nil { + return nil, def, err + } + cObj, ok := obj.(client.Object) + if !ok { + return nil, def, fmt.Errorf("object %T for GVK %q does not implement client.Object", obj, gvk) + } + if byObject == nil { + byObject = map[client.Object]T{} + } + byObject[cObj] = value + } + return byObject, def, nil +} + +// DisableDeepCopyByObject associate a client.Object's GVK to disable DeepCopy during get or list from cache. +type DisableDeepCopyByObject map[client.Object]bool + +var _ client.Object = &ObjectAll{} + +// ObjectAll is the argument to represent all objects' types. 
+type ObjectAll struct { + client.Object +} + +func convertToDisableDeepCopyByGVK(disableDeepCopyByObject DisableDeepCopyByObject, scheme *runtime.Scheme) (internal.DisableDeepCopyByGVK, error) { + disableDeepCopyByGVK := internal.DisableDeepCopyByGVK{} + for obj, disable := range disableDeepCopyByObject { + switch obj.(type) { + case ObjectAll, *ObjectAll: + disableDeepCopyByGVK[internal.GroupVersionKindAll] = disable + default: + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return nil, err + } + disableDeepCopyByGVK[gvk] = disable + } + } + return disableDeepCopyByGVK, nil +} + +func convertToDisableDeepCopyByObject(byGVK internal.DisableDeepCopyByGVK, scheme *runtime.Scheme) (DisableDeepCopyByObject, error) { + var byObject DisableDeepCopyByObject + for gvk, value := range byGVK { + if byObject == nil { + byObject = DisableDeepCopyByObject{} + } + if gvk == (schema.GroupVersionKind{}) { + byObject[ObjectAll{}] = value + continue + } + obj, err := scheme.New(gvk) + if err != nil { + return nil, err + } + cObj, ok := obj.(client.Object) + if !ok { + return nil, fmt.Errorf("object %T for GVK %q does not implement client.Object", obj, gvk) + } + + byObject[cObj] = value + } + return byObject, nil +} + +// TransformByObject associate a client.Object's GVK to a transformer function +// to be applied when storing the object into the cache. +type TransformByObject map[client.Object]toolscache.TransformFunc diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go new file mode 100644 index 00000000..e1742ac0 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package cache provides object caches that act as caching client.Reader +// instances and help drive Kubernetes-object-based event handlers. +package cache diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go new file mode 100644 index 00000000..08e4e6df --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/informer_cache.go @@ -0,0 +1,217 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package cache + +import ( + "context" + "fmt" + "reflect" + "strings" + + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/cache/internal" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +var ( + _ Informers = &informerCache{} + _ client.Reader = &informerCache{} + _ Cache = &informerCache{} +) + +// ErrCacheNotStarted is returned when trying to read from the cache that wasn't started. +type ErrCacheNotStarted struct{} + +func (*ErrCacheNotStarted) Error() string { + return "the cache is not started, can not read objects" +} + +// informerCache is a Kubernetes Object cache populated from InformersMap. informerCache wraps an InformersMap. +type informerCache struct { + *internal.InformersMap +} + +// Get implements Reader. +func (ip *informerCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { + gvk, err := apiutil.GVKForObject(out, ip.Scheme) + if err != nil { + return err + } + + started, cache, err := ip.InformersMap.Get(ctx, gvk, out) + if err != nil { + return err + } + + if !started { + return &ErrCacheNotStarted{} + } + return cache.Reader.Get(ctx, key, out) +} + +// List implements Reader. +func (ip *informerCache) List(ctx context.Context, out client.ObjectList, opts ...client.ListOption) error { + gvk, cacheTypeObj, err := ip.objectTypeForListObject(out) + if err != nil { + return err + } + + started, cache, err := ip.InformersMap.Get(ctx, *gvk, cacheTypeObj) + if err != nil { + return err + } + + if !started { + return &ErrCacheNotStarted{} + } + + return cache.Reader.List(ctx, out, opts...) +} + +// objectTypeForListObject tries to find the runtime.Object and associated GVK +// for a single object corresponding to the passed-in list type. We need them +// because they are used as cache map key. +func (ip *informerCache) objectTypeForListObject(list client.ObjectList) (*schema.GroupVersionKind, runtime.Object, error) { + gvk, err := apiutil.GVKForObject(list, ip.Scheme) + if err != nil { + return nil, nil, err + } + + // we need the non-list GVK, so chop off the "List" from the end of the kind + if strings.HasSuffix(gvk.Kind, "List") && apimeta.IsListType(list) { + gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] + } + + _, isUnstructured := list.(*unstructured.UnstructuredList) + var cacheTypeObj runtime.Object + if isUnstructured { + u := &unstructured.Unstructured{} + u.SetGroupVersionKind(gvk) + cacheTypeObj = u + } else { + itemsPtr, err := apimeta.GetItemsPtr(list) + if err != nil { + return nil, nil, err + } + // http://knowyourmeme.com/memes/this-is-fine + elemType := reflect.Indirect(reflect.ValueOf(itemsPtr)).Type().Elem() + if elemType.Kind() != reflect.Ptr { + elemType = reflect.PtrTo(elemType) + } + + cacheTypeValue := reflect.Zero(elemType) + var ok bool + cacheTypeObj, ok = cacheTypeValue.Interface().(runtime.Object) + if !ok { + return nil, nil, fmt.Errorf("cannot get cache for %T, its element %T is not a runtime.Object", list, cacheTypeValue.Interface()) + } + } + + return &gvk, cacheTypeObj, nil +} + +// GetInformerForKind returns the informer for the GroupVersionKind. 
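+// A minimal usage sketch (illustrative only, assuming a started cache c):
+//
+//	gvk := schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}
+//	inf, err := c.GetInformerForKind(ctx, gvk)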
+func (ip *informerCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { + // Map the gvk to an object + obj, err := ip.Scheme.New(gvk) + if err != nil { + return nil, err + } + + _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + if err != nil { + return nil, err + } + return i.Informer, err +} + +// GetInformer returns the informer for the obj. +func (ip *informerCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { + gvk, err := apiutil.GVKForObject(obj, ip.Scheme) + if err != nil { + return nil, err + } + + _, i, err := ip.InformersMap.Get(ctx, gvk, obj) + if err != nil { + return nil, err + } + return i.Informer, err +} + +// NeedLeaderElection implements the LeaderElectionRunnable interface +// to indicate that this can be started without requiring the leader lock. +func (ip *informerCache) NeedLeaderElection() bool { + return false +} + +// IndexField adds an indexer to the underlying cache, using extraction function to get +// value(s) from the given field. This index can then be used by passing a field selector +// to List. For one-to-one compatibility with "normal" field selectors, only return one value. +// The values may be anything. They will automatically be prefixed with the namespace of the +// given object, if present. The objects passed are guaranteed to be objects of the correct type. +func (ip *informerCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + informer, err := ip.GetInformer(ctx, obj) + if err != nil { + return err + } + return indexByField(informer, field, extractValue) +} + +func indexByField(indexer Informer, field string, extractor client.IndexerFunc) error { + indexFunc := func(objRaw interface{}) ([]string, error) { + // TODO(directxman12): check if this is the correct type? + obj, isObj := objRaw.(client.Object) + if !isObj { + return nil, fmt.Errorf("object of type %T is not an Object", objRaw) + } + meta, err := apimeta.Accessor(obj) + if err != nil { + return nil, err + } + ns := meta.GetNamespace() + + rawVals := extractor(obj) + var vals []string + if ns == "" { + // if we're not doubling the keys for the namespaced case, just create a new slice with same length + vals = make([]string, len(rawVals)) + } else { + // if we need to add non-namespaced versions too, double the length + vals = make([]string, len(rawVals)*2) + } + for i, rawVal := range rawVals { + // save a namespaced variant, so that we can ask + // "what are all the object matching a given index *in a given namespace*" + vals[i] = internal.KeyToNamespacedKey(ns, rawVal) + if ns != "" { + // if we have a namespace, also inject a special index key for listing + // regardless of the object namespace + vals[i+len(rawVals)] = internal.KeyToNamespacedKey("", rawVal) + } + } + + return vals, nil + } + + return indexer.AddIndexers(cache.Indexers{internal.FieldIndexName(field): indexFunc}) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go new file mode 100644 index 00000000..f78b0833 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/cache_reader.go @@ -0,0 +1,204 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "fmt" + "reflect" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/internal/field/selector" + + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// CacheReader is a client.Reader. +var _ client.Reader = &CacheReader{} + +// CacheReader wraps a cache.Index to implement the client.CacheReader interface for a single type. +type CacheReader struct { + // indexer is the underlying indexer wrapped by this cache. + indexer cache.Indexer + + // groupVersionKind is the group-version-kind of the resource. + groupVersionKind schema.GroupVersionKind + + // scopeName is the scope of the resource (namespaced or cluster-scoped). + scopeName apimeta.RESTScopeName + + // disableDeepCopy indicates not to deep copy objects during get or list objects. + // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + disableDeepCopy bool +} + +// Get checks the indexer for the object and writes a copy of it if found. +func (c *CacheReader) Get(_ context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error { + if c.scopeName == apimeta.RESTScopeNameRoot { + key.Namespace = "" + } + storeKey := objectKeyToStoreKey(key) + + // Lookup the object from the indexer cache + obj, exists, err := c.indexer.GetByKey(storeKey) + if err != nil { + return err + } + + // Not found, return an error + if !exists { + // Resource gets transformed into Kind in the error anyway, so this is fine + return apierrors.NewNotFound(schema.GroupResource{ + Group: c.groupVersionKind.Group, + Resource: c.groupVersionKind.Kind, + }, key.Name) + } + + // Verify the result is a runtime.Object + if _, isObj := obj.(runtime.Object); !isObj { + // This should never happen + return fmt.Errorf("cache contained %T, which is not an Object", obj) + } + + if c.disableDeepCopy { + // skip deep copy which might be unsafe + // you must DeepCopy any object before mutating it outside + } else { + // deep copy to avoid mutating cache + obj = obj.(runtime.Object).DeepCopyObject() + } + + // Copy the value of the item in the cache to the returned value + // TODO(directxman12): this is a terrible hack, pls fix (we should have deepcopyinto) + outVal := reflect.ValueOf(out) + objVal := reflect.ValueOf(obj) + if !objVal.Type().AssignableTo(outVal.Type()) { + return fmt.Errorf("cache had type %s, but %s was asked for", objVal.Type(), outVal.Type()) + } + reflect.Indirect(outVal).Set(reflect.Indirect(objVal)) + if !c.disableDeepCopy { + out.GetObjectKind().SetGroupVersionKind(c.groupVersionKind) + } + + return nil +} + +// List lists items out of the indexer and writes them to out. 
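+// Illustrative sketch (editorial addition): callers usually reach this through
+// a cache-backed client, e.g.
+//
+//	var pods corev1.PodList
+//	err := cachedReader.List(ctx, &pods,
+//		client.InNamespace("default"),
+//		client.MatchingLabels{"app": "web"})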
+func (c *CacheReader) List(_ context.Context, out client.ObjectList, opts ...client.ListOption) error {
+	var objs []interface{}
+	var err error
+
+	listOpts := client.ListOptions{}
+	listOpts.ApplyOptions(opts)
+
+	switch {
+	case listOpts.FieldSelector != nil:
+		// TODO(directxman12): support more complicated field selectors by
+		// combining multiple indices, GetIndexers, etc
+		field, val, requiresExact := selector.RequiresExactMatch(listOpts.FieldSelector)
+		if !requiresExact {
+			return fmt.Errorf("non-exact field matches are not supported by the cache")
+		}
+		// list all objects by the field selector. If this is namespaced and we have one, ask for the
+		// namespaced index key. Otherwise, ask for the non-namespaced variant by using the fake "all namespaces"
+		// namespace.
+		objs, err = c.indexer.ByIndex(FieldIndexName(field), KeyToNamespacedKey(listOpts.Namespace, val))
+	case listOpts.Namespace != "":
+		objs, err = c.indexer.ByIndex(cache.NamespaceIndex, listOpts.Namespace)
+	default:
+		objs = c.indexer.List()
+	}
+	if err != nil {
+		return err
+	}
+	var labelSel labels.Selector
+	if listOpts.LabelSelector != nil {
+		labelSel = listOpts.LabelSelector
+	}
+
+	limitSet := listOpts.Limit > 0
+
+	runtimeObjs := make([]runtime.Object, 0, len(objs))
+	for _, item := range objs {
+		// if the Limit option is set and the number of items
+		// listed exceeds this limit, then stop reading.
+		if limitSet && int64(len(runtimeObjs)) >= listOpts.Limit {
+			break
+		}
+		obj, isObj := item.(runtime.Object)
+		if !isObj {
+			return fmt.Errorf("cache contained %T, which is not an Object", obj)
+		}
+		meta, err := apimeta.Accessor(obj)
+		if err != nil {
+			return err
+		}
+		if labelSel != nil {
+			lbls := labels.Set(meta.GetLabels())
+			if !labelSel.Matches(lbls) {
+				continue
+			}
+		}
+
+		var outObj runtime.Object
+		if c.disableDeepCopy || (listOpts.UnsafeDisableDeepCopy != nil && *listOpts.UnsafeDisableDeepCopy) {
+			// skip deep copy which might be unsafe
+			// you must DeepCopy any object before mutating it outside
+			outObj = obj
+		} else {
+			outObj = obj.DeepCopyObject()
+			outObj.GetObjectKind().SetGroupVersionKind(c.groupVersionKind)
+		}
+		runtimeObjs = append(runtimeObjs, outObj)
+	}
+	return apimeta.SetList(out, runtimeObjs)
+}
+
+// objectKeyToStoreKey converts an object key to a store key.
+// It's akin to MetaNamespaceKeyFunc. It's separate from
+// String to allow keeping the key format easily in sync with
+// MetaNamespaceKeyFunc.
+func objectKeyToStoreKey(k client.ObjectKey) string {
+	if k.Namespace == "" {
+		return k.Name
+	}
+	return k.Namespace + "/" + k.Name
+}
+
+// FieldIndexName constructs the name of the index over the given field,
+// for use with an indexer.
+func FieldIndexName(field string) string {
+	return "field:" + field
+}
+
+// allNamespacesNamespace is used as the "namespace" when we want to list across all namespaces.
+const allNamespacesNamespace = "__all_namespaces"
+
+// KeyToNamespacedKey prefixes the given index key with a namespace
+// for use in field selector indexes.
+func KeyToNamespacedKey(ns string, baseKey string) string {
+	if ns != "" {
+		return ns + "/" + baseKey
+	}
+	return allNamespacesNamespace + "/" + baseKey
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go
new file mode 100644
index 00000000..27f46e32
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/deleg_map.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" +) + +// InformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. +// It uses a standard parameter codec constructed based on the given generated Scheme. +type InformersMap struct { + // we abstract over the details of structured/unstructured/metadata with the specificInformerMaps + // TODO(directxman12): genericize this over different projections now that we have 3 different maps + + structured *specificInformersMap + unstructured *specificInformersMap + metadata *specificInformersMap + + // Scheme maps runtime.Objects to GroupVersionKinds + Scheme *runtime.Scheme +} + +// NewInformersMap creates a new InformersMap that can create informers for +// both structured and unstructured objects. +func NewInformersMap(config *rest.Config, + scheme *runtime.Scheme, + mapper meta.RESTMapper, + resync time.Duration, + namespace string, + selectors SelectorsByGVK, + disableDeepCopy DisableDeepCopyByGVK, + transformers TransformFuncByObject, +) *InformersMap { + return &InformersMap{ + structured: newStructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), + unstructured: newUnstructuredInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), + metadata: newMetadataInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers), + + Scheme: scheme, + } +} + +// Start calls Run on each of the informers and sets started to true. Blocks on the context. +func (m *InformersMap) Start(ctx context.Context) error { + go m.structured.Start(ctx) + go m.unstructured.Start(ctx) + go m.metadata.Start(ctx) + <-ctx.Done() + return nil +} + +// WaitForCacheSync waits until all the caches have been started and synced. +func (m *InformersMap) WaitForCacheSync(ctx context.Context) bool { + syncedFuncs := append([]cache.InformerSynced(nil), m.structured.HasSyncedFuncs()...) + syncedFuncs = append(syncedFuncs, m.unstructured.HasSyncedFuncs()...) + syncedFuncs = append(syncedFuncs, m.metadata.HasSyncedFuncs()...) + + if !m.structured.waitForStarted(ctx) { + return false + } + if !m.unstructured.waitForStarted(ctx) { + return false + } + if !m.metadata.waitForStarted(ctx) { + return false + } + return cache.WaitForCacheSync(ctx.Done(), syncedFuncs...) +} + +// Get will create a new Informer and add it to the map of InformersMap if none exists. Returns +// the Informer from the map. 
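+// The returned bool reports whether the map has been started; a sketch
+// (illustrative only):
+//
+//	started, entry, err := m.Get(ctx, gvk, &corev1.Pod{})
+//	if err == nil && started {
+//		_ = entry.Reader // cache-backed reader for this GVK
+//	}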
+func (m *InformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return m.unstructured.Get(ctx, gvk, obj)
+	case *unstructured.UnstructuredList:
+		return m.unstructured.Get(ctx, gvk, obj)
+	case *metav1.PartialObjectMetadata:
+		return m.metadata.Get(ctx, gvk, obj)
+	case *metav1.PartialObjectMetadataList:
+		return m.metadata.Get(ctx, gvk, obj)
+	default:
+		return m.structured.Get(ctx, gvk, obj)
+	}
+}
+
+// newStructuredInformersMap creates a new InformersMap for structured objects.
+func newStructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
+	namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
+	return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createStructuredListWatch)
+}
+
+// newUnstructuredInformersMap creates a new InformersMap for unstructured objects.
+func newUnstructuredInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
+	namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
+	return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createUnstructuredListWatch)
+}
+
+// newMetadataInformersMap creates a new InformersMap for metadata-only objects.
+func newMetadataInformersMap(config *rest.Config, scheme *runtime.Scheme, mapper meta.RESTMapper, resync time.Duration,
+	namespace string, selectors SelectorsByGVK, disableDeepCopy DisableDeepCopyByGVK, transformers TransformFuncByObject) *specificInformersMap {
+	return newSpecificInformersMap(config, scheme, mapper, resync, namespace, selectors, disableDeepCopy, transformers, createMetadataListWatch)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go
new file mode 100644
index 00000000..54bd7eec
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/disabledeepcopy.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+// GroupVersionKindAll is the argument to represent all GroupVersionKind types.
+var GroupVersionKindAll = schema.GroupVersionKind{}
+
+// DisableDeepCopyByGVK associates a GroupVersionKind with whether DeepCopy is disabled during get or list from the cache.
+type DisableDeepCopyByGVK map[schema.GroupVersionKind]bool
+
+// IsDisabled reports whether DeepCopy is disabled for the given GroupVersionKind.
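+// A per-GVK entry takes precedence over the GroupVersionKindAll wildcard;
+// for example (illustrative, hypothetical GVKs):
+//
+//	d := DisableDeepCopyByGVK{GroupVersionKindAll: true, podGVK: false}
+//	d.IsDisabled(podGVK)   // false: the explicit entry wins
+//	d.IsDisabled(otherGVK) // true: falls back to the wildcard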
+func (disableByGVK DisableDeepCopyByGVK) IsDisabled(gvk schema.GroupVersionKind) bool { + if d, ok := disableByGVK[gvk]; ok { + return d + } else if d, ok = disableByGVK[GroupVersionKindAll]; ok { + return d + } + return false +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go new file mode 100644 index 00000000..1524d231 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/informers_map.go @@ -0,0 +1,480 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package internal + +import ( + "context" + "fmt" + "math/rand" + "sync" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/metadata" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" + + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// clientListWatcherFunc knows how to create a ListWatcher. +type createListWatcherFunc func(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) + +// newSpecificInformersMap returns a new specificInformersMap (like +// the generical InformersMap, except that it doesn't implement WaitForCacheSync). +func newSpecificInformersMap(config *rest.Config, + scheme *runtime.Scheme, + mapper meta.RESTMapper, + resync time.Duration, + namespace string, + selectors SelectorsByGVK, + disableDeepCopy DisableDeepCopyByGVK, + transformers TransformFuncByObject, + createListWatcher createListWatcherFunc, +) *specificInformersMap { + ip := &specificInformersMap{ + config: config, + Scheme: scheme, + mapper: mapper, + informersByGVK: make(map[schema.GroupVersionKind]*MapEntry), + codecs: serializer.NewCodecFactory(scheme), + paramCodec: runtime.NewParameterCodec(scheme), + resync: resync, + startWait: make(chan struct{}), + createListWatcher: createListWatcher, + namespace: namespace, + selectors: selectors.forGVK, + disableDeepCopy: disableDeepCopy, + transformers: transformers, + } + return ip +} + +// MapEntry contains the cached data for an Informer. +type MapEntry struct { + // Informer is the cached informer + Informer cache.SharedIndexInformer + + // CacheReader wraps Informer and implements the CacheReader interface for a single type + Reader CacheReader +} + +// specificInformersMap create and caches Informers for (runtime.Object, schema.GroupVersionKind) pairs. +// It uses a standard parameter codec constructed based on the given generated Scheme. 
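+// Conceptually (editorial sketch, not upstream documentation), each entry is
+// keyed by GVK:
+//
+//	informersByGVK[gvk] = &MapEntry{Informer: si, Reader: CacheReader{...}}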
+type specificInformersMap struct { + // Scheme maps runtime.Objects to GroupVersionKinds + Scheme *runtime.Scheme + + // config is used to talk to the apiserver + config *rest.Config + + // mapper maps GroupVersionKinds to Resources + mapper meta.RESTMapper + + // informersByGVK is the cache of informers keyed by groupVersionKind + informersByGVK map[schema.GroupVersionKind]*MapEntry + + // codecs is used to create a new REST client + codecs serializer.CodecFactory + + // paramCodec is used by list and watch + paramCodec runtime.ParameterCodec + + // stop is the stop channel to stop informers + stop <-chan struct{} + + // resync is the base frequency the informers are resynced + // a 10 percent jitter will be added to the resync period between informers + // so that all informers will not send list requests simultaneously. + resync time.Duration + + // mu guards access to the map + mu sync.RWMutex + + // start is true if the informers have been started + started bool + + // startWait is a channel that is closed after the + // informer has been started. + startWait chan struct{} + + // createClient knows how to create a client and a list object, + // and allows for abstracting over the particulars of structured vs + // unstructured objects. + createListWatcher createListWatcherFunc + + // namespace is the namespace that all ListWatches are restricted to + // default or empty string means all namespaces + namespace string + + // selectors are the label or field selectors that will be added to the + // ListWatch ListOptions. + selectors func(gvk schema.GroupVersionKind) Selector + + // disableDeepCopy indicates not to deep copy objects during get or list objects. + disableDeepCopy DisableDeepCopyByGVK + + // transform funcs are applied to objects before they are committed to the cache + transformers TransformFuncByObject +} + +// Start calls Run on each of the informers and sets started to true. Blocks on the context. +// It doesn't return start because it can't return an error, and it's not a runnable directly. +func (ip *specificInformersMap) Start(ctx context.Context) { + func() { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Set the stop channel so it can be passed to informers that are added later + ip.stop = ctx.Done() + + // Start each informer + for _, informer := range ip.informersByGVK { + go informer.Informer.Run(ctx.Done()) + } + + // Set started to true so we immediately start any informers added later. + ip.started = true + close(ip.startWait) + }() + <-ctx.Done() +} + +func (ip *specificInformersMap) waitForStarted(ctx context.Context) bool { + select { + case <-ip.startWait: + return true + case <-ctx.Done(): + return false + } +} + +// HasSyncedFuncs returns all the HasSynced functions for the informers in this map. +func (ip *specificInformersMap) HasSyncedFuncs() []cache.InformerSynced { + ip.mu.RLock() + defer ip.mu.RUnlock() + syncedFuncs := make([]cache.InformerSynced, 0, len(ip.informersByGVK)) + for _, informer := range ip.informersByGVK { + syncedFuncs = append(syncedFuncs, informer.Informer.HasSynced) + } + return syncedFuncs +} + +// Get will create a new Informer and add it to the map of specificInformersMap if none exists. Returns +// the Informer from the map. 
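+// Once the map has started, Get blocks until the new informer has synced, so
+// a caller can read from it immediately (illustrative sketch):
+//
+//	if started, e, err := ip.Get(ctx, gvk, obj); err == nil && started {
+//		item, exists, _ := e.Informer.GetStore().GetByKey("ns/name")
+//		_ = item
+//		_ = exists
+//	}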
+func (ip *specificInformersMap) Get(ctx context.Context, gvk schema.GroupVersionKind, obj runtime.Object) (bool, *MapEntry, error) { + // Return the informer if it is found + i, started, ok := func() (*MapEntry, bool, bool) { + ip.mu.RLock() + defer ip.mu.RUnlock() + i, ok := ip.informersByGVK[gvk] + return i, ip.started, ok + }() + + if !ok { + var err error + if i, started, err = ip.addInformerToMap(gvk, obj); err != nil { + return started, nil, err + } + } + + if started && !i.Informer.HasSynced() { + // Wait for it to sync before returning the Informer so that folks don't read from a stale cache. + if !cache.WaitForCacheSync(ctx.Done(), i.Informer.HasSynced) { + return started, nil, apierrors.NewTimeoutError(fmt.Sprintf("failed waiting for %T Informer to sync", obj), 0) + } + } + + return started, i, nil +} + +func (ip *specificInformersMap) addInformerToMap(gvk schema.GroupVersionKind, obj runtime.Object) (*MapEntry, bool, error) { + ip.mu.Lock() + defer ip.mu.Unlock() + + // Check the cache to see if we already have an Informer. If we do, return the Informer. + // This is for the case where 2 routines tried to get the informer when it wasn't in the map + // so neither returned early, but the first one created it. + if i, ok := ip.informersByGVK[gvk]; ok { + return i, ip.started, nil + } + + // Create a NewSharedIndexInformer and add it to the map. + var lw *cache.ListWatch + lw, err := ip.createListWatcher(gvk, ip) + if err != nil { + return nil, false, err + } + ni := cache.NewSharedIndexInformer(lw, obj, resyncPeriod(ip.resync)(), cache.Indexers{ + cache.NamespaceIndex: cache.MetaNamespaceIndexFunc, + }) + + // Check to see if there is a transformer for this gvk + if err := ni.SetTransform(ip.transformers.Get(gvk)); err != nil { + return nil, false, err + } + + rm, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, false, err + } + + i := &MapEntry{ + Informer: ni, + Reader: CacheReader{ + indexer: ni.GetIndexer(), + groupVersionKind: gvk, + scopeName: rm.Scope.Name(), + disableDeepCopy: ip.disableDeepCopy.IsDisabled(gvk), + }, + } + ip.informersByGVK[gvk] = i + + // Start the Informer if need by + // TODO(seans): write thorough tests and document what happens here - can you add indexers? + // can you add eventhandlers? + if ip.started { + go i.Informer.Run(ip.stop) + } + return i, ip.started, nil +} + +// newListWatch returns a new ListWatch object that can be used to create a SharedIndexInformer. +func createStructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + client, err := apiutil.RESTClientForGVK(gvk, false, ip.config, ip.codecs) + if err != nil { + return nil, err + } + listGVK := gvk.GroupVersion().WithKind(gvk.Kind + "List") + listObj, err := ip.Scheme.New(listGVK) + if err != nil { + return nil, err + } + + // TODO: the functions that make use of this ListWatch should be adapted to + // pass in their own contexts instead of relying on this fixed one here. 
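+	// Editorial note (illustrative): the ListFunc/WatchFunc below apply any
+	// configured per-GVK selector before calling the API server; e.g. an exact
+	// field selector on "metadata.namespace" set via Selector.ApplyToList both
+	// filters the results and, via restrictNamespaceBySelector, narrows the
+	// request to that namespace.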
+ ctx := context.TODO() + // Create a new ListWatch for the obj + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.selectors(gvk).ApplyToList(&opts) + res := listObj.DeepCopyObject() + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot + err := client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Do(ctx).Into(res) + return res, err + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.selectors(gvk).ApplyToList(&opts) + // Watch needs to be set to true separately + opts.Watch = true + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + isNamespaceScoped := namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot + return client.Get().NamespaceIfScoped(namespace, isNamespaceScoped).Resource(mapping.Resource.Resource).VersionedParams(&opts, ip.paramCodec).Watch(ctx) + }, + }, nil +} + +func createUnstructuredListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + // If the rest configuration has a negotiated serializer passed in, + // we should remove it and use the one that the dynamic client sets for us. + cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + dynamicClient, err := dynamic.NewForConfig(cfg) + if err != nil { + return nil, err + } + + // TODO: the functions that make use of this ListWatch should be adapted to + // pass in their own contexts instead of relying on this fixed one here. + ctx := context.TODO() + // Create a new ListWatch for the obj + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.selectors(gvk).ApplyToList(&opts) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return dynamicClient.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) + } + return dynamicClient.Resource(mapping.Resource).List(ctx, opts) + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.selectors(gvk).ApplyToList(&opts) + // Watch needs to be set to true separately + opts.Watch = true + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + return dynamicClient.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) + } + return dynamicClient.Resource(mapping.Resource).Watch(ctx, opts) + }, + }, nil +} + +func createMetadataListWatch(gvk schema.GroupVersionKind, ip *specificInformersMap) (*cache.ListWatch, error) { + // Kubernetes APIs work against Resources, not GroupVersionKinds. Map the + // groupVersionKind to the Resource API we will use. + mapping, err := ip.mapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + + // Always clear the negotiated serializer and use the one + // set from the metadata client. 
+ cfg := rest.CopyConfig(ip.config) + cfg.NegotiatedSerializer = nil + + // grab the metadata client + client, err := metadata.NewForConfig(cfg) + if err != nil { + return nil, err + } + + // TODO: the functions that make use of this ListWatch should be adapted to + // pass in their own contexts instead of relying on this fixed one here. + ctx := context.TODO() + + // create the relevant listwatch + return &cache.ListWatch{ + ListFunc: func(opts metav1.ListOptions) (runtime.Object, error) { + ip.selectors(gvk).ApplyToList(&opts) + + var ( + list *metav1.PartialObjectMetadataList + err error + ) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + list, err = client.Resource(mapping.Resource).Namespace(namespace).List(ctx, opts) + } else { + list, err = client.Resource(mapping.Resource).List(ctx, opts) + } + if list != nil { + for i := range list.Items { + list.Items[i].SetGroupVersionKind(gvk) + } + } + return list, err + }, + // Setup the watch function + WatchFunc: func(opts metav1.ListOptions) (watch.Interface, error) { + ip.selectors(gvk).ApplyToList(&opts) + // Watch needs to be set to true separately + opts.Watch = true + + var ( + watcher watch.Interface + err error + ) + namespace := restrictNamespaceBySelector(ip.namespace, ip.selectors(gvk)) + if namespace != "" && mapping.Scope.Name() != meta.RESTScopeNameRoot { + watcher, err = client.Resource(mapping.Resource).Namespace(namespace).Watch(ctx, opts) + } else { + watcher, err = client.Resource(mapping.Resource).Watch(ctx, opts) + } + if watcher != nil { + watcher = newGVKFixupWatcher(gvk, watcher) + } + return watcher, err + }, + }, nil +} + +// newGVKFixupWatcher adds a wrapper that preserves the GVK information when +// events come in. +// +// This works around a bug where GVK information is not passed into mapping +// functions when using the OnlyMetadata option in the builder. +// This issue is most likely caused by kubernetes/kubernetes#80609. +// See kubernetes-sigs/controller-runtime#1484. +// +// This was originally implemented as a cache.ResourceEventHandler wrapper but +// that contained a data race which was resolved by setting the GVK in a watch +// wrapper, before the objects are written to the cache. +// See kubernetes-sigs/controller-runtime#1650. +// +// The original watch wrapper was found to be incompatible with +// k8s.io/client-go/tools/cache.Reflector so it has been re-implemented as a +// watch.Filter which is compatible. +// See kubernetes-sigs/controller-runtime#1789. +func newGVKFixupWatcher(gvk schema.GroupVersionKind, watcher watch.Interface) watch.Interface { + return watch.Filter( + watcher, + func(in watch.Event) (watch.Event, bool) { + in.Object.GetObjectKind().SetGroupVersionKind(gvk) + return in, true + }, + ) +} + +// resyncPeriod returns a function which generates a duration each time it is +// invoked; this is so that multiple controllers don't get into lock-step and all +// hammer the apiserver with list requests simultaneously. 
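+// For example (illustrative), with a 10h base period each informer resyncs at
+// a random interval in [9h, 11h):
+//
+//	next := resyncPeriod(10 * time.Hour)() // e.g. roughly 10h13m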
+func resyncPeriod(resync time.Duration) func() time.Duration {
+	return func() time.Duration {
+		// the factor will fall into [0.9, 1.1)
+		factor := rand.Float64()/5.0 + 0.9 //nolint:gosec
+		return time.Duration(float64(resync.Nanoseconds()) * factor)
+	}
+}
+
+// restrictNamespaceBySelector returns either a global restriction for all ListWatches
+// if not default/empty, or the namespace that a ListWatch for the specific resource
+// is restricted to, based on a specified field selector for the metadata.namespace field.
+func restrictNamespaceBySelector(namespaceOpt string, s Selector) string {
+	if namespaceOpt != "" {
+		// namespace is already restricted
+		return namespaceOpt
+	}
+	fieldSelector := s.Field
+	if fieldSelector == nil || fieldSelector.Empty() {
+		return ""
+	}
+	// check whether a selector includes the namespace field
+	value, found := fieldSelector.RequiresExactMatch("metadata.namespace")
+	if found {
+		return value
+	}
+	return ""
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go
new file mode 100644
index 00000000..4eff32fb
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/selector.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SelectorsByGVK associates a GroupVersionKind with a field/label selector.
+type SelectorsByGVK map[schema.GroupVersionKind]Selector
+
+func (s SelectorsByGVK) forGVK(gvk schema.GroupVersionKind) Selector {
+	if specific, found := s[gvk]; found {
+		return specific
+	}
+	if defaultSelector, found := s[schema.GroupVersionKind{}]; found {
+		return defaultSelector
+	}
+
+	return Selector{}
+}
+
+// Selector specifies the label/field selector to fill in ListOptions.
+type Selector struct {
+	Label labels.Selector
+	Field fields.Selector
+}
+
+// ApplyToList fills in the ListOptions LabelSelector and FieldSelector if needed.
+func (s Selector) ApplyToList(listOpts *metav1.ListOptions) {
+	if s.Label != nil {
+		listOpts.LabelSelector = s.Label.String()
+	}
+	if s.Field != nil {
+		listOpts.FieldSelector = s.Field.String()
+	}
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go
new file mode 100644
index 00000000..f69e0226
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/internal/transformers.go
@@ -0,0 +1,55 @@
+package internal
+
+import (
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/tools/cache"
+
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// TransformFuncByObject provides access to the correct transform function for
+// any given GVK.
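+// A typical transform (illustrative sketch, not part of this interface) strips
+// bookkeeping fields before objects are stored:
+//
+//	func stripManagedFields(in interface{}) (interface{}, error) {
+//		if obj, ok := in.(metav1.Object); ok {
+//			obj.SetManagedFields(nil) // shrink the cached copy
+//		}
+//		return in, nil
+//	}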
+type TransformFuncByObject interface { + Set(runtime.Object, *runtime.Scheme, cache.TransformFunc) error + Get(schema.GroupVersionKind) cache.TransformFunc + SetDefault(transformer cache.TransformFunc) +} + +type transformFuncByGVK struct { + defaultTransform cache.TransformFunc + transformers map[schema.GroupVersionKind]cache.TransformFunc +} + +// TransformFuncByObjectFromMap creates a TransformFuncByObject from a map that +// maps GVKs to TransformFuncs. +func TransformFuncByObjectFromMap(in map[schema.GroupVersionKind]cache.TransformFunc) TransformFuncByObject { + byGVK := &transformFuncByGVK{} + if defaultFunc, hasDefault := in[schema.GroupVersionKind{}]; hasDefault { + byGVK.defaultTransform = defaultFunc + } + delete(in, schema.GroupVersionKind{}) + byGVK.transformers = in + return byGVK +} + +func (t *transformFuncByGVK) SetDefault(transformer cache.TransformFunc) { + t.defaultTransform = transformer +} + +func (t *transformFuncByGVK) Set(obj runtime.Object, scheme *runtime.Scheme, transformer cache.TransformFunc) error { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return err + } + + t.transformers[gvk] = transformer + return nil +} + +func (t transformFuncByGVK) Get(gvk schema.GroupVersionKind) cache.TransformFunc { + if val, ok := t.transformers[gvk]; ok { + return val + } + return t.defaultTransform +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go new file mode 100644 index 00000000..fccb3647 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/cache/multi_namespace_cache.go @@ -0,0 +1,361 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cache + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/rest" + toolscache "k8s.io/client-go/tools/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" +) + +// NewCacheFunc - Function for creating a new cache from the options and a rest config. +type NewCacheFunc func(config *rest.Config, opts Options) (Cache, error) + +// a new global namespaced cache to handle cluster scoped resources. +const globalCache = "_cluster-scope" + +// MultiNamespacedCacheBuilder - Builder function to create a new multi-namespaced cache. +// This will scope the cache to a list of namespaces. Listing for all namespaces +// will list for all the namespaces that this knows about. By default this will create +// a global cache for cluster scoped resource. Note that this is not intended +// to be used for excluding namespaces, this is better done via a Predicate. Also note that +// you may face performance issues when using this with a high number of namespaces. 
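+// Typical wiring through a manager (illustrative sketch; assumes the
+// controller-runtime manager's NewCache option):
+//
+//	mgr, err := ctrl.NewManager(cfg, ctrl.Options{
+//		NewCache: cache.MultiNamespacedCacheBuilder([]string{"ns-a", "ns-b"}),
+//	})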
+func MultiNamespacedCacheBuilder(namespaces []string) NewCacheFunc { + return func(config *rest.Config, opts Options) (Cache, error) { + opts, err := defaultOpts(config, opts) + if err != nil { + return nil, err + } + + caches := map[string]Cache{} + + // create a cache for cluster scoped resources + gCache, err := New(config, opts) + if err != nil { + return nil, fmt.Errorf("error creating global cache: %w", err) + } + + for _, ns := range namespaces { + opts.Namespace = ns + c, err := New(config, opts) + if err != nil { + return nil, err + } + caches[ns] = c + } + return &multiNamespaceCache{namespaceToCache: caches, Scheme: opts.Scheme, RESTMapper: opts.Mapper, clusterCache: gCache}, nil + } +} + +// multiNamespaceCache knows how to handle multiple namespaced caches +// Use this feature when scoping permissions for your +// operator to a list of namespaces instead of watching every namespace +// in the cluster. +type multiNamespaceCache struct { + namespaceToCache map[string]Cache + Scheme *runtime.Scheme + RESTMapper apimeta.RESTMapper + clusterCache Cache +} + +var _ Cache = &multiNamespaceCache{} + +// Methods for multiNamespaceCache to conform to the Informers interface. +func (c *multiNamespaceCache) GetInformer(ctx context.Context, obj client.Object) (Informer, error) { + informers := map[string]Informer{} + + // If the object is clusterscoped, get the informer from clusterCache, + // if not use the namespaced caches. + isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return nil, err + } + if !isNamespaced { + clusterCacheInf, err := c.clusterCache.GetInformer(ctx, obj) + if err != nil { + return nil, err + } + informers[globalCache] = clusterCacheInf + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil + } + + for ns, cache := range c.namespaceToCache { + informer, err := cache.GetInformer(ctx, obj) + if err != nil { + return nil, err + } + informers[ns] = informer + } + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil +} + +func (c *multiNamespaceCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind) (Informer, error) { + informers := map[string]Informer{} + + // If the object is clusterscoped, get the informer from clusterCache, + // if not use the namespaced caches. 
+ isNamespaced, err := objectutil.IsAPINamespacedWithGVK(gvk, c.Scheme, c.RESTMapper) + if err != nil { + return nil, err + } + if !isNamespaced { + clusterCacheInf, err := c.clusterCache.GetInformerForKind(ctx, gvk) + if err != nil { + return nil, err + } + informers[globalCache] = clusterCacheInf + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil + } + + for ns, cache := range c.namespaceToCache { + informer, err := cache.GetInformerForKind(ctx, gvk) + if err != nil { + return nil, err + } + informers[ns] = informer + } + + return &multiNamespaceInformer{namespaceToInformer: informers}, nil +} + +func (c *multiNamespaceCache) Start(ctx context.Context) error { + // start global cache + go func() { + err := c.clusterCache.Start(ctx) + if err != nil { + log.Error(err, "cluster scoped cache failed to start") + } + }() + + // start namespaced caches + for ns, cache := range c.namespaceToCache { + go func(ns string, cache Cache) { + err := cache.Start(ctx) + if err != nil { + log.Error(err, "multinamespace cache failed to start namespaced informer", "namespace", ns) + } + }(ns, cache) + } + + <-ctx.Done() + return nil +} + +func (c *multiNamespaceCache) WaitForCacheSync(ctx context.Context) bool { + synced := true + for _, cache := range c.namespaceToCache { + if s := cache.WaitForCacheSync(ctx); !s { + synced = s + } + } + + // check if cluster scoped cache has synced + if !c.clusterCache.WaitForCacheSync(ctx) { + synced = false + } + return synced +} + +func (c *multiNamespaceCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error { + isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return nil //nolint:nilerr + } + + if !isNamespaced { + return c.clusterCache.IndexField(ctx, obj, field, extractValue) + } + + for _, cache := range c.namespaceToCache { + if err := cache.IndexField(ctx, obj, field, extractValue); err != nil { + return err + } + } + return nil +} + +func (c *multiNamespaceCache) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error { + isNamespaced, err := objectutil.IsAPINamespaced(obj, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + + if !isNamespaced { + // Look into the global cache to fetch the object + return c.clusterCache.Get(ctx, key, obj) + } + + cache, ok := c.namespaceToCache[key.Namespace] + if !ok { + return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", key) + } + return cache.Get(ctx, key, obj) +} + +// List multi namespace cache will get all the objects in the namespaces that the cache is watching if asked for all namespaces. +func (c *multiNamespaceCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error { + listOpts := client.ListOptions{} + listOpts.ApplyOptions(opts) + + isNamespaced, err := objectutil.IsAPINamespaced(list, c.Scheme, c.RESTMapper) + if err != nil { + return err + } + + if !isNamespaced { + // Look at the global cache to get the objects with the specified GVK + return c.clusterCache.List(ctx, list, opts...) + } + + if listOpts.Namespace != corev1.NamespaceAll { + cache, ok := c.namespaceToCache[listOpts.Namespace] + if !ok { + return fmt.Errorf("unable to get: %v because of unknown namespace for the cache", listOpts.Namespace) + } + return cache.List(ctx, list, opts...) 
+ } + + listAccessor, err := apimeta.ListAccessor(list) + if err != nil { + return err + } + + allItems, err := apimeta.ExtractList(list) + if err != nil { + return err + } + + limitSet := listOpts.Limit > 0 + + var resourceVersion string + for _, cache := range c.namespaceToCache { + listObj := list.DeepCopyObject().(client.ObjectList) + err = cache.List(ctx, listObj, &listOpts) + if err != nil { + return err + } + items, err := apimeta.ExtractList(listObj) + if err != nil { + return err + } + accessor, err := apimeta.ListAccessor(listObj) + if err != nil { + return fmt.Errorf("object: %T must be a list type", list) + } + allItems = append(allItems, items...) + // The last list call should have the most correct resource version. + resourceVersion = accessor.GetResourceVersion() + if limitSet { + // decrement Limit by the number of items + // fetched from the current namespace. + listOpts.Limit -= int64(len(items)) + // if a Limit was set and the number of + // items read has reached this set limit, + // then stop reading. + if listOpts.Limit == 0 { + break + } + } + } + listAccessor.SetResourceVersion(resourceVersion) + + return apimeta.SetList(list, allItems) +} + +// multiNamespaceInformer knows how to handle interacting with the underlying informer across multiple namespaces. +type multiNamespaceInformer struct { + namespaceToInformer map[string]Informer +} + +var _ Informer = &multiNamespaceInformer{} + +// AddEventHandler adds the handler to each namespaced informer. +func (i *multiNamespaceInformer) AddEventHandler(handler toolscache.ResourceEventHandler) (toolscache.ResourceEventHandlerRegistration, error) { + handles := make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)) + for ns, informer := range i.namespaceToInformer { + registration, err := informer.AddEventHandler(handler) + if err != nil { + return nil, err + } + handles[ns] = registration + } + return handles, nil +} + +// AddEventHandlerWithResyncPeriod adds the handler with a resync period to each namespaced informer. +func (i *multiNamespaceInformer) AddEventHandlerWithResyncPeriod(handler toolscache.ResourceEventHandler, resyncPeriod time.Duration) (toolscache.ResourceEventHandlerRegistration, error) { + handles := make(map[string]toolscache.ResourceEventHandlerRegistration, len(i.namespaceToInformer)) + for ns, informer := range i.namespaceToInformer { + registration, err := informer.AddEventHandlerWithResyncPeriod(handler, resyncPeriod) + if err != nil { + return nil, err + } + handles[ns] = registration + } + return handles, nil +} + +// RemoveEventHandler removes a formerly added event handler given by its registration handle. +func (i *multiNamespaceInformer) RemoveEventHandler(h toolscache.ResourceEventHandlerRegistration) error { + handles, ok := h.(map[string]toolscache.ResourceEventHandlerRegistration) + if !ok { + return fmt.Errorf("it is not the registration returned by multiNamespaceInformer") + } + for ns, informer := range i.namespaceToInformer { + registration, ok := handles[ns] + if !ok { + continue + } + if err := informer.RemoveEventHandler(registration); err != nil { + return err + } + } + return nil +} + +// AddIndexers adds the indexer for each namespaced informer. +func (i *multiNamespaceInformer) AddIndexers(indexers toolscache.Indexers) error { + for _, informer := range i.namespaceToInformer { + err := informer.AddIndexers(indexers) + if err != nil { + return err + } + } + return nil +} + +// HasSynced checks if each namespaced informer has synced. 
+func (i *multiNamespaceInformer) HasSynced() bool { + for _, informer := range i.namespaceToInformer { + if ok := informer.HasSynced(); !ok { + return ok + } + } + return true +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go new file mode 100644 index 00000000..3055f4c4 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/apimachinery.go @@ -0,0 +1,196 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package apiutil contains utilities for working with raw Kubernetes +// API machinery, such as creating RESTMappers and raw REST clients, +// and extracting the GVK of an object. +package apiutil + +import ( + "fmt" + "reflect" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/discovery" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/restmapper" +) + +var ( + protobufScheme = runtime.NewScheme() + protobufSchemeLock sync.RWMutex +) + +func init() { + // Currently only enabled for built-in resources which are guaranteed to implement Protocol Buffers. + // For custom resources, CRDs can not support Protocol Buffers but Aggregated API can. + // See doc: https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility + if err := clientgoscheme.AddToScheme(protobufScheme); err != nil { + panic(err) + } +} + +// AddToProtobufScheme add the given SchemeBuilder into protobufScheme, which should +// be additional types that do support protobuf. +func AddToProtobufScheme(addToScheme func(*runtime.Scheme) error) error { + protobufSchemeLock.Lock() + defer protobufSchemeLock.Unlock() + return addToScheme(protobufScheme) +} + +// NewDiscoveryRESTMapper constructs a new RESTMapper based on discovery +// information fetched by a new client with the given config. +func NewDiscoveryRESTMapper(c *rest.Config) (meta.RESTMapper, error) { + // Get a mapper + dc, err := discovery.NewDiscoveryClientForConfig(c) + if err != nil { + return nil, err + } + gr, err := restmapper.GetAPIGroupResources(dc) + if err != nil { + return nil, err + } + return restmapper.NewDiscoveryRESTMapper(gr), nil +} + +// GVKForObject finds the GroupVersionKind associated with the given object, if there is only a single such GVK. +func GVKForObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionKind, error) { + // TODO(directxman12): do we want to generalize this to arbitrary container types? + // I think we'd need a generalized form of scheme or something. 
+	// I think we'd need a generalized form of scheme or something.  It's a
+	// shame there's not a reliable "GetGVK" interface that works by default
+	// for unpopulated static types and populated "dynamic" types
+	// (unstructured, partial, etc)
+
+	// check for PartialObjectMetadata, which is analogous to unstructured, but isn't handled by ObjectKinds
+	_, isPartial := obj.(*metav1.PartialObjectMetadata)
+	_, isPartialList := obj.(*metav1.PartialObjectMetadataList)
+	if isPartial || isPartialList {
+		// we require that the GVK be populated in order to recognize the object
+		gvk := obj.GetObjectKind().GroupVersionKind()
+		if len(gvk.Kind) == 0 {
+			return schema.GroupVersionKind{}, runtime.NewMissingKindErr("unstructured object has no kind")
+		}
+		if len(gvk.Version) == 0 {
+			return schema.GroupVersionKind{}, runtime.NewMissingVersionErr("unstructured object has no version")
+		}
+		return gvk, nil
+	}
+
+	gvks, isUnversioned, err := scheme.ObjectKinds(obj)
+	if err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	if isUnversioned {
+		return schema.GroupVersionKind{}, fmt.Errorf("cannot create group-version-kind for unversioned type %T", obj)
+	}
+
+	if len(gvks) < 1 {
+		return schema.GroupVersionKind{}, fmt.Errorf("no group-version-kinds associated with type %T", obj)
+	}
+	if len(gvks) > 1 {
+		// this should only trigger for things like metav1.XYZ --
+		// normal versioned types should be fine
+		return schema.GroupVersionKind{}, fmt.Errorf(
+			"multiple group-version-kinds associated with type %T, refusing to guess at one", obj)
+	}
+	return gvks[0], nil
+}
+
+// RESTClientForGVK constructs a new rest.Interface capable of accessing the resource associated
+// with the given GroupVersionKind. The REST client will be configured to use the negotiated serializer from
+// baseConfig, if set, otherwise a default serializer will be set.
+func RESTClientForGVK(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) (rest.Interface, error) {
+	return rest.RESTClientFor(createRestConfig(gvk, isUnstructured, baseConfig, codecs))
+}
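A hedged sketch (not part of the vendored file) of calling RESTClientForGVK directly, using the signature as vendored here; the GVK and codec wiring mirror what client_cache.go, added later in this diff, does internally:

package example

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// configMapClient builds a raw REST client for v1 ConfigMaps. isUnstructured
// is false, so the config may negotiate protobuf for this built-in type.
func configMapClient(cfg *rest.Config) (rest.Interface, error) {
	gvk := schema.GroupVersionKind{Version: "v1", Kind: "ConfigMap"}
	codecs := serializer.NewCodecFactory(clientgoscheme.Scheme)
	return apiutil.RESTClientForGVK(gvk, false, cfg, codecs)
}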
+
+// serializerWithDecodedGVK is a CodecFactory that overrides the DecoderToVersion of a WithoutConversionCodecFactory
+// in order to avoid clearing the GVK from the decoded object.
+//
+// See https://github.com/kubernetes/kubernetes/issues/80609.
+type serializerWithDecodedGVK struct {
+	serializer.WithoutConversionCodecFactory
+}
+
+// DecoderToVersion returns a decoder that does not do conversion.
+func (f serializerWithDecodedGVK) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
+	return serializer
+}
+
+// createRestConfig copies the base config and updates needed fields for a new rest config.
+func createRestConfig(gvk schema.GroupVersionKind, isUnstructured bool, baseConfig *rest.Config, codecs serializer.CodecFactory) *rest.Config {
+	gv := gvk.GroupVersion()
+
+	cfg := rest.CopyConfig(baseConfig)
+	cfg.GroupVersion = &gv
+	if gvk.Group == "" {
+		cfg.APIPath = "/api"
+	} else {
+		cfg.APIPath = "/apis"
+	}
+	if cfg.UserAgent == "" {
+		cfg.UserAgent = rest.DefaultKubernetesUserAgent()
+	}
+	// TODO(FillZpp): In the long run, we want to check discovery or something to make sure that this is actually true.
+	if cfg.ContentType == "" && !isUnstructured {
+		protobufSchemeLock.RLock()
+		if protobufScheme.Recognizes(gvk) {
+			cfg.ContentType = runtime.ContentTypeProtobuf
+		}
+		protobufSchemeLock.RUnlock()
+	}
+
+	if isUnstructured {
+		// If the object is unstructured, we need to preserve the GVK information.
+		// Use our own custom serializer.
+		cfg.NegotiatedSerializer = serializerWithDecodedGVK{serializer.WithoutConversionCodecFactory{CodecFactory: codecs}}
+	} else {
+		cfg.NegotiatedSerializer = serializerWithTargetZeroingDecode{NegotiatedSerializer: serializer.WithoutConversionCodecFactory{CodecFactory: codecs}}
+	}
+
+	return cfg
+}
+
+type serializerWithTargetZeroingDecode struct {
+	runtime.NegotiatedSerializer
+}
+
+func (s serializerWithTargetZeroingDecode) DecoderToVersion(serializer runtime.Decoder, r runtime.GroupVersioner) runtime.Decoder {
+	return targetZeroingDecoder{upstream: s.NegotiatedSerializer.DecoderToVersion(serializer, r)}
+}
+
+type targetZeroingDecoder struct {
+	upstream runtime.Decoder
+}
+
+func (t targetZeroingDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
+	zero(into)
+	return t.upstream.Decode(data, defaults, into)
+}
+
+// zero zeros the value of a pointer.
+func zero(x interface{}) {
+	if x == nil {
+		return
+	}
+	res := reflect.ValueOf(x).Elem()
+	res.Set(reflect.Zero(res.Type()))
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go
new file mode 100644
index 00000000..6b9dcf68
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/dynamicrestmapper.go
@@ -0,0 +1,301 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiutil
+
+import (
+	"sync"
+	"sync/atomic"
+
+	"golang.org/x/time/rate"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/restmapper"
+)
+
+// dynamicRESTMapper is a RESTMapper that dynamically discovers resource
+// types at runtime.
+type dynamicRESTMapper struct {
+	mu           sync.RWMutex // protects the following fields
+	staticMapper meta.RESTMapper
+	limiter      *rate.Limiter
+	newMapper    func() (meta.RESTMapper, error)
+
+	lazy bool
+	// Used for lazy init.
+	inited  uint32
+	initMtx sync.Mutex
+
+	useLazyRestmapper bool
+}
+
+// DynamicRESTMapperOption is a functional option on the dynamicRESTMapper.
+type DynamicRESTMapperOption func(*dynamicRESTMapper) error
+
+// WithLimiter sets the RESTMapper's underlying limiter to lim.
+func WithLimiter(lim *rate.Limiter) DynamicRESTMapperOption {
+	return func(drm *dynamicRESTMapper) error {
+		drm.limiter = lim
+		return nil
+	}
+}
+
+// WithLazyDiscovery prevents the RESTMapper from discovering REST mappings
+// until an API call is made.
+var WithLazyDiscovery DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error {
+	drm.lazy = true
+	return nil
+}
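An illustrative sketch (not part of the vendored file) of combining the options defined in this file; the rate values are arbitrary:

package example

import (
	"golang.org/x/time/rate"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
)

// newLazyMapper discovers nothing until the first mapping call, and allows
// at most one discovery refresh per second (burst of 2) afterwards.
func newLazyMapper(cfg *rest.Config) (meta.RESTMapper, error) {
	return apiutil.NewDynamicRESTMapper(cfg,
		apiutil.WithLazyDiscovery,
		apiutil.WithLimiter(rate.NewLimiter(rate.Limit(1), 2)),
	)
}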
+
+// WithExperimentalLazyMapper enables an experimental, more advanced lazy RESTMapper mechanism.
+var WithExperimentalLazyMapper DynamicRESTMapperOption = func(drm *dynamicRESTMapper) error {
+	drm.useLazyRestmapper = true
+	return nil
+}
+
+// WithCustomMapper supports setting a custom RESTMapper refresher instead of
+// the default method, which uses a discovery client.
+//
+// This exists mainly for testing, but can be useful if you need tighter control
+// over how discovery is performed, which discovery endpoints are queried, etc.
+func WithCustomMapper(newMapper func() (meta.RESTMapper, error)) DynamicRESTMapperOption {
+	return func(drm *dynamicRESTMapper) error {
+		drm.newMapper = newMapper
+		return nil
+	}
+}
+
+// NewDynamicRESTMapper returns a dynamic RESTMapper for cfg. The dynamic
+// RESTMapper dynamically discovers resource types at runtime. opts
+// configure the RESTMapper.
+func NewDynamicRESTMapper(cfg *rest.Config, opts ...DynamicRESTMapperOption) (meta.RESTMapper, error) {
+	client, err := discovery.NewDiscoveryClientForConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+	drm := &dynamicRESTMapper{
+		limiter: rate.NewLimiter(rate.Limit(defaultRefillRate), defaultLimitSize),
+		newMapper: func() (meta.RESTMapper, error) {
+			groupResources, err := restmapper.GetAPIGroupResources(client)
+			if err != nil {
+				return nil, err
+			}
+			return restmapper.NewDiscoveryRESTMapper(groupResources), nil
+		},
+	}
+	for _, opt := range opts {
+		if err = opt(drm); err != nil {
+			return nil, err
+		}
+	}
+	if drm.useLazyRestmapper {
+		return newLazyRESTMapperWithClient(client)
+	}
+	if !drm.lazy {
+		if err := drm.setStaticMapper(); err != nil {
+			return nil, err
+		}
+	}
+	return drm, nil
+}
+
+var (
+	// defaultRefillRate is the default rate at which potential calls are
+	// added back to the "bucket" of allowed calls.
+	defaultRefillRate = 5
+	// defaultLimitSize is the default starting/max number of potential calls
+	// per second. Once a call is used, it's added back to the bucket at a rate
+	// of defaultRefillRate per second.
+	defaultLimitSize = 5
+)
+
+// setStaticMapper sets drm's staticMapper by querying its client, regardless
+// of reload backoff.
+func (drm *dynamicRESTMapper) setStaticMapper() error {
+	newMapper, err := drm.newMapper()
+	if err != nil {
+		return err
+	}
+	drm.staticMapper = newMapper
+	return nil
+}
+
+// init initializes drm only once if drm is lazy.
+func (drm *dynamicRESTMapper) init() (err error) {
+	// skip init if drm is not lazy or has initialized
+	if !drm.lazy || atomic.LoadUint32(&drm.inited) != 0 {
+		return nil
+	}
+
+	drm.initMtx.Lock()
+	defer drm.initMtx.Unlock()
+	if drm.inited == 0 {
+		if err = drm.setStaticMapper(); err == nil {
+			atomic.StoreUint32(&drm.inited, 1)
+		}
+	}
+	return err
+}
+
+// checkAndReload attempts to call the given callback, which is assumed to be dependent
+// on the data in the restmapper.
+//
+// If the callback returns an error matching meta.IsNoMatchErr, it will attempt to reload
+// the RESTMapper's data and re-call the callback once that's occurred.
+// If the callback returns any other error, the function will return immediately regardless.
+//
+// It will take care of ensuring that reloads are rate-limited and that extraneous calls
+// aren't made. If a reload would exceed the limiter's rate, it returns the error returned by
+// the callback.
+// It's thread-safe, and takes care of thread-safety for the callback (so the callback does
+// not need to attempt to lock the restmapper).
+func (drm *dynamicRESTMapper) checkAndReload(checkNeedsReload func() error) error {
+	// first, check the common path -- data is fresh enough
+	// (use an IIFE for the lock's defer)
+	err := func() error {
+		drm.mu.RLock()
+		defer drm.mu.RUnlock()
+
+		return checkNeedsReload()
+	}()
+
+	needsReload := meta.IsNoMatchError(err)
+	if !needsReload {
+		return err
+	}
+
+	// if the data wasn't fresh, we'll need to try and update it, so grab the lock...
+	drm.mu.Lock()
+	defer drm.mu.Unlock()
+
+	// ... and double-check that we didn't reload in the meantime
+	err = checkNeedsReload()
+	needsReload = meta.IsNoMatchError(err)
+	if !needsReload {
+		return err
+	}
+
+	// we're still stale, so grab a rate-limit token if we can...
+	if !drm.limiter.Allow() {
+		// return error from static mapper here, we have refreshed often enough (exceeding rate of provided limiter)
+		// so that clients can handle this the same way as a "normal" NoResourceMatchError / NoKindMatchError
+		return err
+	}
+
+	// ...reload...
+	if err := drm.setStaticMapper(); err != nil {
+		return err
+	}
+
+	// ...and return the results of the closure regardless
+	return checkNeedsReload()
+}
+
+// TODO: wrap reload errors on NoKindMatchError with go 1.13 errors.
+
+func (drm *dynamicRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	if err := drm.init(); err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	var gvk schema.GroupVersionKind
+	err := drm.checkAndReload(func() error {
+		var err error
+		gvk, err = drm.staticMapper.KindFor(resource)
+		return err
+	})
+	return gvk, err
+}
+
+func (drm *dynamicRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+	if err := drm.init(); err != nil {
+		return nil, err
+	}
+	var gvks []schema.GroupVersionKind
+	err := drm.checkAndReload(func() error {
+		var err error
+		gvks, err = drm.staticMapper.KindsFor(resource)
+		return err
+	})
+	return gvks, err
+}
+
+func (drm *dynamicRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	if err := drm.init(); err != nil {
+		return schema.GroupVersionResource{}, err
+	}
+
+	var gvr schema.GroupVersionResource
+	err := drm.checkAndReload(func() error {
+		var err error
+		gvr, err = drm.staticMapper.ResourceFor(input)
+		return err
+	})
+	return gvr, err
+}
+
+func (drm *dynamicRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	if err := drm.init(); err != nil {
+		return nil, err
+	}
+	var gvrs []schema.GroupVersionResource
+	err := drm.checkAndReload(func() error {
+		var err error
+		gvrs, err = drm.staticMapper.ResourcesFor(input)
+		return err
+	})
+	return gvrs, err
+}
+
+func (drm *dynamicRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
+	if err := drm.init(); err != nil {
+		return nil, err
+	}
+	var mapping *meta.RESTMapping
+	err := drm.checkAndReload(func() error {
+		var err error
+		mapping, err = drm.staticMapper.RESTMapping(gk, versions...)
+		return err
+	})
+	return mapping, err
+}
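A small sketch (not part of the vendored file) of what a caller sees: on a NoMatch error the mapper transparently re-runs discovery, within the limiter budget, before giving up:

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// deploymentMapping resolves the REST mapping for apps/v1 Deployments.
func deploymentMapping(mapper meta.RESTMapper) (*meta.RESTMapping, error) {
	gk := schema.GroupKind{Group: "apps", Kind: "Deployment"}
	mapping, err := mapper.RESTMapping(gk, "v1")
	if err != nil {
		// Still unknown after a (rate-limited) reload attempt.
		return nil, fmt.Errorf("no mapping for %s: %w", gk, err)
	}
	return mapping, nil
}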
+
+func (drm *dynamicRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
+	if err := drm.init(); err != nil {
+		return nil, err
+	}
+	var mappings []*meta.RESTMapping
+	err := drm.checkAndReload(func() error {
+		var err error
+		mappings, err = drm.staticMapper.RESTMappings(gk, versions...)
+		return err
+	})
+	return mappings, err
+}
+
+func (drm *dynamicRESTMapper) ResourceSingularizer(resource string) (string, error) {
+	if err := drm.init(); err != nil {
+		return "", err
+	}
+	var singular string
+	err := drm.checkAndReload(func() error {
+		var err error
+		singular, err = drm.staticMapper.ResourceSingularizer(resource)
+		return err
+	})
+	return singular, err
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go
new file mode 100644
index 00000000..70c6a11d
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/apiutil/lazyrestmapper.go
@@ -0,0 +1,248 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiutil
+
+import (
+	"fmt"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/restmapper"
+)
+
+// lazyRESTMapper is a RESTMapper that will lazily query the provided
+// client for discovery information to do REST mappings.
+type lazyRESTMapper struct {
+	mapper      meta.RESTMapper
+	client      *discovery.DiscoveryClient
+	knownGroups map[string]*restmapper.APIGroupResources
+	apiGroups   *metav1.APIGroupList
+
+	// mutex to provide thread-safe mapper reloading.
+	mu sync.Mutex
+}
+
+// newLazyRESTMapperWithClient initializes a LazyRESTMapper with a custom discovery client.
+func newLazyRESTMapperWithClient(discoveryClient *discovery.DiscoveryClient) (meta.RESTMapper, error) {
+	return &lazyRESTMapper{
+		mapper:      restmapper.NewDiscoveryRESTMapper([]*restmapper.APIGroupResources{}),
+		client:      discoveryClient,
+		knownGroups: map[string]*restmapper.APIGroupResources{},
+	}, nil
+}
+
+// KindFor implements Mapper.KindFor.
+func (m *lazyRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	res, err := m.mapper.KindFor(resource)
+	if meta.IsNoMatchError(err) {
+		if err = m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil {
+			return res, err
+		}
+
+		res, err = m.mapper.KindFor(resource)
+	}
+
+	return res, err
+}
+
+// KindsFor implements Mapper.KindsFor.
+func (m *lazyRESTMapper) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) {
+	res, err := m.mapper.KindsFor(resource)
+	if meta.IsNoMatchError(err) {
+		if err = m.addKnownGroupAndReload(resource.Group, resource.Version); err != nil {
+			return res, err
+		}
+
+		res, err = m.mapper.KindsFor(resource)
+	}
+
+	return res, err
+}
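A hedged sketch (not part of the vendored file) of the lazy path; the CRD group is hypothetical. The first call pays for discovery of just that group; later calls are served from the cached mapper:

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func kindForCertificates(mapper meta.RESTMapper) (schema.GroupVersionKind, error) {
	gvr := schema.GroupVersionResource{Group: "cert-manager.io", Version: "v1", Resource: "certificates"}
	// First call: a NoMatch from the initially empty mapper triggers
	// discovery for cert-manager.io only. Subsequent calls hit the cache.
	return mapper.KindFor(gvr)
}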
+
+// ResourceFor implements Mapper.ResourceFor.
+func (m *lazyRESTMapper) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) {
+	res, err := m.mapper.ResourceFor(input)
+	if meta.IsNoMatchError(err) {
+		if err = m.addKnownGroupAndReload(input.Group, input.Version); err != nil {
+			return res, err
+		}
+
+		res, err = m.mapper.ResourceFor(input)
+	}
+
+	return res, err
+}
+
+// ResourcesFor implements Mapper.ResourcesFor.
+func (m *lazyRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) {
+	res, err := m.mapper.ResourcesFor(input)
+	if meta.IsNoMatchError(err) {
+		if err = m.addKnownGroupAndReload(input.Group, input.Version); err != nil {
+			return res, err
+		}
+
+		res, err = m.mapper.ResourcesFor(input)
+	}
+
+	return res, err
+}
+
+// RESTMapping implements Mapper.RESTMapping.
+func (m *lazyRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*meta.RESTMapping, error) {
+	res, err := m.mapper.RESTMapping(gk, versions...)
+	if meta.IsNoMatchError(err) {
+		if err = m.addKnownGroupAndReload(gk.Group, versions...); err != nil {
+			return res, err
+		}
+
+		res, err = m.mapper.RESTMapping(gk, versions...)
+	}
+
+	return res, err
+}
+
+// RESTMappings implements Mapper.RESTMappings.
+func (m *lazyRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*meta.RESTMapping, error) {
+	res, err := m.mapper.RESTMappings(gk, versions...)
+	if meta.IsNoMatchError(err) {
+		if err = m.addKnownGroupAndReload(gk.Group, versions...); err != nil {
+			return res, err
+		}
+
+		res, err = m.mapper.RESTMappings(gk, versions...)
+	}
+
+	return res, err
+}
+
+// ResourceSingularizer implements Mapper.ResourceSingularizer.
+func (m *lazyRESTMapper) ResourceSingularizer(resource string) (string, error) {
+	return m.mapper.ResourceSingularizer(resource)
+}
+
+// addKnownGroupAndReload reloads the mapper with updated information about missing API group.
+// versions can be specified for partial updates, for instance for v1beta1 version only.
+func (m *lazyRESTMapper) addKnownGroupAndReload(groupName string, versions ...string) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	// If no specific versions are set by user, we will scan all available ones for the API group.
+	// This operation requires 2 requests: /api and /apis, but only once. For all subsequent calls
+	// this data will be taken from cache.
+	if len(versions) == 0 {
+		apiGroup, err := m.findAPIGroupByName(groupName)
+		if err != nil {
+			return err
+		}
+		for _, version := range apiGroup.Versions {
+			versions = append(versions, version.Version)
+		}
+	}
+
+	// Create or fetch group resources from cache.
+	groupResources := &restmapper.APIGroupResources{
+		Group:              metav1.APIGroup{Name: groupName},
+		VersionedResources: make(map[string][]metav1.APIResource),
+	}
+	if _, ok := m.knownGroups[groupName]; ok {
+		groupResources = m.knownGroups[groupName]
+	}
+
+	// Update information for group resources about versioned resources.
+	// The number of API calls is equal to the number of versions: /apis/<group>/<version>.
+	groupVersionResources, err := m.fetchGroupVersionResources(groupName, versions...)
+	if err != nil {
+		return fmt.Errorf("failed to get API group resources: %w", err)
+	}
+	for version, resources := range groupVersionResources {
+		groupResources.VersionedResources[version.Version] = resources.APIResources
+	}
+
+	// Update information for group resources about the API group by adding new versions.
+	for _, version := range versions {
+		groupResources.Group.Versions = append(groupResources.Group.Versions, metav1.GroupVersionForDiscovery{
+			GroupVersion: metav1.GroupVersion{Group: groupName, Version: version}.String(),
+			Version:      version,
+		})
+	}
+
+	// Update data in the cache.
+	m.knownGroups[groupName] = groupResources
+
+	// Finally, update the group with received information and regenerate the mapper.
+	updatedGroupResources := make([]*restmapper.APIGroupResources, 0, len(m.knownGroups))
+	for _, agr := range m.knownGroups {
+		updatedGroupResources = append(updatedGroupResources, agr)
+	}
+
+	m.mapper = restmapper.NewDiscoveryRESTMapper(updatedGroupResources)
+
+	return nil
+}
+
+// findAPIGroupByName returns API group by its name.
+func (m *lazyRESTMapper) findAPIGroupByName(groupName string) (metav1.APIGroup, error) {
+	// Ensure that required info about existing API groups is received and stored in the mapper.
+	// It will make 2 API calls to /api and /apis, but only once.
+	if m.apiGroups == nil {
+		apiGroups, err := m.client.ServerGroups()
+		if err != nil {
+			return metav1.APIGroup{}, fmt.Errorf("failed to get server groups: %w", err)
+		}
+		if len(apiGroups.Groups) == 0 {
+			return metav1.APIGroup{}, fmt.Errorf("received an empty API groups list")
+		}
+
+		m.apiGroups = apiGroups
+	}
+
+	for i := range m.apiGroups.Groups {
+		if groupName == (&m.apiGroups.Groups[i]).Name {
+			return m.apiGroups.Groups[i], nil
+		}
+	}
+
+	return metav1.APIGroup{}, fmt.Errorf("failed to find API group %s", groupName)
+}
+
+// fetchGroupVersionResources fetches the resources for the specified group and its versions.
+func (m *lazyRESTMapper) fetchGroupVersionResources(groupName string, versions ...string) (map[schema.GroupVersion]*metav1.APIResourceList, error) {
+	groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList)
+	failedGroups := make(map[schema.GroupVersion]error)
+
+	for _, version := range versions {
+		groupVersion := schema.GroupVersion{Group: groupName, Version: version}
+
+		apiResourceList, err := m.client.ServerResourcesForGroupVersion(groupVersion.String())
+		if err != nil {
+			failedGroups[groupVersion] = err
+		}
+		if apiResourceList != nil {
+			// even in case of error, some fallback might have been returned.
+			groupVersionResources[groupVersion] = apiResourceList
+		}
+	}
+
+	if len(failedGroups) > 0 {
+		return nil, &discovery.ErrGroupDiscoveryFailed{Groups: failedGroups}
+	}
+
+	return groupVersionResources, nil
+}
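An illustrative helper (not part of the vendored file) showing how meta.IsNoMatchError distinguishes "group/kind not served" (for example a CRD that is not installed yet) from real discovery failures such as the ErrGroupDiscoveryFailed above:

package example

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func gkServed(mapper meta.RESTMapper, gk schema.GroupKind) (bool, error) {
	_, err := mapper.RESTMapping(gk)
	switch {
	case err == nil:
		return true, nil
	case meta.IsNoMatchError(err):
		return false, nil // not served; perhaps the CRD is not applied yet
	default:
		return false, err // discovery or transport problem
	}
}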
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go
new file mode 100644
index 00000000..7d1ed5c9
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client.go
@@ -0,0 +1,482 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/metadata"
+	"k8s.io/client-go/rest"
+
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+)
+
+// WarningHandlerOptions are options for configuring a
+// warning handler for the client which is responsible
+// for surfacing API Server warnings.
+type WarningHandlerOptions struct {
+	// SuppressWarnings decides if the warnings from the
+	// API server are suppressed or surfaced in the client.
+	SuppressWarnings bool
+	// AllowDuplicateLogs does not deduplicate the to-be
+	// logged surfaced warnings messages. See
+	// log.WarningHandlerOptions for considerations
+	// regarding deduplication.
+	AllowDuplicateLogs bool
+}
+
+// Options are creation options for a Client.
+type Options struct {
+	// Scheme, if provided, will be used to map go structs to GroupVersionKinds
+	Scheme *runtime.Scheme
+
+	// Mapper, if provided, will be used to map GroupVersionKinds to Resources
+	Mapper meta.RESTMapper
+
+	// Opts is used to configure the warning handler responsible for
+	// surfacing and handling warnings messages sent by the API server.
+	Opts WarningHandlerOptions
+}
+
+// New returns a new Client using the provided config and Options.
+// The returned client reads *and* writes directly from the server
+// (it doesn't use object caches). It understands how to work with
+// normal types (both custom resources and aggregated/built-in resources),
+// as well as unstructured types.
+//
+// In the case of normal types, the scheme will be used to look up the
+// corresponding group, version, and kind for the given type. In the
+// case of unstructured types, the group, version, and kind will be extracted
+// from the corresponding fields on the object.
+func New(config *rest.Config, options Options) (Client, error) {
+	return newClient(config, options)
+}
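A minimal sketch (not part of the vendored file) of constructing a direct client with the options above; suppressing warnings is purely illustrative:

package example

import (
	"k8s.io/client-go/rest"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newQuietClient builds a client that drops API server warnings instead of
// logging them through KubeAPIWarningLogger.
func newQuietClient(cfg *rest.Config) (client.Client, error) {
	return client.New(cfg, client.Options{
		Opts: client.WarningHandlerOptions{SuppressWarnings: true},
	})
}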
+
+func newClient(config *rest.Config, options Options) (*client, error) {
+	if config == nil {
+		return nil, fmt.Errorf("must provide non-nil rest.Config to client.New")
+	}
+
+	if !options.Opts.SuppressWarnings {
+		// surface warnings
+		logger := log.Log.WithName("KubeAPIWarningLogger")
+		// Set a WarningHandler, the default WarningHandler
+		// is log.KubeAPIWarningLogger with deduplication enabled.
+		// See log.KubeAPIWarningLoggerOptions for considerations
+		// regarding deduplication.
+		config = rest.CopyConfig(config)
+		config.WarningHandler = log.NewKubeAPIWarningLogger(
+			logger,
+			log.KubeAPIWarningLoggerOptions{
+				Deduplicate: !options.Opts.AllowDuplicateLogs,
+			},
+		)
+	}
+
+	// Init a scheme if none provided
+	if options.Scheme == nil {
+		options.Scheme = scheme.Scheme
+	}
+
+	// Init a Mapper if none provided
+	if options.Mapper == nil {
+		var err error
+		options.Mapper, err = apiutil.NewDynamicRESTMapper(config)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	clientcache := &clientCache{
+		config: config,
+		scheme: options.Scheme,
+		mapper: options.Mapper,
+		codecs: serializer.NewCodecFactory(options.Scheme),
+
+		structuredResourceByType:   make(map[schema.GroupVersionKind]*resourceMeta),
+		unstructuredResourceByType: make(map[schema.GroupVersionKind]*resourceMeta),
+	}
+
+	rawMetaClient, err := metadata.NewForConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf("unable to construct metadata-only client for use as part of client: %w", err)
+	}
+
+	c := &client{
+		typedClient: typedClient{
+			cache:      clientcache,
+			paramCodec: runtime.NewParameterCodec(options.Scheme),
+		},
+		unstructuredClient: unstructuredClient{
+			cache:      clientcache,
+			paramCodec: noConversionParamCodec{},
+		},
+		metadataClient: metadataClient{
+			client:     rawMetaClient,
+			restMapper: options.Mapper,
+		},
+		scheme: options.Scheme,
+		mapper: options.Mapper,
+	}
+
+	return c, nil
+}
+
+var _ Client = &client{}
+
+// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes
+// new clients at the time they are used, and caches the client.
+type client struct {
+	typedClient        typedClient
+	unstructuredClient unstructuredClient
+	metadataClient     metadataClient
+	scheme             *runtime.Scheme
+	mapper             meta.RESTMapper
+}
+
+// resetGroupVersionKind is a helper function to restore and preserve GroupVersionKind on an object.
+func (c *client) resetGroupVersionKind(obj runtime.Object, gvk schema.GroupVersionKind) {
+	if gvk != schema.EmptyObjectKind.GroupVersionKind() {
+		if v, ok := obj.(schema.ObjectKind); ok {
+			v.SetGroupVersionKind(gvk)
+		}
+	}
+}
+
+// Scheme returns the scheme this client is using.
+func (c *client) Scheme() *runtime.Scheme {
+	return c.scheme
+}
+
+// RESTMapper returns the REST mapper this client is using.
+func (c *client) RESTMapper() meta.RESTMapper {
+	return c.mapper
+}
+
+// Create implements client.Client.
+func (c *client) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Create(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return fmt.Errorf("cannot create using only metadata")
+	default:
+		return c.typedClient.Create(ctx, obj, opts...)
+	}
+}
+
+// Update implements client.Client.
+func (c *client) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+	defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Update(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return fmt.Errorf("cannot update using only metadata -- did you mean to patch?")
+	default:
+		return c.typedClient.Update(ctx, obj, opts...)
+	}
+}
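A hedged sketch (not part of the vendored file) of the metadata-only path dispatched above; the object name is hypothetical. The GVK must be set on the PartialObjectMetadata up front, since the metadata client routes requests by it:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

func deploymentLabels(ctx context.Context, c client.Client) (map[string]string, error) {
	obj := &metav1.PartialObjectMetadata{}
	obj.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
	key := client.ObjectKey{Namespace: "default", Name: "my-app"}
	if err := c.Get(ctx, key, obj); err != nil {
		return nil, err
	}
	return obj.Labels, nil
}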
+
+// Delete implements client.Client.
+func (c *client) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Delete(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.Delete(ctx, obj, opts...)
+	default:
+		return c.typedClient.Delete(ctx, obj, opts...)
+	}
+}
+
+// DeleteAllOf implements client.Client.
+func (c *client) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.DeleteAllOf(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.DeleteAllOf(ctx, obj, opts...)
+	default:
+		return c.typedClient.DeleteAllOf(ctx, obj, opts...)
+	}
+}
+
+// Patch implements client.Client.
+func (c *client) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+	defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Patch(ctx, obj, patch, opts...)
+	case *metav1.PartialObjectMetadata:
+		return c.metadataClient.Patch(ctx, obj, patch, opts...)
+	default:
+		return c.typedClient.Patch(ctx, obj, patch, opts...)
+	}
+}
+
+// Get implements client.Client.
+func (c *client) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return c.unstructuredClient.Get(ctx, key, obj, opts...)
+	case *metav1.PartialObjectMetadata:
+		// Metadata only object should always preserve the GVK coming in from the caller.
+		defer c.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+		return c.metadataClient.Get(ctx, key, obj, opts...)
+	default:
+		return c.typedClient.Get(ctx, key, obj, opts...)
+	}
+}
+
+// List implements client.Client.
+func (c *client) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+	switch x := obj.(type) {
+	case *unstructured.UnstructuredList:
+		return c.unstructuredClient.List(ctx, obj, opts...)
+	case *metav1.PartialObjectMetadataList:
+		// Metadata only object should always preserve the GVK.
+		gvk := obj.GetObjectKind().GroupVersionKind()
+		defer c.resetGroupVersionKind(obj, gvk)
+
+		// Call the list client.
+		if err := c.metadataClient.List(ctx, obj, opts...); err != nil {
+			return err
+		}
+
+		// Restore the GVK for each item in the list.
+		itemGVK := schema.GroupVersionKind{
+			Group:   gvk.Group,
+			Version: gvk.Version,
+			// TODO: this is producing unsafe guesses that don't actually work,
+			// but it matches ~99% of the cases out there.
+			Kind: strings.TrimSuffix(gvk.Kind, "List"),
+		}
+		for i := range x.Items {
+			item := &x.Items[i]
+			item.SetGroupVersionKind(itemGVK)
+		}
+
+		return nil
+	default:
+		return c.typedClient.List(ctx, obj, opts...)
+	}
+}
+
+// Status implements client.StatusClient.
+func (c *client) Status() SubResourceWriter {
+	return c.SubResource("status")
+}
+
+func (c *client) SubResource(subResource string) SubResourceClient {
+	return &subResourceClient{client: c, subResource: subResource}
+}
+
+// subResourceClient is a client.SubResourceClient that reads from and writes to subresources.
+type subResourceClient struct {
+	client      *client
+	subResource string
+}
+
+// ensure subResourceClient implements client.SubResourceClient.
+var _ SubResourceClient = &subResourceClient{}
+
+// SubResourceGetOptions holds all the possible configuration
+// for a subresource Get request.
+type SubResourceGetOptions struct {
+	Raw *metav1.GetOptions
+}
+
+// ApplyToSubResourceGet updates the configuration to the given get options.
+func (getOpt *SubResourceGetOptions) ApplyToSubResourceGet(o *SubResourceGetOptions) {
+	if getOpt.Raw != nil {
+		o.Raw = getOpt.Raw
+	}
+}
+
+// ApplyOptions applies the given options.
+func (getOpt *SubResourceGetOptions) ApplyOptions(opts []SubResourceGetOption) *SubResourceGetOptions {
+	for _, o := range opts {
+		o.ApplyToSubResourceGet(getOpt)
+	}
+
+	return getOpt
+}
+
+// AsGetOptions returns the configured options as *metav1.GetOptions.
+func (getOpt *SubResourceGetOptions) AsGetOptions() *metav1.GetOptions {
+	if getOpt.Raw == nil {
+		return &metav1.GetOptions{}
+	}
+	return getOpt.Raw
+}
+
+// SubResourceUpdateOptions holds all the possible configuration
+// for a subresource update request.
+type SubResourceUpdateOptions struct {
+	UpdateOptions
+	SubResourceBody Object
+}
+
+// ApplyToSubResourceUpdate updates the configuration on the given update options.
+func (uo *SubResourceUpdateOptions) ApplyToSubResourceUpdate(o *SubResourceUpdateOptions) {
+	uo.UpdateOptions.ApplyToUpdate(&o.UpdateOptions)
+	if uo.SubResourceBody != nil {
+		o.SubResourceBody = uo.SubResourceBody
+	}
+}
+
+// ApplyOptions applies the given options.
+func (uo *SubResourceUpdateOptions) ApplyOptions(opts []SubResourceUpdateOption) *SubResourceUpdateOptions {
+	for _, o := range opts {
+		o.ApplyToSubResourceUpdate(uo)
+	}
+
+	return uo
+}
+
+// SubResourceUpdateAndPatchOption is an option that can be used for either
+// a subresource update or patch request.
+type SubResourceUpdateAndPatchOption interface {
+	SubResourceUpdateOption
+	SubResourcePatchOption
+}
+
+// WithSubResourceBody returns an option that uses the given body
+// for a subresource Update or Patch operation.
+func WithSubResourceBody(body Object) SubResourceUpdateAndPatchOption {
+	return &withSubresourceBody{body: body}
+}
+
+type withSubresourceBody struct {
+	body Object
+}
+
+func (wsr *withSubresourceBody) ApplyToSubResourceUpdate(o *SubResourceUpdateOptions) {
+	o.SubResourceBody = wsr.body
+}
+
+func (wsr *withSubresourceBody) ApplyToSubResourcePatch(o *SubResourcePatchOptions) {
+	o.SubResourceBody = wsr.body
+}
+
+// SubResourceCreateOptions are all the possible configurations for a subresource
+// create request.
+type SubResourceCreateOptions struct {
+	CreateOptions
+}
+
+// ApplyOptions applies the given options.
+func (co *SubResourceCreateOptions) ApplyOptions(opts []SubResourceCreateOption) *SubResourceCreateOptions {
+	for _, o := range opts {
+		o.ApplyToSubResourceCreate(co)
+	}
+
+	return co
+}
+
+// ApplyToSubresourceCreate applies the configuration on the given create options.
+func (co *SubResourceCreateOptions) ApplyToSubresourceCreate(o *SubResourceCreateOptions) {
+	// apply the embedded create options onto the target options, not onto the receiver
+	co.CreateOptions.ApplyToCreate(&o.CreateOptions)
+}
+
+// SubResourcePatchOptions holds all possible configurations for a subresource patch
+// request.
+type SubResourcePatchOptions struct {
+	PatchOptions
+	SubResourceBody Object
+}
+
+// ApplyOptions applies the given options.
+func (po *SubResourcePatchOptions) ApplyOptions(opts []SubResourcePatchOption) *SubResourcePatchOptions {
+	for _, o := range opts {
+		o.ApplyToSubResourcePatch(po)
+	}
+
+	return po
+}
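An illustrative use (not part of the vendored file) of WithSubResourceBody defined above; it mirrors the scale example documented in interfaces.go later in this diff:

package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	autoscalingv1 "k8s.io/api/autoscaling/v1"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// scaleTo sends an update to the "scale" subresource of a Deployment, using
// a separate body instead of mutating the Deployment itself.
func scaleTo(ctx context.Context, c client.Client, dep *appsv1.Deployment, replicas int32) error {
	scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: replicas}}
	return c.SubResource("scale").Update(ctx, dep, client.WithSubResourceBody(scale))
}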
+
+// ApplyToSubResourcePatch applies the configuration on the given patch options.
+func (po *SubResourcePatchOptions) ApplyToSubResourcePatch(o *SubResourcePatchOptions) {
+	po.PatchOptions.ApplyToPatch(&o.PatchOptions)
+	if po.SubResourceBody != nil {
+		o.SubResourceBody = po.SubResourceBody
+	}
+}
+
+func (sc *subResourceClient) Get(ctx context.Context, obj Object, subResource Object, opts ...SubResourceGetOption) error {
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return sc.client.unstructuredClient.GetSubResource(ctx, obj, subResource, sc.subResource, opts...)
+	case *metav1.PartialObjectMetadata:
+		return errors.New("can not get subresource using only metadata")
+	default:
+		return sc.client.typedClient.GetSubResource(ctx, obj, subResource, sc.subResource, opts...)
+	}
+}
+
+// Create implements client.SubResourceClient.
+func (sc *subResourceClient) Create(ctx context.Context, obj Object, subResource Object, opts ...SubResourceCreateOption) error {
+	defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+	defer sc.client.resetGroupVersionKind(subResource, subResource.GetObjectKind().GroupVersionKind())
+
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return sc.client.unstructuredClient.CreateSubResource(ctx, obj, subResource, sc.subResource, opts...)
+	case *metav1.PartialObjectMetadata:
+		return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?")
+	default:
+		return sc.client.typedClient.CreateSubResource(ctx, obj, subResource, sc.subResource, opts...)
+	}
+}
+
+// Update implements client.SubResourceClient.
+func (sc *subResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error {
+	defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return sc.client.unstructuredClient.UpdateSubResource(ctx, obj, sc.subResource, opts...)
+	case *metav1.PartialObjectMetadata:
+		return fmt.Errorf("cannot update status using only metadata -- did you mean to patch?")
+	default:
+		return sc.client.typedClient.UpdateSubResource(ctx, obj, sc.subResource, opts...)
+	}
+}
+
+// Patch implements client.SubResourceWriter.
+func (sc *subResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error {
+	defer sc.client.resetGroupVersionKind(obj, obj.GetObjectKind().GroupVersionKind())
+	switch obj.(type) {
+	case *unstructured.Unstructured:
+		return sc.client.unstructuredClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...)
+	case *metav1.PartialObjectMetadata:
+		return sc.client.metadataClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...)
+	default:
+		return sc.client.typedClient.PatchSubResource(ctx, obj, sc.subResource, patch, opts...)
+	}
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go
new file mode 100644
index 00000000..857a0b38
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/client_cache.go
@@ -0,0 +1,150 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	"strings"
+	"sync"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// clientCache creates and caches rest clients and metadata for Kubernetes types.
+type clientCache struct {
+	// config is the rest.Config to talk to an apiserver
+	config *rest.Config
+
+	// scheme maps go structs to GroupVersionKinds
+	scheme *runtime.Scheme
+
+	// mapper maps GroupVersionKinds to Resources
+	mapper meta.RESTMapper
+
+	// codecs are used to create a REST client for a gvk
+	codecs serializer.CodecFactory
+
+	// structuredResourceByType caches structured type metadata
+	structuredResourceByType map[schema.GroupVersionKind]*resourceMeta
+	// unstructuredResourceByType caches unstructured type metadata
+	unstructuredResourceByType map[schema.GroupVersionKind]*resourceMeta
+	mu                         sync.RWMutex
+}
+
+// newResource maps obj to a Kubernetes Resource and constructs a client for that Resource.
+// If the object is a list, the resource represents the item's type instead.
+func (c *clientCache) newResource(gvk schema.GroupVersionKind, isList, isUnstructured bool) (*resourceMeta, error) {
+	if strings.HasSuffix(gvk.Kind, "List") && isList {
+		// if this was a list, treat it as a request for the item's resource
+		gvk.Kind = gvk.Kind[:len(gvk.Kind)-4]
+	}
+
+	client, err := apiutil.RESTClientForGVK(gvk, isUnstructured, c.config, c.codecs)
+	if err != nil {
+		return nil, err
+	}
+	mapping, err := c.mapper.RESTMapping(gvk.GroupKind(), gvk.Version)
+	if err != nil {
+		return nil, err
+	}
+	return &resourceMeta{Interface: client, mapping: mapping, gvk: gvk}, nil
+}
+
+// getResource returns the resource meta information for the given type of object.
+// If the object is a list, the resource represents the item's type instead.
+func (c *clientCache) getResource(obj runtime.Object) (*resourceMeta, error) {
+	gvk, err := apiutil.GVKForObject(obj, c.scheme)
+	if err != nil {
+		return nil, err
+	}
+
+	_, isUnstructured := obj.(*unstructured.Unstructured)
+	_, isUnstructuredList := obj.(*unstructured.UnstructuredList)
+	isUnstructured = isUnstructured || isUnstructuredList
+
+	// It's better to do creation work twice than to not let multiple
+	// people make requests at once
+	c.mu.RLock()
+	resourceByType := c.structuredResourceByType
+	if isUnstructured {
+		resourceByType = c.unstructuredResourceByType
+	}
+	r, known := resourceByType[gvk]
+	c.mu.RUnlock()
+
+	if known {
+		return r, nil
+	}
+
+	// Initialize a new Client
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	r, err = c.newResource(gvk, meta.IsListType(obj), isUnstructured)
+	if err != nil {
+		return nil, err
+	}
+	resourceByType[gvk] = r
+	return r, err
+}
+
+// getObjMeta returns objMeta containing both type and object metadata and state.
+func (c *clientCache) getObjMeta(obj runtime.Object) (*objMeta, error) {
+	r, err := c.getResource(obj)
+	if err != nil {
+		return nil, err
+	}
+	m, err := meta.Accessor(obj)
+	if err != nil {
+		return nil, err
+	}
+	return &objMeta{resourceMeta: r, Object: m}, err
+}
+
+// resourceMeta caches state for a Kubernetes type.
+type resourceMeta struct {
+	// client is the rest client used to talk to the apiserver
+	rest.Interface
+	// gvk is the GroupVersionKind of the resourceMeta
+	gvk schema.GroupVersionKind
+	// mapping is the rest mapping
+	mapping *meta.RESTMapping
+}
+
+// isNamespaced returns true if the type is namespaced.
+func (r *resourceMeta) isNamespaced() bool {
+	return r.mapping.Scope.Name() != meta.RESTScopeNameRoot
+}
+
+// resource returns the resource name of the type.
+func (r *resourceMeta) resource() string {
+	return r.mapping.Resource.Resource
+}
+
+// objMeta stores type and object information about a Kubernetes type.
+type objMeta struct {
+	// resourceMeta contains type information for the object
+	*resourceMeta
+
+	// Object contains meta data for the object instance
+	metav1.Object
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go
new file mode 100644
index 00000000..9c292310
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/codec.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	"errors"
+	"net/url"
+
+	"k8s.io/apimachinery/pkg/conversion/queryparams"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var _ runtime.ParameterCodec = noConversionParamCodec{}
+
+// noConversionParamCodec is a no-conversion codec for serializing parameters into URL query strings.
+// it's useful in scenarios with the unstructured client and arbitrary resources.
+type noConversionParamCodec struct{}
+
+func (noConversionParamCodec) EncodeParameters(obj runtime.Object, to schema.GroupVersion) (url.Values, error) {
+	return queryparams.Convert(obj)
+}
+
+func (noConversionParamCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into runtime.Object) error {
+	return errors.New("DecodeParameters not implemented on noConversionParamCodec")
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go
new file mode 100644
index 00000000..e0e28850
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/doc.go
@@ -0,0 +1,50 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package client contains functionality for interacting with Kubernetes API
+// servers.
+//
+// # Clients
+//
+// Clients are split into two interfaces -- Readers and Writers. Readers
+// get and list, while writers create, update, and delete.
+//
+// The New function can be used to create a new client that talks directly
+// to the API server.
+//
+// It is a common pattern in Kubernetes to read from a cache and write to the API
+// server. This pattern is covered by the DelegatingClient type, which can
+// be used to have a client whose Reader is different from the Writer.
+//
+// # Options
+//
+// Many client operations in Kubernetes support options. These options are
+// represented as variadic arguments at the end of a given method call.
+// For instance, to use a label selector on list, you can call
+//
+//	err := someReader.List(context.Background(), &podList, client.MatchingLabels{"somelabel": "someval"})
+//
+// # Indexing
+//
+// Indexes may be added to caches using a FieldIndexer. This allows you to easily
+// and efficiently look up objects with certain properties. You can then make
+// use of the index by specifying a field selector on calls to List on the Reader
+// corresponding to the given Cache.
+//
+// For instance, a Secret controller might have an index on the
+// `.spec.volumes.secret.secretName` field in Pod objects, so that it could
+// easily look up all pods that reference a given secret.
+package client
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go
new file mode 100644
index 00000000..73b56429
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/dryrun.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// NewDryRunClient wraps an existing client and enforces DryRun mode
+// on all mutating api calls.
+func NewDryRunClient(c Client) Client {
+	return &dryRunClient{client: c}
+}
+
+var _ Client = &dryRunClient{}
+
+// dryRunClient is a Client that wraps another Client in order to enforce DryRun mode.
+type dryRunClient struct {
+	client Client
+}
+
+// Scheme returns the scheme this client is using.
+func (c *dryRunClient) Scheme() *runtime.Scheme {
+	return c.client.Scheme()
+}
+
+// RESTMapper returns the rest mapper this client is using.
+func (c *dryRunClient) RESTMapper() meta.RESTMapper {
+	return c.client.RESTMapper()
+}
+
+// Create implements client.Client.
+func (c *dryRunClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error {
+	return c.client.Create(ctx, obj, append(opts, DryRunAll)...)
+}
+
+// Update implements client.Client.
+func (c *dryRunClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error {
+	return c.client.Update(ctx, obj, append(opts, DryRunAll)...)
+}
+
+// Delete implements client.Client.
+func (c *dryRunClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error {
+	return c.client.Delete(ctx, obj, append(opts, DryRunAll)...)
+}
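A minimal sketch (not part of the vendored file) of the dry-run wrapper: server-side validation and defaulting run, but nothing is persisted:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// validateCreate is equivalent to passing client.DryRunAll on every
// mutating call made through the wrapped client.
func validateCreate(ctx context.Context, c client.Client, cm *corev1.ConfigMap) error {
	return client.NewDryRunClient(c).Create(ctx, cm)
}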
+
+// DeleteAllOf implements client.Client.
+func (c *dryRunClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error {
+	return c.client.DeleteAllOf(ctx, obj, append(opts, DryRunAll)...)
+}
+
+// Patch implements client.Client.
+func (c *dryRunClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error {
+	return c.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...)
+}
+
+// Get implements client.Client.
+func (c *dryRunClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
+	return c.client.Get(ctx, key, obj, opts...)
+}
+
+// List implements client.Client.
+func (c *dryRunClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+	return c.client.List(ctx, obj, opts...)
+}
+
+// Status implements client.StatusClient.
+func (c *dryRunClient) Status() SubResourceWriter {
+	return c.SubResource("status")
+}
+
+// SubResource implements client.SubResourceClient.
+func (c *dryRunClient) SubResource(subResource string) SubResourceClient {
+	return &dryRunSubResourceClient{client: c.client.SubResource(subResource)}
+}
+
+// ensure dryRunSubResourceClient implements client.SubResourceWriter.
+var _ SubResourceWriter = &dryRunSubResourceClient{}
+
+// dryRunSubResourceClient is a client.SubResourceWriter that writes the status subresource with dryRun mode
+// enforced.
+type dryRunSubResourceClient struct {
+	client SubResourceClient
+}
+
+func (sw *dryRunSubResourceClient) Get(ctx context.Context, obj, subResource Object, opts ...SubResourceGetOption) error {
+	return sw.client.Get(ctx, obj, subResource, opts...)
+}
+
+func (sw *dryRunSubResourceClient) Create(ctx context.Context, obj, subResource Object, opts ...SubResourceCreateOption) error {
+	return sw.client.Create(ctx, obj, subResource, append(opts, DryRunAll)...)
+}
+
+// Update implements client.SubResourceWriter.
+func (sw *dryRunSubResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error {
+	return sw.client.Update(ctx, obj, append(opts, DryRunAll)...)
+}
+
+// Patch implements client.SubResourceWriter.
+func (sw *dryRunSubResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error {
+	return sw.client.Patch(ctx, obj, patch, append(opts, DryRunAll)...)
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go
new file mode 100644
index 00000000..b642f7f8
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/interfaces.go
@@ -0,0 +1,217 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	"context"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/watch"
+)
+
+// ObjectKey identifies a Kubernetes Object.
+type ObjectKey = types.NamespacedName
+
+// ObjectKeyFromObject returns the ObjectKey given a runtime.Object.
+func ObjectKeyFromObject(obj Object) ObjectKey {
+	return ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()}
+}
+
+// Patch is a patch that can be applied to a Kubernetes object.
+type Patch interface {
+	// Type is the PatchType of the patch.
+	Type() types.PatchType
+	// Data is the raw data representing the patch.
+	Data(obj Object) ([]byte, error)
+}
+
+// TODO(directxman12): is there a sane way to deal with get/delete options?
+
+// Reader knows how to read and list Kubernetes objects.
+type Reader interface {
+	// Get retrieves an obj for the given object key from the Kubernetes Cluster.
+	// obj must be a struct pointer so that obj can be updated with the response
+	// returned by the Server.
+	Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error
+
+	// List retrieves list of objects for a given namespace and list options. On a
+	// successful call, Items field in the list will be populated with the
+	// result returned from the server.
+	List(ctx context.Context, list ObjectList, opts ...ListOption) error
+}
+
+// Writer knows how to create, delete, and update Kubernetes objects.
+type Writer interface {
+	// Create saves the object obj in the Kubernetes cluster. obj must be a
+	// struct pointer so that obj can be updated with the content returned by the Server.
+	Create(ctx context.Context, obj Object, opts ...CreateOption) error
+
+	// Delete deletes the given obj from Kubernetes cluster.
+	Delete(ctx context.Context, obj Object, opts ...DeleteOption) error
+
+	// Update updates the given obj in the Kubernetes cluster. obj must be a
+	// struct pointer so that obj can be updated with the content returned by the Server.
+	Update(ctx context.Context, obj Object, opts ...UpdateOption) error
+
+	// Patch patches the given obj in the Kubernetes cluster. obj must be a
+	// struct pointer so that obj can be updated with the content returned by the Server.
+	Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error
+
+	// DeleteAllOf deletes all objects of the given type matching the given options.
+	DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error
+}
+
+// StatusClient knows how to create a client which can update status subresource
+// for kubernetes objects.
+type StatusClient interface {
+	Status() SubResourceWriter
+}
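A hedged sketch (not part of the vendored file) of producing a Patch, the interface above, with client.MergeFrom, which is part of this package; the label key is arbitrary:

package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

func addLabel(ctx context.Context, c client.Writer, pod *corev1.Pod) error {
	before := pod.DeepCopy()
	if pod.Labels == nil {
		pod.Labels = map[string]string{}
	}
	pod.Labels["example.com/touched"] = "true"
	// MergeFrom diffs pod against before and yields a merge patch.
	return c.Patch(ctx, pod, client.MergeFrom(before))
}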
+
+// SubResourceClientConstructor knows how to create a client which can update subresource
+// for kubernetes objects.
+type SubResourceClientConstructor interface {
+	// SubResourceClientConstructor returns a subresource client for the named subResource. Known
+	// upstream subResources usages are:
+	// - ServiceAccount token creation:
+	//     sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}}
+	//     token := &authenticationv1.TokenRequest{}
+	//     c.SubResourceClient("token").Create(ctx, sa, token)
+	//
+	// - Pod eviction creation:
+	//     pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}}
+	//     c.SubResourceClient("eviction").Create(ctx, pod, &policyv1.Eviction{})
+	//
+	// - Pod binding creation:
+	//     pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}}
+	//     binding := &corev1.Binding{Target: corev1.ObjectReference{Name: "my-node"}}
+	//     c.SubResourceClient("binding").Create(ctx, pod, binding)
+	//
+	// - CertificateSigningRequest approval:
+	//     csr := &certificatesv1.CertificateSigningRequest{
+	//         ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"},
+	//         Status: certificatesv1.CertificateSigningRequestStatus{
+	//             Conditions: []certificatesv1.CertificateSigningRequestCondition{{
+	//                 Type:   certificatesv1.CertificateApproved,
+	//                 Status: corev1.ConditionTrue,
+	//             }},
+	//         },
+	//     }
+	//     c.SubResourceClient("approval").Update(ctx, csr)
+	//
+	// - Scale retrieval:
+	//     dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}}
+	//     scale := &autoscalingv1.Scale{}
+	//     c.SubResourceClient("scale").Get(ctx, dep, scale)
+	//
+	// - Scale update:
+	//     dep := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "foo", Name: "bar"}}
+	//     scale := &autoscalingv1.Scale{Spec: autoscalingv1.ScaleSpec{Replicas: 2}}
+	//     c.SubResourceClient("scale").Update(ctx, dep, client.WithSubResourceBody(scale))
+	SubResource(subResource string) SubResourceClient
+}
+
+// StatusWriter is kept for backward compatibility.
+type StatusWriter = SubResourceWriter
+
+// SubResourceReader knows how to read SubResources.
+type SubResourceReader interface {
+	Get(ctx context.Context, obj Object, subResource Object, opts ...SubResourceGetOption) error
+}
+
+// SubResourceWriter knows how to update subresource of a Kubernetes object.
+type SubResourceWriter interface {
+	// Create saves the subResource object in the Kubernetes cluster. obj must be a
+	// struct pointer so that obj can be updated with the content returned by the Server.
+	Create(ctx context.Context, obj Object, subResource Object, opts ...SubResourceCreateOption) error
+	// Update updates the fields corresponding to the status subresource for the
+	// given obj. obj must be a struct pointer so that obj can be updated
+	// with the content returned by the Server.
+	Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error
+
+	// Patch patches the given object's subresource. obj must be a struct
+	// pointer so that obj can be updated with the content returned by the
+	// Server.
+	Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error
+}
+
+// SubResourceClient knows how to perform CRU operations on Kubernetes objects.
+type SubResourceClient interface {
+	SubResourceReader
+	SubResourceWriter
+}
+
+// Client knows how to perform CRUD operations on Kubernetes objects.
+type Client interface {
+	Reader
+	Writer
+	StatusClient
+	SubResourceClientConstructor
+
+	// Scheme returns the scheme this client is using.
+	Scheme() *runtime.Scheme
+	// RESTMapper returns the REST mapper this client is using.
+	RESTMapper() meta.RESTMapper
+}
+
+// WithWatch supports Watch on top of the CRUD operations supported by
Its intended use case is CLI apps that need to wait for
+// events.
+type WithWatch interface {
+ Client
+ Watch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error)
+}
+
+// IndexerFunc knows how to take an object and turn it into a series
+// of non-namespaced keys. Namespaced objects are automatically given
+// namespaced and non-namespaced variants, so keys do not need to include namespace.
+type IndexerFunc func(Object) []string
+
+// FieldIndexer knows how to index over a particular "field" such that it
+// can later be used by a field selector.
+type FieldIndexer interface {
+ // IndexField adds an index with the given field name on the given object type
+ // by using the given function to extract the value for that field. If you want
+ // compatibility with the Kubernetes API server, only return one key, and only use
+ // fields that the API server supports. Otherwise, you can return multiple keys,
+ // and "equality" in the field selector means that at least one key matches the value.
+ // The FieldIndexer will automatically take care of indexing over namespace
+ // and supporting efficient all-namespace queries.
+ IndexField(ctx context.Context, obj Object, field string, extractValue IndexerFunc) error
+}
+
+// IgnoreNotFound returns nil on NotFound errors.
+// All other values that are not NotFound errors or nil are returned unmodified.
+func IgnoreNotFound(err error) error {
+ if apierrors.IsNotFound(err) {
+  return nil
+ }
+ return err
+}
+
+// IgnoreAlreadyExists returns nil on AlreadyExists errors.
+// All other values that are not AlreadyExists errors or nil are returned unmodified.
+func IgnoreAlreadyExists(err error) error {
+ if apierrors.IsAlreadyExists(err) {
+  return nil
+ }
+
+ return err
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go
new file mode 100644
index 00000000..d0c6b8e1
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/metadata_client.go
@@ -0,0 +1,204 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/metadata"
+)
+
+// TODO(directxman12): we could rewrite this on top of the low-level REST
+// client to avoid the extra shallow copy at the end, but I'm not sure it's
+// worth it -- the metadata client deals with falling back to loading the whole
+// object on older API servers, etc, and we'd have to reproduce that.
+
+// metadataClient is a client that reads & writes metadata-only requests to/from the API server.
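+//
+// For example (an illustrative sketch, assuming c is a Client backed by this
+// implementation; appsv1 is not imported here), fetching only the metadata
+// of a Deployment:
+//
+//  obj := &metav1.PartialObjectMetadata{}
+//  obj.SetGroupVersionKind(appsv1.SchemeGroupVersion.WithKind("Deployment"))
+//  err := c.Get(ctx, ObjectKey{Namespace: "default", Name: "web"}, obj)
+//  // obj now carries labels, annotations and ownerRefs, but no spec or status.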
+type metadataClient struct { + client metadata.Interface + restMapper meta.RESTMapper +} + +func (mc *metadataClient) getResourceInterface(gvk schema.GroupVersionKind, ns string) (metadata.ResourceInterface, error) { + mapping, err := mc.restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return nil, err + } + if mapping.Scope.Name() == meta.RESTScopeNameRoot { + return mc.client.Resource(mapping.Resource), nil + } + return mc.client.Resource(mapping.Resource).Namespace(ns), nil +} + +// Delete implements client.Client. +func (mc *metadataClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), metadata.Namespace) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return resInt.Delete(ctx, metadata.Name, *deleteOpts.AsDeleteOptions()) +} + +// DeleteAllOf implements client.Client. +func (mc *metadataClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(metadata.GroupVersionKind(), deleteAllOfOpts.ListOptions.Namespace) + if err != nil { + return err + } + + return resInt.DeleteCollection(ctx, *deleteAllOfOpts.AsDeleteOptions(), *deleteAllOfOpts.AsListOptions()) +} + +// Patch implements client.Client. +func (mc *metadataClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + resInt, err := mc.getResourceInterface(gvk, metadata.Namespace) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + patchOpts.ApplyOptions(opts) + + res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// Get implements client.Client. +func (mc *metadataClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + metadata, ok := obj.(*metav1.PartialObjectMetadata) + if !ok { + return fmt.Errorf("metadata client did not understand object: %T", obj) + } + + gvk := metadata.GroupVersionKind() + + getOpts := GetOptions{} + getOpts.ApplyOptions(opts) + + resInt, err := mc.getResourceInterface(gvk, key.Namespace) + if err != nil { + return err + } + + res, err := resInt.Get(ctx, key.Name, *getOpts.AsGetOptions()) + if err != nil { + return err + } + *metadata = *res + metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata + return nil +} + +// List implements client.Client. 
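+//
+// For example (an illustrative sketch, assuming c is backed by this client),
+// listing only Pod metadata in a namespace:
+//
+//  pods := &metav1.PartialObjectMetadataList{}
+//  pods.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "PodList"})
+//  err := c.List(ctx, pods, InNamespace("default"))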
+func (mc *metadataClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+ metadata, ok := obj.(*metav1.PartialObjectMetadataList)
+ if !ok {
+  return fmt.Errorf("metadata client did not understand object: %T", obj)
+ }
+
+ gvk := metadata.GroupVersionKind()
+ gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
+
+ listOpts := ListOptions{}
+ listOpts.ApplyOptions(opts)
+
+ resInt, err := mc.getResourceInterface(gvk, listOpts.Namespace)
+ if err != nil {
+  return err
+ }
+
+ res, err := resInt.List(ctx, *listOpts.AsListOptions())
+ if err != nil {
+  return err
+ }
+ *metadata = *res
+ metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
+ return nil
+}
+
+func (mc *metadataClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error {
+ metadata, ok := obj.(*metav1.PartialObjectMetadata)
+ if !ok {
+  return fmt.Errorf("metadata client did not understand object: %T", obj)
+ }
+
+ gvk := metadata.GroupVersionKind()
+ resInt, err := mc.getResourceInterface(gvk, metadata.Namespace)
+ if err != nil {
+  return err
+ }
+
+ patchOpts := &SubResourcePatchOptions{}
+ patchOpts.ApplyOptions(opts)
+
+ body := obj
+ if patchOpts.SubResourceBody != nil {
+  body = patchOpts.SubResourceBody
+ }
+
+ data, err := patch.Data(body)
+ if err != nil {
+  return err
+ }
+
+ res, err := resInt.Patch(ctx, metadata.Name, patch.Type(), data, *patchOpts.AsPatchOptions(), subResource)
+ if err != nil {
+  return err
+ }
+
+ *metadata = *res
+ metadata.SetGroupVersionKind(gvk) // restore the GVK, which isn't set on metadata
+ return nil
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go
new file mode 100644
index 00000000..00bc2175
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/namespaced_client.go
@@ -0,0 +1,253 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+ "context"
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/api/meta"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/internal/objectutil"
+)
+
+// NewNamespacedClient wraps an existing client enforcing the namespace value.
+// All functions using this client will have the same namespace declared here.
+func NewNamespacedClient(c Client, ns string) Client {
+ return &namespacedClient{
+  client: c,
+  namespace: ns,
+ }
+}
+
+var _ Client = &namespacedClient{}
+
+// namespacedClient is a Client that wraps another Client in order to enforce the specified namespace value.
+type namespacedClient struct {
+ namespace string
+ client Client
+}
+
+// Scheme returns the scheme this client is using.
+func (n *namespacedClient) Scheme() *runtime.Scheme {
+ return n.client.Scheme()
+}
+
+// RESTMapper returns the REST mapper this client is using.
+func (n *namespacedClient) RESTMapper() meta.RESTMapper { + return n.client.RESTMapper() +} + +// Create implements client.Client. +func (n *namespacedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Create(ctx, obj, opts...) +} + +// Update implements client.Client. +func (n *namespacedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Update(ctx, obj, opts...) +} + +// Delete implements client.Client. +func (n *namespacedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Delete(ctx, obj, opts...) +} + +// DeleteAllOf implements client.Client. +func (n *namespacedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + if isNamespaceScoped { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.DeleteAllOf(ctx, obj, opts...) +} + +// Patch implements client.Client. +func (n *namespacedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != n.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), n.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(n.namespace) + } + return n.client.Patch(ctx, obj, patch, opts...) +} + +// Get implements client.Client. 
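+//
+// For example (an illustrative sketch; corev1 is not imported here), a client
+// built with NewNamespacedClient(c, "team-a") fills in the namespace when the
+// key omits it, and rejects keys that name a different namespace:
+//
+//  nsClient := NewNamespacedClient(c, "team-a")
+//  cm := &corev1.ConfigMap{}
+//  err := nsClient.Get(ctx, ObjectKey{Name: "settings"}, cm) // namespace becomes "team-a"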
+func (n *namespacedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, n.Scheme(), n.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + if isNamespaceScoped { + if key.Namespace != "" && key.Namespace != n.namespace { + return fmt.Errorf("namespace %s provided for the object %s does not match the namespace %s on the client", key.Namespace, obj.GetName(), n.namespace) + } + key.Namespace = n.namespace + } + return n.client.Get(ctx, key, obj, opts...) +} + +// List implements client.Client. +func (n *namespacedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + if n.namespace != "" { + opts = append(opts, InNamespace(n.namespace)) + } + return n.client.List(ctx, obj, opts...) +} + +// Status implements client.StatusClient. +func (n *namespacedClient) Status() SubResourceWriter { + return n.SubResource("status") +} + +// SubResource implements client.SubResourceClient. +func (n *namespacedClient) SubResource(subResource string) SubResourceClient { + return &namespacedClientSubResourceClient{client: n.client.SubResource(subResource), namespace: n.namespace, namespacedclient: n} +} + +// ensure namespacedClientSubResourceClient implements client.SubResourceClient. +var _ SubResourceClient = &namespacedClientSubResourceClient{} + +type namespacedClientSubResourceClient struct { + client SubResourceClient + namespace string + namespacedclient Client +} + +func (nsw *namespacedClientSubResourceClient) Get(ctx context.Context, obj, subResource Object, opts ...SubResourceGetOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + + return nsw.client.Get(ctx, obj, subResource, opts...) +} + +func (nsw *namespacedClientSubResourceClient) Create(ctx context.Context, obj, subResource Object, opts ...SubResourceCreateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + + return nsw.client.Create(ctx, obj, subResource, opts...) +} + +// Update implements client.SubResourceWriter. 
+func (nsw *namespacedClientSubResourceClient) Update(ctx context.Context, obj Object, opts ...SubResourceUpdateOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.client.Update(ctx, obj, opts...) +} + +// Patch implements client.SubResourceWriter. +func (nsw *namespacedClientSubResourceClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...SubResourcePatchOption) error { + isNamespaceScoped, err := objectutil.IsAPINamespaced(obj, nsw.namespacedclient.Scheme(), nsw.namespacedclient.RESTMapper()) + + if err != nil { + return fmt.Errorf("error finding the scope of the object: %w", err) + } + + objectNamespace := obj.GetNamespace() + if objectNamespace != nsw.namespace && objectNamespace != "" { + return fmt.Errorf("namespace %s of the object %s does not match the namespace %s on the client", objectNamespace, obj.GetName(), nsw.namespace) + } + + if isNamespaceScoped && objectNamespace == "" { + obj.SetNamespace(nsw.namespace) + } + return nsw.client.Patch(ctx, obj, patch, opts...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go new file mode 100644 index 00000000..31e334d6 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/object.go @@ -0,0 +1,77 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// Object is a Kubernetes object, allows functions to work indistinctly with +// any resource that implements both Object interfaces. +// +// Semantically, these are objects which are both serializable (runtime.Object) +// and identifiable (metav1.Object) -- think any object which you could write +// as YAML or JSON, and then `kubectl create`. +// +// Code-wise, this means that any object which embeds both ObjectMeta (which +// provides metav1.Object) and TypeMeta (which provides half of runtime.Object) +// and has a `DeepCopyObject` implementation (the other half of runtime.Object) +// will implement this by default. +// +// For example, nearly all the built-in types are Objects, as well as all +// KubeBuilder-generated CRDs (unless you do something real funky to them). 
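+//
+// For example, *corev1.Pod embeds both ObjectMeta and TypeMeta and has a
+// generated DeepCopyObject, so (illustratively; corev1 is not imported by
+// this file) the compile-time assertion `var _ Object = &corev1.Pod{}` holds.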
+// +// By and large, most things that implement runtime.Object also implement +// Object -- it's very rare to have *just* a runtime.Object implementation (the +// cases tend to be funky built-in types like Webhook payloads that don't have +// a `metadata` field). +// +// Notice that XYZList types are distinct: they implement ObjectList instead. +type Object interface { + metav1.Object + runtime.Object +} + +// ObjectList is a Kubernetes object list, allows functions to work +// indistinctly with any resource that implements both runtime.Object and +// metav1.ListInterface interfaces. +// +// Semantically, this is any object which may be serialized (ObjectMeta), and +// is a kubernetes list wrapper (has items, pagination fields, etc) -- think +// the wrapper used in a response from a `kubectl list --output yaml` call. +// +// Code-wise, this means that any object which embedds both ListMeta (which +// provides metav1.ListInterface) and TypeMeta (which provides half of +// runtime.Object) and has a `DeepCopyObject` implementation (the other half of +// runtime.Object) will implement this by default. +// +// For example, nearly all the built-in XYZList types are ObjectLists, as well +// as the XYZList types for all KubeBuilder-generated CRDs (unless you do +// something real funky to them). +// +// By and large, most things that are XYZList and implement runtime.Object also +// implement ObjectList -- it's very rare to have *just* a runtime.Object +// implementation (the cases tend to be funky built-in types like Webhook +// payloads that don't have a `metadata` field). +// +// This is similar to Object, which is almost always implemented by the items +// in the list themselves. +type ObjectList interface { + metav1.ListInterface + runtime.Object +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go new file mode 100644 index 00000000..7f6f5b83 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/options.go @@ -0,0 +1,821 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" +) + +// {{{ "Functional" Option Interfaces + +// CreateOption is some configuration that modifies options for a create request. +type CreateOption interface { + // ApplyToCreate applies this configuration to the given create options. + ApplyToCreate(*CreateOptions) +} + +// DeleteOption is some configuration that modifies options for a delete request. +type DeleteOption interface { + // ApplyToDelete applies this configuration to the given delete options. + ApplyToDelete(*DeleteOptions) +} + +// GetOption is some configuration that modifies options for a get request. +type GetOption interface { + // ApplyToGet applies this configuration to the given get options. 
ApplyToGet(*GetOptions)
+}
+
+// ListOption is some configuration that modifies options for a list request.
+type ListOption interface {
+ // ApplyToList applies this configuration to the given list options.
+ ApplyToList(*ListOptions)
+}
+
+// UpdateOption is some configuration that modifies options for an update request.
+type UpdateOption interface {
+ // ApplyToUpdate applies this configuration to the given update options.
+ ApplyToUpdate(*UpdateOptions)
+}
+
+// PatchOption is some configuration that modifies options for a patch request.
+type PatchOption interface {
+ // ApplyToPatch applies this configuration to the given patch options.
+ ApplyToPatch(*PatchOptions)
+}
+
+// DeleteAllOfOption is some configuration that modifies options for a delete request.
+type DeleteAllOfOption interface {
+ // ApplyToDeleteAllOf applies this configuration to the given deletecollection options.
+ ApplyToDeleteAllOf(*DeleteAllOfOptions)
+}
+
+// SubResourceGetOption modifies options for a SubResource Get request.
+type SubResourceGetOption interface {
+ ApplyToSubResourceGet(*SubResourceGetOptions)
+}
+
+// SubResourceUpdateOption is some configuration that modifies options for an update request.
+type SubResourceUpdateOption interface {
+ // ApplyToSubResourceUpdate applies this configuration to the given update options.
+ ApplyToSubResourceUpdate(*SubResourceUpdateOptions)
+}
+
+// SubResourceCreateOption is some configuration that modifies options for a create request.
+type SubResourceCreateOption interface {
+ // ApplyToSubResourceCreate applies this configuration to the given create options.
+ ApplyToSubResourceCreate(*SubResourceCreateOptions)
+}
+
+// SubResourcePatchOption configures a subresource patch request.
+type SubResourcePatchOption interface {
+ // ApplyToSubResourcePatch applies the configuration on the given patch options.
+ ApplyToSubResourcePatch(*SubResourcePatchOptions)
+}
+
+// }}}
+
+// {{{ Multi-Type Options
+
+// DryRunAll sets the "dry run" option to "all", executing all
+// validation, etc without persisting the change to storage.
+var DryRunAll = dryRunAll{}
+
+type dryRunAll struct{}
+
+// ApplyToCreate applies this configuration to the given create options.
+func (dryRunAll) ApplyToCreate(opts *CreateOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// ApplyToUpdate applies this configuration to the given update options.
+func (dryRunAll) ApplyToUpdate(opts *UpdateOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// ApplyToPatch applies this configuration to the given patch options.
+func (dryRunAll) ApplyToPatch(opts *PatchOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// ApplyToDelete applies this configuration to the given delete options.
+func (dryRunAll) ApplyToDelete(opts *DeleteOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+
+func (dryRunAll) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+
+func (dryRunAll) ApplyToSubResourceCreate(opts *SubResourceCreateOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+
+func (dryRunAll) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+
+func (dryRunAll) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) {
+ opts.DryRun = []string{metav1.DryRunAll}
+}
+
+// FieldOwner sets the field manager name for the given server-side apply patch.
+type FieldOwner string
+
+// ApplyToPatch applies this configuration to the given patch options.
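+//
+// For example (an illustrative sketch; "my-operator" is a hypothetical
+// manager name), naming the field manager on a server-side apply patch:
+//
+//  err := c.Patch(ctx, obj, Apply, FieldOwner("my-operator"), ForceOwnership)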
+func (f FieldOwner) ApplyToPatch(opts *PatchOptions) { + opts.FieldManager = string(f) +} + +// ApplyToCreate applies this configuration to the given create options. +func (f FieldOwner) ApplyToCreate(opts *CreateOptions) { + opts.FieldManager = string(f) +} + +// ApplyToUpdate applies this configuration to the given update options. +func (f FieldOwner) ApplyToUpdate(opts *UpdateOptions) { + opts.FieldManager = string(f) +} + +// ApplyToSubResourcePatch applies this configuration to the given patch options. +func (f FieldOwner) ApplyToSubResourcePatch(opts *SubResourcePatchOptions) { + opts.FieldManager = string(f) +} + +// ApplyToSubResourceCreate applies this configuration to the given create options. +func (f FieldOwner) ApplyToSubResourceCreate(opts *SubResourceCreateOptions) { + opts.FieldManager = string(f) +} + +// ApplyToSubResourceUpdate applies this configuration to the given update options. +func (f FieldOwner) ApplyToSubResourceUpdate(opts *SubResourceUpdateOptions) { + opts.FieldManager = string(f) +} + +// }}} + +// {{{ Create Options + +// CreateOptions contains options for create requests. It's generally a subset +// of metav1.CreateOptions. +type CreateOptions struct { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string + + // FieldManager is the name of the user or component submitting + // this request. It must be set with server-side apply. + FieldManager string + + // Raw represents raw CreateOptions, as passed to the API server. + Raw *metav1.CreateOptions +} + +// AsCreateOptions returns these options as a metav1.CreateOptions. +// This may mutate the Raw field. +func (o *CreateOptions) AsCreateOptions() *metav1.CreateOptions { + if o == nil { + return &metav1.CreateOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.CreateOptions{} + } + + o.Raw.DryRun = o.DryRun + o.Raw.FieldManager = o.FieldManager + return o.Raw +} + +// ApplyOptions applies the given create options on these options, +// and then returns itself (for convenient chaining). +func (o *CreateOptions) ApplyOptions(opts []CreateOption) *CreateOptions { + for _, opt := range opts { + opt.ApplyToCreate(o) + } + return o +} + +// ApplyToCreate implements CreateOption. +func (o *CreateOptions) ApplyToCreate(co *CreateOptions) { + if o.DryRun != nil { + co.DryRun = o.DryRun + } + if o.FieldManager != "" { + co.FieldManager = o.FieldManager + } + if o.Raw != nil { + co.Raw = o.Raw + } +} + +var _ CreateOption = &CreateOptions{} + +// }}} + +// {{{ Delete Options + +// DeleteOptions contains options for delete requests. It's generally a subset +// of metav1.DeleteOptions. +type DeleteOptions struct { + // GracePeriodSeconds is the duration in seconds before the object should be + // deleted. Value must be non-negative integer. The value zero indicates + // delete immediately. If this value is nil, the default grace period for the + // specified type will be used. + GracePeriodSeconds *int64 + + // Preconditions must be fulfilled before a deletion is carried out. If not + // possible, a 409 Conflict status will be returned. + Preconditions *metav1.Preconditions + + // PropagationPolicy determined whether and how garbage collection will be + // performed. Either this field or OrphanDependents may be set, but not both. 
+ // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + PropagationPolicy *metav1.DeletionPropagation + + // Raw represents raw DeleteOptions, as passed to the API server. + Raw *metav1.DeleteOptions + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + DryRun []string +} + +// AsDeleteOptions returns these options as a metav1.DeleteOptions. +// This may mutate the Raw field. +func (o *DeleteOptions) AsDeleteOptions() *metav1.DeleteOptions { + if o == nil { + return &metav1.DeleteOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.DeleteOptions{} + } + + o.Raw.GracePeriodSeconds = o.GracePeriodSeconds + o.Raw.Preconditions = o.Preconditions + o.Raw.PropagationPolicy = o.PropagationPolicy + o.Raw.DryRun = o.DryRun + return o.Raw +} + +// ApplyOptions applies the given delete options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteOptions) ApplyOptions(opts []DeleteOption) *DeleteOptions { + for _, opt := range opts { + opt.ApplyToDelete(o) + } + return o +} + +var _ DeleteOption = &DeleteOptions{} + +// ApplyToDelete implements DeleteOption. +func (o *DeleteOptions) ApplyToDelete(do *DeleteOptions) { + if o.GracePeriodSeconds != nil { + do.GracePeriodSeconds = o.GracePeriodSeconds + } + if o.Preconditions != nil { + do.Preconditions = o.Preconditions + } + if o.PropagationPolicy != nil { + do.PropagationPolicy = o.PropagationPolicy + } + if o.Raw != nil { + do.Raw = o.Raw + } + if o.DryRun != nil { + do.DryRun = o.DryRun + } +} + +// GracePeriodSeconds sets the grace period for the deletion +// to the given number of seconds. +type GracePeriodSeconds int64 + +// ApplyToDelete applies this configuration to the given delete options. +func (s GracePeriodSeconds) ApplyToDelete(opts *DeleteOptions) { + secs := int64(s) + opts.GracePeriodSeconds = &secs +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (s GracePeriodSeconds) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + s.ApplyToDelete(&opts.DeleteOptions) +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions metav1.Preconditions + +// ApplyToDelete applies this configuration to the given delete options. +func (p Preconditions) ApplyToDelete(opts *DeleteOptions) { + preconds := metav1.Preconditions(p) + opts.Preconditions = &preconds +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (p Preconditions) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// PropagationPolicy determined whether and how garbage collection will be +// performed. Either this field or OrphanDependents may be set, but not both. +// The default policy is decided by the existing finalizer set in the +// metadata.finalizers and the resource-specific default policy. 
+// Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - +// allow the garbage collector to delete the dependents in the background; +// 'Foreground' - a cascading policy that deletes all dependents in the +// foreground. +type PropagationPolicy metav1.DeletionPropagation + +// ApplyToDelete applies the given delete options on these options. +// It will propagate to the dependents of the object to let the garbage collector handle it. +func (p PropagationPolicy) ApplyToDelete(opts *DeleteOptions) { + policy := metav1.DeletionPropagation(p) + opts.PropagationPolicy = &policy +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (p PropagationPolicy) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + p.ApplyToDelete(&opts.DeleteOptions) +} + +// }}} + +// {{{ Get Options + +// GetOptions contains options for get operation. +// Now it only has a Raw field, with support for specific resourceVersion. +type GetOptions struct { + // Raw represents raw GetOptions, as passed to the API server. Note + // that these may not be respected by all implementations of interface. + Raw *metav1.GetOptions +} + +var _ GetOption = &GetOptions{} + +// ApplyToGet implements GetOption for GetOptions. +func (o *GetOptions) ApplyToGet(lo *GetOptions) { + if o.Raw != nil { + lo.Raw = o.Raw + } +} + +// AsGetOptions returns these options as a flattened metav1.GetOptions. +// This may mutate the Raw field. +func (o *GetOptions) AsGetOptions() *metav1.GetOptions { + if o == nil || o.Raw == nil { + return &metav1.GetOptions{} + } + return o.Raw +} + +// ApplyOptions applies the given get options on these options, +// and then returns itself (for convenient chaining). +func (o *GetOptions) ApplyOptions(opts []GetOption) *GetOptions { + for _, opt := range opts { + opt.ApplyToGet(o) + } + return o +} + +// }}} + +// {{{ List Options + +// ListOptions contains options for limiting or filtering results. +// It's generally a subset of metav1.ListOptions, with support for +// pre-parsed selectors (since generally, selectors will be executed +// against the cache). +type ListOptions struct { + // LabelSelector filters results by label. Use labels.Parse() to + // set from raw string form. + LabelSelector labels.Selector + // FieldSelector filters results by a particular field. In order + // to use this with cache-based implementations, restrict usage to + // a single field-value pair that's been added to the indexers. + FieldSelector fields.Selector + + // Namespace represents the namespace to list for, or empty for + // non-namespaced objects, or to list across all namespaces. + Namespace string + + // Limit specifies the maximum number of results to return from the server. The server may + // not support this field on all resource types, but if it does and more results remain it + // will set the continue field on the returned list object. This field is not supported if watch + // is true in the Raw ListOptions. + Limit int64 + // Continue is a token returned by the server that lets a client retrieve chunks of results + // from the server by specifying limit. The server may reject requests for continuation tokens + // it does not recognize and will return a 410 error if the token can no longer be used because + // it has expired. This field is not supported if watch is true in the Raw ListOptions. + Continue string + + // UnsafeDisableDeepCopy indicates not to deep copy objects during list objects. 
+ // Be very careful with this, when enabled you must DeepCopy any object before mutating it, + // otherwise you will mutate the object in the cache. + // +optional + UnsafeDisableDeepCopy *bool + + // Raw represents raw ListOptions, as passed to the API server. Note + // that these may not be respected by all implementations of interface, + // and the LabelSelector, FieldSelector, Limit and Continue fields are ignored. + Raw *metav1.ListOptions +} + +var _ ListOption = &ListOptions{} + +// ApplyToList implements ListOption for ListOptions. +func (o *ListOptions) ApplyToList(lo *ListOptions) { + if o.LabelSelector != nil { + lo.LabelSelector = o.LabelSelector + } + if o.FieldSelector != nil { + lo.FieldSelector = o.FieldSelector + } + if o.Namespace != "" { + lo.Namespace = o.Namespace + } + if o.Raw != nil { + lo.Raw = o.Raw + } + if o.Limit > 0 { + lo.Limit = o.Limit + } + if o.Continue != "" { + lo.Continue = o.Continue + } + if o.UnsafeDisableDeepCopy != nil { + lo.UnsafeDisableDeepCopy = o.UnsafeDisableDeepCopy + } +} + +// AsListOptions returns these options as a flattened metav1.ListOptions. +// This may mutate the Raw field. +func (o *ListOptions) AsListOptions() *metav1.ListOptions { + if o == nil { + return &metav1.ListOptions{} + } + if o.Raw == nil { + o.Raw = &metav1.ListOptions{} + } + if o.LabelSelector != nil { + o.Raw.LabelSelector = o.LabelSelector.String() + } + if o.FieldSelector != nil { + o.Raw.FieldSelector = o.FieldSelector.String() + } + if !o.Raw.Watch { + o.Raw.Limit = o.Limit + o.Raw.Continue = o.Continue + } + return o.Raw +} + +// ApplyOptions applies the given list options on these options, +// and then returns itself (for convenient chaining). +func (o *ListOptions) ApplyOptions(opts []ListOption) *ListOptions { + for _, opt := range opts { + opt.ApplyToList(o) + } + return o +} + +// MatchingLabels filters the list/delete operation on the given set of labels. +type MatchingLabels map[string]string + +// ApplyToList applies this configuration to the given list options. +func (m MatchingLabels) ApplyToList(opts *ListOptions) { + // TODO(directxman12): can we avoid reserializing this over and over? + sel := labels.SelectorFromValidatedSet(map[string]string(m)) + opts.LabelSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m MatchingLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// HasLabels filters the list/delete operation checking if the set of labels exists +// without checking their values. +type HasLabels []string + +// ApplyToList applies this configuration to the given list options. +func (m HasLabels) ApplyToList(opts *ListOptions) { + sel := labels.NewSelector() + for _, label := range m { + r, err := labels.NewRequirement(label, selection.Exists, nil) + if err == nil { + sel = sel.Add(*r) + } + } + opts.LabelSelector = sel +} + +// ApplyToDeleteAllOf applies this configuration to the given an List options. +func (m HasLabels) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) { + m.ApplyToList(&opts.ListOptions) +} + +// MatchingLabelsSelector filters the list/delete operation on the given label +// selector (or index in the case of cached lists). A struct is used because +// labels.Selector is an interface, which cannot be aliased. +type MatchingLabelsSelector struct { + labels.Selector +} + +// ApplyToList applies this configuration to the given list options. 
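+//
+// For example (an illustrative sketch; the selector expression is
+// hypothetical), using a parsed set-based selector that MatchingLabels
+// cannot express:
+//
+//  sel, _ := labels.Parse("app in (web,api),tier!=debug")
+//  err := c.List(ctx, podList, MatchingLabelsSelector{Selector: sel})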
+func (m MatchingLabelsSelector) ApplyToList(opts *ListOptions) {
+ opts.LabelSelector = m
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given DeleteAllOf options.
+func (m MatchingLabelsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ m.ApplyToList(&opts.ListOptions)
+}
+
+// MatchingFields filters the list/delete operation on the given field Set
+// (or index in the case of cached lists).
+type MatchingFields fields.Set
+
+// ApplyToList applies this configuration to the given list options.
+func (m MatchingFields) ApplyToList(opts *ListOptions) {
+ // TODO(directxman12): can we avoid re-serializing this?
+ sel := fields.Set(m).AsSelector()
+ opts.FieldSelector = sel
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given DeleteAllOf options.
+func (m MatchingFields) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ m.ApplyToList(&opts.ListOptions)
+}
+
+// MatchingFieldsSelector filters the list/delete operation on the given field
+// selector (or index in the case of cached lists). A struct is used because
+// fields.Selector is an interface, which cannot be aliased.
+type MatchingFieldsSelector struct {
+ fields.Selector
+}
+
+// ApplyToList applies this configuration to the given list options.
+func (m MatchingFieldsSelector) ApplyToList(opts *ListOptions) {
+ opts.FieldSelector = m
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given DeleteAllOf options.
+func (m MatchingFieldsSelector) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ m.ApplyToList(&opts.ListOptions)
+}
+
+// InNamespace restricts the list/delete operation to the given namespace.
+type InNamespace string
+
+// ApplyToList applies this configuration to the given list options.
+func (n InNamespace) ApplyToList(opts *ListOptions) {
+ opts.Namespace = string(n)
+}
+
+// ApplyToDeleteAllOf applies this configuration to the given DeleteAllOf options.
+func (n InNamespace) ApplyToDeleteAllOf(opts *DeleteAllOfOptions) {
+ n.ApplyToList(&opts.ListOptions)
+}
+
+// Limit specifies the maximum number of results to return from the server.
+// Limit does not implement DeleteAllOfOption interface because the server
+// does not support setting it for deletecollection operations.
+type Limit int64
+
+// ApplyToList applies this configuration to the given list options.
+func (l Limit) ApplyToList(opts *ListOptions) {
+ opts.Limit = int64(l)
+}
+
+// UnsafeDisableDeepCopyOption indicates not to deep copy objects during list operations.
+// Be very careful with this, when enabled you must DeepCopy any object before mutating it,
+// otherwise you will mutate the object in the cache.
+type UnsafeDisableDeepCopyOption bool
+
+// ApplyToList applies this configuration to the given list options.
+func (d UnsafeDisableDeepCopyOption) ApplyToList(opts *ListOptions) {
+ definitelyTrue := true
+ definitelyFalse := false
+ if d {
+  opts.UnsafeDisableDeepCopy = &definitelyTrue
+ } else {
+  opts.UnsafeDisableDeepCopy = &definitelyFalse
+ }
+}
+
+// UnsafeDisableDeepCopy indicates not to deep copy objects during list operations.
+const UnsafeDisableDeepCopy = UnsafeDisableDeepCopyOption(true)
+
+// Continue sets a continuation token to retrieve chunks of results when using limit.
+// Continue does not implement DeleteAllOfOption interface because the server
+// does not support setting it for deletecollection operations.
+type Continue string
+
+// ApplyToList applies this configuration to the given list options.
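+//
+// For example (an illustrative paging sketch, assuming list is an ObjectList
+// such as a corev1.PodList), fetching results 50 at a time:
+//
+//  token := ""
+//  for {
+//   if err := c.List(ctx, list, Limit(50), Continue(token)); err != nil {
+//    break
+//   }
+//   // ... process the current chunk ...
+//   token = list.GetContinue()
+//   if token == "" {
+//    break
+//   }
+//  }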
+func (c Continue) ApplyToList(opts *ListOptions) {
+ opts.Continue = string(c)
+}
+
+// }}}
+
+// {{{ Update Options
+
+// UpdateOptions contains options for update requests. It's generally a subset
+// of metav1.UpdateOptions.
+type UpdateOptions struct {
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ DryRun []string
+
+ // FieldManager is the name of the user or component submitting
+ // this request. It must be set with server-side apply.
+ FieldManager string
+
+ // Raw represents raw UpdateOptions, as passed to the API server.
+ Raw *metav1.UpdateOptions
+}
+
+// AsUpdateOptions returns these options as a metav1.UpdateOptions.
+// This may mutate the Raw field.
+func (o *UpdateOptions) AsUpdateOptions() *metav1.UpdateOptions {
+ if o == nil {
+  return &metav1.UpdateOptions{}
+ }
+ if o.Raw == nil {
+  o.Raw = &metav1.UpdateOptions{}
+ }
+
+ o.Raw.DryRun = o.DryRun
+ o.Raw.FieldManager = o.FieldManager
+ return o.Raw
+}
+
+// ApplyOptions applies the given update options on these options,
+// and then returns itself (for convenient chaining).
+func (o *UpdateOptions) ApplyOptions(opts []UpdateOption) *UpdateOptions {
+ for _, opt := range opts {
+  opt.ApplyToUpdate(o)
+ }
+ return o
+}
+
+var _ UpdateOption = &UpdateOptions{}
+
+// ApplyToUpdate implements UpdateOption.
+func (o *UpdateOptions) ApplyToUpdate(uo *UpdateOptions) {
+ if o.DryRun != nil {
+  uo.DryRun = o.DryRun
+ }
+ if o.FieldManager != "" {
+  uo.FieldManager = o.FieldManager
+ }
+ if o.Raw != nil {
+  uo.Raw = o.Raw
+ }
+}
+
+// }}}
+
+// {{{ Patch Options
+
+// PatchOptions contains options for patch requests.
+type PatchOptions struct {
+ // When present, indicates that modifications should not be
+ // persisted. An invalid or unrecognized dryRun directive will
+ // result in an error response and no further processing of the
+ // request. Valid values are:
+ // - All: all dry run stages will be processed
+ DryRun []string
+
+ // Force is going to "force" Apply requests. It means user will
+ // re-acquire conflicting fields owned by other people. Force
+ // flag must be unset for non-apply patch requests.
+ // +optional
+ Force *bool
+
+ // FieldManager is the name of the user or component submitting
+ // this request. It must be set with server-side apply.
+ FieldManager string
+
+ // Raw represents raw PatchOptions, as passed to the API server.
+ Raw *metav1.PatchOptions
+}
+
+// ApplyOptions applies the given patch options on these options,
+// and then returns itself (for convenient chaining).
+func (o *PatchOptions) ApplyOptions(opts []PatchOption) *PatchOptions {
+ for _, opt := range opts {
+  opt.ApplyToPatch(o)
+ }
+ return o
+}
+
+// AsPatchOptions returns these options as a metav1.PatchOptions.
+// This may mutate the Raw field.
+func (o *PatchOptions) AsPatchOptions() *metav1.PatchOptions {
+ if o == nil {
+  return &metav1.PatchOptions{}
+ }
+ if o.Raw == nil {
+  o.Raw = &metav1.PatchOptions{}
+ }
+
+ o.Raw.DryRun = o.DryRun
+ o.Raw.Force = o.Force
+ o.Raw.FieldManager = o.FieldManager
+ return o.Raw
+}
+
+var _ PatchOption = &PatchOptions{}
+
+// ApplyToPatch implements PatchOption.
+func (o *PatchOptions) ApplyToPatch(po *PatchOptions) { + if o.DryRun != nil { + po.DryRun = o.DryRun + } + if o.Force != nil { + po.Force = o.Force + } + if o.FieldManager != "" { + po.FieldManager = o.FieldManager + } + if o.Raw != nil { + po.Raw = o.Raw + } +} + +// ForceOwnership indicates that in case of conflicts with server-side apply, +// the client should acquire ownership of the conflicting field. Most +// controllers should use this. +var ForceOwnership = forceOwnership{} + +type forceOwnership struct{} + +func (forceOwnership) ApplyToPatch(opts *PatchOptions) { + definitelyTrue := true + opts.Force = &definitelyTrue +} + +// }}} + +// {{{ DeleteAllOf Options + +// these are all just delete options and list options + +// DeleteAllOfOptions contains options for deletecollection (deleteallof) requests. +// It's just list and delete options smooshed together. +type DeleteAllOfOptions struct { + ListOptions + DeleteOptions +} + +// ApplyOptions applies the given deleteallof options on these options, +// and then returns itself (for convenient chaining). +func (o *DeleteAllOfOptions) ApplyOptions(opts []DeleteAllOfOption) *DeleteAllOfOptions { + for _, opt := range opts { + opt.ApplyToDeleteAllOf(o) + } + return o +} + +var _ DeleteAllOfOption = &DeleteAllOfOptions{} + +// ApplyToDeleteAllOf implements DeleteAllOfOption. +func (o *DeleteAllOfOptions) ApplyToDeleteAllOf(do *DeleteAllOfOptions) { + o.ApplyToList(&do.ListOptions) + o.ApplyToDelete(&do.DeleteOptions) +} + +// }}} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go new file mode 100644 index 00000000..11d60838 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/patch.go @@ -0,0 +1,213 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "fmt" + + jsonpatch "github.com/evanphx/json-patch/v5" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/strategicpatch" +) + +var ( + // Apply uses server-side apply to patch the given object. + Apply Patch = applyPatch{} + + // Merge uses the raw object as a merge patch, without modifications. + // Use MergeFrom if you wish to compute a diff instead. + Merge Patch = mergePatch{} +) + +type patch struct { + patchType types.PatchType + data []byte +} + +// Type implements Patch. +func (s *patch) Type() types.PatchType { + return s.patchType +} + +// Data implements Patch. +func (s *patch) Data(obj Object) ([]byte, error) { + return s.data, nil +} + +// RawPatch constructs a new Patch with the given PatchType and data. +func RawPatch(patchType types.PatchType, data []byte) Patch { + return &patch{patchType, data} +} + +// MergeFromWithOptimisticLock can be used if clients want to make sure a patch +// is being applied to the latest resource version of an object. +// +// The behavior is similar to what an Update would do, without the need to send the +// whole object. 
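+//
+// For example (an illustrative sketch), a conflict-checked label update:
+//
+//  base := pod.DeepCopy()
+//  pod.SetLabels(map[string]string{"owner": "team-a"})
+//  patch := MergeFromWithOptions(base, MergeFromWithOptimisticLock{})
+//  err := c.Patch(ctx, pod, patch) // conflicts if pod changed on the server meanwhile
+//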
Usually this method is useful if you might have multiple clients
+// acting on the same object and the same API version, but with different versions of the Go structs.
+//
+// For example, an "older" copy of a Widget that has fields A and B, and a "newer" copy with A, B, and C.
+// Sending an update using the older struct definition results in C being dropped, whereas using a patch does not.
+type MergeFromWithOptimisticLock struct{}
+
+// ApplyToMergeFrom applies this configuration to the given patch options.
+func (m MergeFromWithOptimisticLock) ApplyToMergeFrom(in *MergeFromOptions) {
+ in.OptimisticLock = true
+}
+
+// MergeFromOption is some configuration that modifies options for a merge-from patch data.
+type MergeFromOption interface {
+ // ApplyToMergeFrom applies this configuration to the given patch options.
+ ApplyToMergeFrom(*MergeFromOptions)
+}
+
+// MergeFromOptions contains options to generate a merge-from patch data.
+type MergeFromOptions struct {
+ // OptimisticLock, when true, includes `metadata.resourceVersion` into the final
+ // patch data. If the `resourceVersion` field doesn't match what's stored,
+ // the operation results in a conflict and clients will need to try again.
+ OptimisticLock bool
+}
+
+type mergeFromPatch struct {
+ patchType types.PatchType
+ createPatch func(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error)
+ from Object
+ opts MergeFromOptions
+}
+
+// Type implements Patch.
+func (s *mergeFromPatch) Type() types.PatchType {
+ return s.patchType
+}
+
+// Data implements Patch.
+func (s *mergeFromPatch) Data(obj Object) ([]byte, error) {
+ original := s.from
+ modified := obj
+
+ if s.opts.OptimisticLock {
+  version := original.GetResourceVersion()
+  if len(version) == 0 {
+   return nil, fmt.Errorf("cannot use OptimisticLock, object %q does not have any resource version we can use", original)
+  }
+
+  original = original.DeepCopyObject().(Object)
+  original.SetResourceVersion("")
+
+  modified = modified.DeepCopyObject().(Object)
+  modified.SetResourceVersion(version)
+ }
+
+ originalJSON, err := json.Marshal(original)
+ if err != nil {
+  return nil, err
+ }
+
+ modifiedJSON, err := json.Marshal(modified)
+ if err != nil {
+  return nil, err
+ }
+
+ data, err := s.createPatch(originalJSON, modifiedJSON, obj)
+ if err != nil {
+  return nil, err
+ }
+
+ return data, nil
+}
+
+func createMergePatch(originalJSON, modifiedJSON []byte, _ interface{}) ([]byte, error) {
+ return jsonpatch.CreateMergePatch(originalJSON, modifiedJSON)
+}
+
+func createStrategicMergePatch(originalJSON, modifiedJSON []byte, dataStruct interface{}) ([]byte, error) {
+ return strategicpatch.CreateTwoWayMergePatch(originalJSON, modifiedJSON, dataStruct)
+}
+
+// MergeFrom creates a Patch that patches using the merge-patch strategy with the given object as base.
+// The difference between MergeFrom and StrategicMergeFrom lies in the handling of modified list fields.
+// When using MergeFrom, existing lists will be completely replaced by new lists.
+// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type,
+// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`.
+// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on
+// the difference between merge-patch and strategic-merge-patch.
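+//
+// A typical usage sketch (illustrative):
+//
+//  base := pod.DeepCopy()
+//  pod.SetLabels(map[string]string{"tier": "web"})
+//  err := c.Patch(ctx, pod, MergeFrom(base))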
+func MergeFrom(obj Object) Patch { + return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj} +} + +// MergeFromWithOptions creates a Patch that patches using the merge-patch strategy with the given object as base. +// See MergeFrom for more details. +func MergeFromWithOptions(obj Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{patchType: types.MergePatchType, createPatch: createMergePatch, from: obj, opts: *options} +} + +// StrategicMergeFrom creates a Patch that patches using the strategic-merge-patch strategy with the given object as base. +// The difference between MergeFrom and StrategicMergeFrom lays in the handling of modified list fields. +// When using MergeFrom, existing lists will be completely replaced by new lists. +// When using StrategicMergeFrom, the list field's `patchStrategy` is respected if specified in the API type, +// e.g. the existing list is not replaced completely but rather merged with the new one using the list's `patchMergeKey`. +// See https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/ for more details on +// the difference between merge-patch and strategic-merge-patch. +// Please note, that CRDs don't support strategic-merge-patch, see +// https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/#advanced-features-and-flexibility +func StrategicMergeFrom(obj Object, opts ...MergeFromOption) Patch { + options := &MergeFromOptions{} + for _, opt := range opts { + opt.ApplyToMergeFrom(options) + } + return &mergeFromPatch{patchType: types.StrategicMergePatchType, createPatch: createStrategicMergePatch, from: obj, opts: *options} +} + +// mergePatch uses a raw merge strategy to patch the object. +type mergePatch struct{} + +// Type implements Patch. +func (p mergePatch) Type() types.PatchType { + return types.MergePatchType +} + +// Data implements Patch. +func (p mergePatch) Data(obj Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) +} + +// applyPatch uses server-side apply to patch the object. +type applyPatch struct{} + +// Type implements Patch. +func (p applyPatch) Type() types.PatchType { + return types.ApplyPatchType +} + +// Data implements Patch. +func (p applyPatch) Data(obj Object) ([]byte, error) { + // NB(directxman12): we might technically want to be using an actual encoder + // here (in case some more performant encoder is introduced) but this is + // correct and sufficient for our uses (it's what the JSON serializer in + // client-go does, more-or-less). + return json.Marshal(obj) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go new file mode 100644 index 00000000..19d1ab4d --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/split.go @@ -0,0 +1,143 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package client
+
+import (
+	"context"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
+	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"
+)
+
+// NewDelegatingClientInput encapsulates the input parameters to create a new delegating client.
+type NewDelegatingClientInput struct {
+	CacheReader       Reader
+	Client            Client
+	UncachedObjects   []Object
+	CacheUnstructured bool
+}
+
+// NewDelegatingClient creates a new delegating client.
+//
+// A delegating client forms a Client by composing separate reader, writer and
+// status client interfaces. This way, you can have a Client that reads from a
+// cache and writes to the API server.
+func NewDelegatingClient(in NewDelegatingClientInput) (Client, error) {
+	uncachedGVKs := map[schema.GroupVersionKind]struct{}{}
+	for _, obj := range in.UncachedObjects {
+		gvk, err := apiutil.GVKForObject(obj, in.Client.Scheme())
+		if err != nil {
+			return nil, err
+		}
+		uncachedGVKs[gvk] = struct{}{}
+	}
+
+	return &delegatingClient{
+		scheme: in.Client.Scheme(),
+		mapper: in.Client.RESTMapper(),
+		Reader: &delegatingReader{
+			CacheReader:       in.CacheReader,
+			ClientReader:      in.Client,
+			scheme:            in.Client.Scheme(),
+			uncachedGVKs:      uncachedGVKs,
+			cacheUnstructured: in.CacheUnstructured,
+		},
+		Writer:                       in.Client,
+		StatusClient:                 in.Client,
+		SubResourceClientConstructor: in.Client,
+	}, nil
+}
+
+type delegatingClient struct {
+	Reader
+	Writer
+	StatusClient
+	SubResourceClientConstructor
+
+	scheme *runtime.Scheme
+	mapper meta.RESTMapper
+}
+
+// Scheme returns the scheme this client is using.
+func (d *delegatingClient) Scheme() *runtime.Scheme {
+	return d.scheme
+}
+
+// RESTMapper returns the rest mapper this client is using.
+func (d *delegatingClient) RESTMapper() meta.RESTMapper {
+	return d.mapper
+}
+
+// delegatingReader forms a Reader that will cause Get and List requests for
+// unstructured types to use the ClientReader while requests for any other type
+// of object will use the CacheReader. This avoids accidentally caching the
+// entire cluster in the common case of loading arbitrary unstructured objects
+// (e.g. from OwnerReferences).
+type delegatingReader struct {
+	CacheReader  Reader
+	ClientReader Reader
+
+	uncachedGVKs      map[schema.GroupVersionKind]struct{}
+	scheme            *runtime.Scheme
+	cacheUnstructured bool
+}
+
+func (d *delegatingReader) shouldBypassCache(obj runtime.Object) (bool, error) {
+	gvk, err := apiutil.GVKForObject(obj, d.scheme)
+	if err != nil {
+		return false, err
+	}
+	// TODO: this is producing unsafe guesses that don't actually work,
+	// but it matches ~99% of the cases out there.
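A sketch of composing such a delegating client, assuming an informer cache from `pkg/cache` (which implements `client.Reader`); the helper name and the choice of Secrets as an uncached type are illustrative:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// newCachedClient composes a client whose reads come from an informer cache,
// except for Secrets (and, by default, all unstructured objects), which are
// always fetched live; writes always go to the API server.
func newCachedClient(informerCache cache.Cache, apiClient client.Client) (client.Client, error) {
	return client.NewDelegatingClient(client.NewDelegatingClientInput{
		CacheReader:     informerCache, // cache.Cache implements client.Reader
		Client:          apiClient,
		UncachedObjects: []client.Object{&corev1.Secret{}},
	})
}
```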
+ if meta.IsListType(obj) { + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + } + if _, isUncached := d.uncachedGVKs[gvk]; isUncached { + return true, nil + } + if !d.cacheUnstructured { + _, isUnstructured := obj.(*unstructured.Unstructured) + _, isUnstructuredList := obj.(*unstructured.UnstructuredList) + return isUnstructured || isUnstructuredList, nil + } + return false, nil +} + +// Get retrieves an obj for a given object key from the Kubernetes Cluster. +func (d *delegatingReader) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + if isUncached, err := d.shouldBypassCache(obj); err != nil { + return err + } else if isUncached { + return d.ClientReader.Get(ctx, key, obj, opts...) + } + return d.CacheReader.Get(ctx, key, obj, opts...) +} + +// List retrieves list of objects for a given namespace and list options. +func (d *delegatingReader) List(ctx context.Context, list ObjectList, opts ...ListOption) error { + if isUncached, err := d.shouldBypassCache(list); err != nil { + return err + } else if isUncached { + return d.ClientReader.List(ctx, list, opts...) + } + return d.CacheReader.List(ctx, list, opts...) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go new file mode 100644 index 00000000..ade25157 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/typed_client.go @@ -0,0 +1,281 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Reader = &typedClient{} +var _ Writer = &typedClient{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. +type typedClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client. +func (c *typedClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + + return o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Update implements client.Client. +func (c *typedClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := &UpdateOptions{} + updateOpts.ApplyOptions(opts) + + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +// Delete implements client.Client. 
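A short sketch of driving this typed client through the generic `client.Client` surface; the ConfigMap and the option choices are illustrative. Options such as `DryRunAll` and `GracePeriodSeconds` end up in the `VersionedParams` and `Body` calls of the request builders above.

```go
package example

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// roundTrip creates, dry-run-updates, and deletes a ConfigMap.
func roundTrip(ctx context.Context, c client.Client) error {
	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Data:       map[string]string{"key": "v1"},
	}
	if err := c.Create(ctx, cm); err != nil {
		return err
	}
	cm.Data["key"] = "v2"
	// Server-side dry run: the update is validated but not persisted.
	if err := c.Update(ctx, cm, client.DryRunAll); err != nil {
		return err
	}
	return c.Delete(ctx, cm, client.GracePeriodSeconds(0))
}
```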
+func (c *typedClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client. +func (c *typedClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), c.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client. +func (c *typedClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + patchOpts.ApplyOptions(opts) + + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.AsPatchOptions(), c.paramCodec). + Body(data). + Do(ctx). + Into(obj) +} + +// Get implements client.Client. +func (c *typedClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + getOpts := GetOptions{} + getOpts.ApplyOptions(opts) + return r.Get(). + NamespaceIfScoped(key.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(getOpts.AsGetOptions(), c.paramCodec). + Name(key.Name).Do(ctx).Into(obj) +} + +// List implements client.Client. +func (c *typedClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error { + r, err := c.cache.getResource(obj) + if err != nil { + return err + } + + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), c.paramCodec). + Do(ctx). + Into(obj) +} + +func (c *typedClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + if subResourceObj.GetName() == "" { + subResourceObj.SetName(obj.GetName()) + } + + getOpts := &SubResourceGetOptions{} + getOpts.ApplyOptions(opts) + + return o.Get(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource(subResource). + VersionedParams(getOpts.AsGetOptions(), c.paramCodec). + Do(ctx). + Into(subResourceObj) +} + +func (c *typedClient) CreateSubResource(ctx context.Context, obj Object, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + if subResourceObj.GetName() == "" { + subResourceObj.SetName(obj.GetName()) + } + + createOpts := &SubResourceCreateOptions{} + createOpts.ApplyOptions(opts) + + return o.Post(). 
+ NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource(subResource). + Body(subResourceObj). + VersionedParams(createOpts.AsCreateOptions(), c.paramCodec). + Do(ctx). + Into(subResourceObj) +} + +// UpdateSubResource used by SubResourceWriter to write status. +func (c *typedClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + // TODO(droot): examine the returned error and check if it error needs to be + // wrapped to improve the UX ? + // It will be nice to receive an error saying the object doesn't implement + // status subresource and check CRD definition + updateOpts := &SubResourceUpdateOptions{} + updateOpts.ApplyOptions(opts) + + body := obj + if updateOpts.SubResourceBody != nil { + body = updateOpts.SubResourceBody + } + if body.GetName() == "" { + body.SetName(obj.GetName()) + } + if body.GetNamespace() == "" { + body.SetNamespace(obj.GetNamespace()) + } + + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource(subResource). + Body(body). + VersionedParams(updateOpts.AsUpdateOptions(), c.paramCodec). + Do(ctx). + Into(body) +} + +// PatchSubResource used by SubResourceWriter to write subresource. +func (c *typedClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { + o, err := c.cache.getObjMeta(obj) + if err != nil { + return err + } + + patchOpts := &SubResourcePatchOptions{} + patchOpts.ApplyOptions(opts) + + body := obj + if patchOpts.SubResourceBody != nil { + body = patchOpts.SubResourceBody + } + + data, err := patch.Data(body) + if err != nil { + return err + } + + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource(subResource). + Body(data). + VersionedParams(patchOpts.AsPatchOptions(), c.paramCodec). + Do(ctx). + Into(body) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go new file mode 100644 index 00000000..7f25c7be --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/unstructured_client.go @@ -0,0 +1,364 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" +) + +var _ Reader = &unstructuredClient{} +var _ Writer = &unstructuredClient{} + +// client is a client.Client that reads and writes directly from/to an API server. It lazily initializes +// new clients at the time they are used, and caches the client. 
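A sketch of a read that exercises this unstructured path: because the target is an `*unstructured.Unstructured`, the client routes the call through `unstructuredClient`, and the GVK set before the call survives the round trip. The helper name is illustrative.

```go
package example

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// getByGVK fetches an object with no compiled-in Go type, e.g. with
// gvk = schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"}.
func getByGVK(ctx context.Context, c client.Client, key client.ObjectKey, gvk schema.GroupVersionKind) (*unstructured.Unstructured, error) {
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(gvk)
	if err := c.Get(ctx, key, u); err != nil {
		return nil, err
	}
	return u, nil
}
```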
+type unstructuredClient struct { + cache *clientCache + paramCodec runtime.ParameterCodec +} + +// Create implements client.Client. +func (uc *unstructuredClient) Create(ctx context.Context, obj Object, opts ...CreateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + createOpts := &CreateOptions{} + createOpts.ApplyOptions(opts) + + result := o.Post(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Body(obj). + VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Update implements client.Client. +func (uc *unstructuredClient) Update(ctx context.Context, obj Object, opts ...UpdateOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := UpdateOptions{} + updateOpts.ApplyOptions(opts) + + result := o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(obj). + VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(obj) + + u.SetGroupVersionKind(gvk) + return result +} + +// Delete implements client.Client. +func (uc *unstructuredClient) Delete(ctx context.Context, obj Object, opts ...DeleteOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteOpts := DeleteOptions{} + deleteOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + Body(deleteOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// DeleteAllOf implements client.Client. +func (uc *unstructuredClient) DeleteAllOf(ctx context.Context, obj Object, opts ...DeleteAllOfOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + deleteAllOfOpts := DeleteAllOfOptions{} + deleteAllOfOpts.ApplyOptions(opts) + + return o.Delete(). + NamespaceIfScoped(deleteAllOfOpts.ListOptions.Namespace, o.isNamespaced()). + Resource(o.resource()). + VersionedParams(deleteAllOfOpts.AsListOptions(), uc.paramCodec). + Body(deleteAllOfOpts.AsDeleteOptions()). + Do(ctx). + Error() +} + +// Patch implements client.Client. +func (uc *unstructuredClient) Patch(ctx context.Context, obj Object, patch Patch, opts ...PatchOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + data, err := patch.Data(obj) + if err != nil { + return err + } + + patchOpts := &PatchOptions{} + patchOpts.ApplyOptions(opts) + + return o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + VersionedParams(patchOpts.AsPatchOptions(), uc.paramCodec). + Body(data). 
+		Do(ctx).
+		Into(obj)
+}
+
+// Get implements client.Client.
+func (uc *unstructuredClient) Get(ctx context.Context, key ObjectKey, obj Object, opts ...GetOption) error {
+	u, ok := obj.(*unstructured.Unstructured)
+	if !ok {
+		return fmt.Errorf("unstructured client did not understand object: %T", obj)
+	}
+
+	gvk := u.GroupVersionKind()
+
+	getOpts := GetOptions{}
+	getOpts.ApplyOptions(opts)
+
+	r, err := uc.cache.getResource(obj)
+	if err != nil {
+		return err
+	}
+
+	result := r.Get().
+		NamespaceIfScoped(key.Namespace, r.isNamespaced()).
+		Resource(r.resource()).
+		VersionedParams(getOpts.AsGetOptions(), uc.paramCodec).
+		Name(key.Name).
+		Do(ctx).
+		Into(obj)
+
+	u.SetGroupVersionKind(gvk)
+
+	return result
+}
+
+// List implements client.Client.
+func (uc *unstructuredClient) List(ctx context.Context, obj ObjectList, opts ...ListOption) error {
+	u, ok := obj.(*unstructured.UnstructuredList)
+	if !ok {
+		return fmt.Errorf("unstructured client did not understand object: %T", obj)
+	}
+
+	gvk := u.GroupVersionKind()
+	gvk.Kind = strings.TrimSuffix(gvk.Kind, "List")
+
+	r, err := uc.cache.getResource(obj)
+	if err != nil {
+		return err
+	}
+
+	listOpts := ListOptions{}
+	listOpts.ApplyOptions(opts)
+
+	return r.Get().
+		NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()).
+		Resource(r.resource()).
+		VersionedParams(listOpts.AsListOptions(), uc.paramCodec).
+		Do(ctx).
+		Into(obj)
+}
+
+func (uc *unstructuredClient) GetSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceGetOption) error {
+	if _, ok := obj.(*unstructured.Unstructured); !ok {
+		return fmt.Errorf("unstructured client did not understand object: %T", obj)
+	}
+
+	if _, ok := subResourceObj.(*unstructured.Unstructured); !ok {
+		return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj)
+	}
+
+	if subResourceObj.GetName() == "" {
+		subResourceObj.SetName(obj.GetName())
+	}
+
+	o, err := uc.cache.getObjMeta(obj)
+	if err != nil {
+		return err
+	}
+
+	getOpts := &SubResourceGetOptions{}
+	getOpts.ApplyOptions(opts)
+
+	return o.Get().
+		NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+		Resource(o.resource()).
+		Name(o.GetName()).
+		SubResource(subResource).
+		VersionedParams(getOpts.AsGetOptions(), uc.paramCodec).
+		Do(ctx).
+		Into(subResourceObj)
+}
+
+func (uc *unstructuredClient) CreateSubResource(ctx context.Context, obj, subResourceObj Object, subResource string, opts ...SubResourceCreateOption) error {
+	if _, ok := obj.(*unstructured.Unstructured); !ok {
+		return fmt.Errorf("unstructured client did not understand object: %T", obj)
+	}
+
+	if _, ok := subResourceObj.(*unstructured.Unstructured); !ok {
+		return fmt.Errorf("unstructured client did not understand object: %T", subResourceObj)
+	}
+
+	if subResourceObj.GetName() == "" {
+		subResourceObj.SetName(obj.GetName())
+	}
+
+	o, err := uc.cache.getObjMeta(obj)
+	if err != nil {
+		return err
+	}
+
+	createOpts := &SubResourceCreateOptions{}
+	createOpts.ApplyOptions(opts)
+
+	return o.Post().
+		NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()).
+		Resource(o.resource()).
+		Name(o.GetName()).
+		SubResource(subResource).
+		Body(subResourceObj).
+		VersionedParams(createOpts.AsCreateOptions(), uc.paramCodec).
+		Do(ctx).
+ Into(subResourceObj) +} + +func (uc *unstructuredClient) UpdateSubResource(ctx context.Context, obj Object, subResource string, opts ...SubResourceUpdateOption) error { + if _, ok := obj.(*unstructured.Unstructured); !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + updateOpts := SubResourceUpdateOptions{} + updateOpts.ApplyOptions(opts) + + body := obj + if updateOpts.SubResourceBody != nil { + body = updateOpts.SubResourceBody + } + if body.GetName() == "" { + body.SetName(obj.GetName()) + } + if body.GetNamespace() == "" { + body.SetNamespace(obj.GetNamespace()) + } + + return o.Put(). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource(subResource). + Body(body). + VersionedParams(updateOpts.AsUpdateOptions(), uc.paramCodec). + Do(ctx). + Into(body) +} + +func (uc *unstructuredClient) PatchSubResource(ctx context.Context, obj Object, subResource string, patch Patch, opts ...SubResourcePatchOption) error { + u, ok := obj.(*unstructured.Unstructured) + if !ok { + return fmt.Errorf("unstructured client did not understand object: %T", obj) + } + + gvk := u.GroupVersionKind() + + o, err := uc.cache.getObjMeta(obj) + if err != nil { + return err + } + + patchOpts := &SubResourcePatchOptions{} + patchOpts.ApplyOptions(opts) + + body := obj + if patchOpts.SubResourceBody != nil { + body = patchOpts.SubResourceBody + } + + data, err := patch.Data(body) + if err != nil { + return err + } + + result := o.Patch(patch.Type()). + NamespaceIfScoped(o.GetNamespace(), o.isNamespaced()). + Resource(o.resource()). + Name(o.GetName()). + SubResource(subResource). + Body(data). + VersionedParams(patchOpts.AsPatchOptions(), uc.paramCodec). + Do(ctx). + Into(body) + + u.SetGroupVersionKind(gvk) + return result +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go new file mode 100644 index 00000000..70490664 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/client/watch.go @@ -0,0 +1,114 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package client + +import ( + "context" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" +) + +// NewWithWatch returns a new WithWatch. 
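A sketch of using `NewWithWatch`; names are illustrative, and the zero `client.Options` value falls back to the default scheme and REST mapper:

```go
package example

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// printPodEvents opens a watch on Pods in one namespace and prints events
// until the channel closes.
func printPodEvents(cfg *rest.Config, namespace string) error {
	c, err := client.NewWithWatch(cfg, client.Options{})
	if err != nil {
		return err
	}
	w, err := c.Watch(context.Background(), &corev1.PodList{}, client.InNamespace(namespace))
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type, ev.Object.GetObjectKind().GroupVersionKind())
	}
	return nil
}
```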
+func NewWithWatch(config *rest.Config, options Options) (WithWatch, error) { + client, err := newClient(config, options) + if err != nil { + return nil, err + } + dynamicClient, err := dynamic.NewForConfig(config) + if err != nil { + return nil, err + } + return &watchingClient{client: client, dynamic: dynamicClient}, nil +} + +type watchingClient struct { + *client + dynamic dynamic.Interface +} + +func (w *watchingClient) Watch(ctx context.Context, list ObjectList, opts ...ListOption) (watch.Interface, error) { + switch l := list.(type) { + case *unstructured.UnstructuredList: + return w.unstructuredWatch(ctx, l, opts...) + case *metav1.PartialObjectMetadataList: + return w.metadataWatch(ctx, l, opts...) + default: + return w.typedWatch(ctx, l, opts...) + } +} + +func (w *watchingClient) listOpts(opts ...ListOption) ListOptions { + listOpts := ListOptions{} + listOpts.ApplyOptions(opts) + if listOpts.Raw == nil { + listOpts.Raw = &metav1.ListOptions{} + } + listOpts.Raw.Watch = true + + return listOpts +} + +func (w *watchingClient) metadataWatch(ctx context.Context, obj *metav1.PartialObjectMetadataList, opts ...ListOption) (watch.Interface, error) { + gvk := obj.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + listOpts := w.listOpts(opts...) + + resInt, err := w.client.metadataClient.getResourceInterface(gvk, listOpts.Namespace) + if err != nil { + return nil, err + } + + return resInt.Watch(ctx, *listOpts.AsListOptions()) +} + +func (w *watchingClient) unstructuredWatch(ctx context.Context, obj *unstructured.UnstructuredList, opts ...ListOption) (watch.Interface, error) { + gvk := obj.GroupVersionKind() + gvk.Kind = strings.TrimSuffix(gvk.Kind, "List") + + r, err := w.client.unstructuredClient.cache.getResource(obj) + if err != nil { + return nil, err + } + + listOpts := w.listOpts(opts...) + + if listOpts.Namespace != "" && r.isNamespaced() { + return w.dynamic.Resource(r.mapping.Resource).Namespace(listOpts.Namespace).Watch(ctx, *listOpts.AsListOptions()) + } + return w.dynamic.Resource(r.mapping.Resource).Watch(ctx, *listOpts.AsListOptions()) +} + +func (w *watchingClient) typedWatch(ctx context.Context, obj ObjectList, opts ...ListOption) (watch.Interface, error) { + r, err := w.client.typedClient.cache.getResource(obj) + if err != nil { + return nil, err + } + + listOpts := w.listOpts(opts...) + + return r.Get(). + NamespaceIfScoped(listOpts.Namespace, r.isNamespaced()). + Resource(r.resource()). + VersionedParams(listOpts.AsListOptions(), w.client.typedClient.paramCodec). + Watch(ctx) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go new file mode 100644 index 00000000..adba3bbc --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/event/doc.go @@ -0,0 +1,28 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+/*
+Package event contains the definitions for the Event types produced by source.Sources and transformed into
+reconcile.Requests by handler.EventHandler.
+
+You should rarely need to work with these directly -- instead, use Controller.Watch with
+source.Sources and handler.EventHandlers.
+
+Events generally contain both a full runtime.Object that caused the event, as well
+as a direct handle to that object's metadata. This saves a lot of typecasting in
+code that works with Events.
+*/
+package event
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go b/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go
new file mode 100644
index 00000000..271b3c00
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/event/event.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package event
+
+import "sigs.k8s.io/controller-runtime/pkg/client"
+
+// CreateEvent is an event where a Kubernetes object was created. CreateEvent should be generated
+// by a source.Source and transformed into a reconcile.Request by a handler.EventHandler.
+type CreateEvent struct {
+	// Object is the object from the event
+	Object client.Object
+}
+
+// UpdateEvent is an event where a Kubernetes object was updated. UpdateEvent should be generated
+// by a source.Source and transformed into a reconcile.Request by a handler.EventHandler.
+type UpdateEvent struct {
+	// ObjectOld is the object from the event
+	ObjectOld client.Object
+
+	// ObjectNew is the object from the event
+	ObjectNew client.Object
+}
+
+// DeleteEvent is an event where a Kubernetes object was deleted. DeleteEvent should be generated
+// by a source.Source and transformed into a reconcile.Request by a handler.EventHandler.
+type DeleteEvent struct {
+	// Object is the object from the event
+	Object client.Object
+
+	// DeleteStateUnknown is true if the Delete event was missed but we identified the object
+	// as having been deleted.
+	DeleteStateUnknown bool
+}
+
+// GenericEvent is an event where the operation type is unknown (e.g. polling or event originating outside the cluster).
+// GenericEvent should be generated by a source.Source and transformed into a reconcile.Request by a
+// handler.EventHandler.
+type GenericEvent struct {
+	// Object is the object from the event
+	Object client.Object
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/field/selector/utils.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/field/selector/utils.go
new file mode 100644
index 00000000..4f6d0843
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/field/selector/utils.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selector + +import ( + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/selection" +) + +// RequiresExactMatch checks if the given field selector is of the form `k=v` or `k==v`. +func RequiresExactMatch(sel fields.Selector) (field, val string, required bool) { + reqs := sel.Requirements() + if len(reqs) != 1 { + return "", "", false + } + req := reqs[0] + if req.Operator != selection.Equals && req.Operator != selection.DoubleEquals { + return "", "", false + } + return req.Field, req.Value, true +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go new file mode 100644 index 00000000..d91a0ca5 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/log/log.go @@ -0,0 +1,32 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "github.com/go-logr/logr" + + "sigs.k8s.io/controller-runtime/pkg/log" +) + +var ( + // RuntimeLog is a base parent logger for use inside controller-runtime. + RuntimeLog logr.Logger +) + +func init() { + RuntimeLog = log.Log.WithName("controller-runtime") +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go new file mode 100644 index 00000000..7057f3db --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/internal/objectutil/objectutil.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package objectutil + +import ( + "errors" + "fmt" + + apimeta "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" +) + +// FilterWithLabels returns a copy of the items in objs matching labelSel. 
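`FilterWithLabels` lives in an internal package and cannot be imported directly; the sketch below shows the same per-object check it applies, using the public `labels` API. The helper name and the Pod slice are illustrative.

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
)

// keepMatching wraps each object's labels in a labels.Set and tests the
// selector, e.g. one built with labels.Parse("app=web").
func keepMatching(pods []corev1.Pod, sel labels.Selector) []corev1.Pod {
	out := make([]corev1.Pod, 0, len(pods))
	for _, p := range pods {
		// A nil selector admits everything, mirroring the helper above.
		if sel == nil || sel.Matches(labels.Set(p.GetLabels())) {
			out = append(out, p)
		}
	}
	return out
}
```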
+func FilterWithLabels(objs []runtime.Object, labelSel labels.Selector) ([]runtime.Object, error) { + outItems := make([]runtime.Object, 0, len(objs)) + for _, obj := range objs { + meta, err := apimeta.Accessor(obj) + if err != nil { + return nil, err + } + if labelSel != nil { + lbls := labels.Set(meta.GetLabels()) + if !labelSel.Matches(lbls) { + continue + } + } + outItems = append(outItems, obj.DeepCopyObject()) + } + return outItems, nil +} + +// IsAPINamespaced returns true if the object is namespace scoped. +// For unstructured objects the gvk is found from the object itself. +func IsAPINamespaced(obj runtime.Object, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { + gvk, err := apiutil.GVKForObject(obj, scheme) + if err != nil { + return false, err + } + + return IsAPINamespacedWithGVK(gvk, scheme, restmapper) +} + +// IsAPINamespacedWithGVK returns true if the object having the provided +// GVK is namespace scoped. +func IsAPINamespacedWithGVK(gk schema.GroupVersionKind, scheme *runtime.Scheme, restmapper apimeta.RESTMapper) (bool, error) { + restmapping, err := restmapper.RESTMapping(schema.GroupKind{Group: gk.Group, Kind: gk.Kind}) + if err != nil { + return false, fmt.Errorf("failed to get restmapping: %w", err) + } + + scope := restmapping.Scope.Name() + + if scope == "" { + return false, errors.New("scope cannot be identified, empty scope returned") + } + + if scope != apimeta.RESTScopeNameRoot { + return true, nil + } + return false, nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go new file mode 100644 index 00000000..c82447d9 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/deleg.go @@ -0,0 +1,199 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "sync" + + "github.com/go-logr/logr" +) + +// loggerPromise knows how to populate a concrete logr.Logger +// with options, given an actual base logger later on down the line. +type loggerPromise struct { + logger *DelegatingLogSink + childPromises []*loggerPromise + promisesLock sync.Mutex + + name *string + tags []interface{} +} + +func (p *loggerPromise) WithName(l *DelegatingLogSink, name string) *loggerPromise { + res := &loggerPromise{ + logger: l, + name: &name, + promisesLock: sync.Mutex{}, + } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() + p.childPromises = append(p.childPromises, res) + return res +} + +// WithValues provides a new Logger with the tags appended. +func (p *loggerPromise) WithValues(l *DelegatingLogSink, tags ...interface{}) *loggerPromise { + res := &loggerPromise{ + logger: l, + tags: tags, + promisesLock: sync.Mutex{}, + } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() + p.childPromises = append(p.childPromises, res) + return res +} + +// Fulfill instantiates the Logger with the provided logger. 
+func (p *loggerPromise) Fulfill(parentLogSink logr.LogSink) { + sink := parentLogSink + if p.name != nil { + sink = sink.WithName(*p.name) + } + + if p.tags != nil { + sink = sink.WithValues(p.tags...) + } + + p.logger.lock.Lock() + p.logger.logger = sink + if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok { + p.logger.logger = withCallDepth.WithCallDepth(1) + } + p.logger.promise = nil + p.logger.lock.Unlock() + + for _, childPromise := range p.childPromises { + childPromise.Fulfill(sink) + } +} + +// DelegatingLogSink is a logsink that delegates to another logr.LogSink. +// If the underlying promise is not nil, it registers calls to sub-loggers with +// the logging factory to be populated later, and returns a new delegating +// logger. It expects to have *some* logr.Logger set at all times (generally +// a no-op logger before the promises are fulfilled). +type DelegatingLogSink struct { + lock sync.RWMutex + logger logr.LogSink + promise *loggerPromise + info logr.RuntimeInfo +} + +// Init implements logr.LogSink. +func (l *DelegatingLogSink) Init(info logr.RuntimeInfo) { + l.lock.Lock() + defer l.lock.Unlock() + l.info = info +} + +// Enabled tests whether this Logger is enabled. For example, commandline +// flags might be used to set the logging verbosity and disable some info +// logs. +func (l *DelegatingLogSink) Enabled(level int) bool { + l.lock.RLock() + defer l.lock.RUnlock() + return l.logger.Enabled(level) +} + +// Info logs a non-error message with the given key/value pairs as context. +// +// The msg argument should be used to add some constant description to +// the log line. The key/value pairs can then be used to add additional +// variable information. The key/value pairs should alternate string +// keys and arbitrary values. +func (l *DelegatingLogSink) Info(level int, msg string, keysAndValues ...interface{}) { + l.lock.RLock() + defer l.lock.RUnlock() + l.logger.Info(level, msg, keysAndValues...) +} + +// Error logs an error, with the given message and key/value pairs as context. +// It functions similarly to calling Info with the "error" named value, but may +// have unique behavior, and should be preferred for logging errors (see the +// package documentations for more information). +// +// The msg field should be used to add context to any underlying error, +// while the err field should be used to attach the actual error that +// triggered this log line, if present. +func (l *DelegatingLogSink) Error(err error, msg string, keysAndValues ...interface{}) { + l.lock.RLock() + defer l.lock.RUnlock() + l.logger.Error(err, msg, keysAndValues...) +} + +// WithName provides a new Logger with the name appended. +func (l *DelegatingLogSink) WithName(name string) logr.LogSink { + l.lock.RLock() + defer l.lock.RUnlock() + + if l.promise == nil { + sink := l.logger.WithName(name) + if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok { + sink = withCallDepth.WithCallDepth(-1) + } + return sink + } + + res := &DelegatingLogSink{logger: l.logger} + promise := l.promise.WithName(res, name) + res.promise = promise + + return res +} + +// WithValues provides a new Logger with the tags appended. +func (l *DelegatingLogSink) WithValues(tags ...interface{}) logr.LogSink { + l.lock.RLock() + defer l.lock.RUnlock() + + if l.promise == nil { + sink := l.logger.WithValues(tags...) 
+ if withCallDepth, ok := sink.(logr.CallDepthLogSink); ok { + sink = withCallDepth.WithCallDepth(-1) + } + return sink + } + + res := &DelegatingLogSink{logger: l.logger} + promise := l.promise.WithValues(res, tags...) + res.promise = promise + + return res +} + +// Fulfill switches the logger over to use the actual logger +// provided, instead of the temporary initial one, if this method +// has not been previously called. +func (l *DelegatingLogSink) Fulfill(actual logr.LogSink) { + if l.promise != nil { + l.promise.Fulfill(actual) + } +} + +// NewDelegatingLogSink constructs a new DelegatingLogSink which uses +// the given logger before its promise is fulfilled. +func NewDelegatingLogSink(initial logr.LogSink) *DelegatingLogSink { + l := &DelegatingLogSink{ + logger: initial, + promise: &loggerPromise{promisesLock: sync.Mutex{}}, + } + l.promise.logger = l + return l +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go new file mode 100644 index 00000000..082dce3a --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/log.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package log contains utilities for fetching a new logger +// when one is not already available. +// +// # The Log Handle +// +// This package contains a root logr.Logger Log. It may be used to +// get a handle to whatever the root logging implementation is. By +// default, no implementation exists, and the handle returns "promises" +// to loggers. When the implementation is set using SetLogger, these +// "promises" will be converted over to real loggers. +// +// # Logr +// +// All logging in controller-runtime is structured, using a set of interfaces +// defined by a package called logr +// (https://pkg.go.dev/github.com/go-logr/logr). The sub-package zap provides +// helpers for setting up logr backed by Zap (go.uber.org/zap). +package log + +import ( + "context" + "sync" + "time" + + "github.com/go-logr/logr" +) + +// SetLogger sets a concrete logging implementation for all deferred Loggers. +func SetLogger(l logr.Logger) { + loggerWasSetLock.Lock() + defer loggerWasSetLock.Unlock() + + loggerWasSet = true + dlog.Fulfill(l.GetSink()) +} + +// It is safe to assume that if this wasn't set within the first 30 seconds of a binaries +// lifetime, it will never get set. The DelegatingLogSink causes a high number of memory +// allocations when not given an actual Logger, so we set a NullLogSink to avoid that. +// +// We need to keep the DelegatingLogSink because we have various inits() that get a logger from +// here. They will always get executed before any code that imports controller-runtime +// has a chance to run and hence to set an actual logger. 
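A minimal sketch of wiring a real sink into the deferred Log handle and of the context plumbing, assuming the zap helper package that ships with controller-runtime (`pkg/log/zap`); function names are illustrative:

```go
package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

// initLogging fulfills the deferred Log handle with a zap-backed sink; every
// logger already handed out through log.Log starts emitting from here on.
func initLogging() {
	log.SetLogger(zap.New(zap.UseDevMode(true)))
}

// worker stores a logger in the context via IntoContext and retrieves it via
// FromContext (which falls back to log.Log if none is set).
func worker(ctx context.Context) {
	ctx = log.IntoContext(ctx, log.Log.WithName("worker"))
	logger := log.FromContext(ctx, "step", "demo")
	logger.Info("starting")
}
```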
+func init() { + // Init is blocking, so start a new goroutine + go func() { + time.Sleep(30 * time.Second) + loggerWasSetLock.Lock() + defer loggerWasSetLock.Unlock() + if !loggerWasSet { + dlog.Fulfill(NullLogSink{}) + } + }() +} + +var ( + loggerWasSetLock sync.Mutex + loggerWasSet bool +) + +// Log is the base logger used by kubebuilder. It delegates +// to another logr.Logger. You *must* call SetLogger to +// get any actual logging. If SetLogger is not called within +// the first 30 seconds of a binaries lifetime, it will get +// set to a NullLogSink. +var ( + dlog = NewDelegatingLogSink(NullLogSink{}) + Log = logr.New(dlog) +) + +// FromContext returns a logger with predefined values from a context.Context. +func FromContext(ctx context.Context, keysAndValues ...interface{}) logr.Logger { + log := Log + if ctx != nil { + if logger, err := logr.FromContext(ctx); err == nil { + log = logger + } + } + return log.WithValues(keysAndValues...) +} + +// IntoContext takes a context and sets the logger as one of its values. +// Use FromContext function to retrieve the logger. +func IntoContext(ctx context.Context, log logr.Logger) context.Context { + return logr.NewContext(ctx, log) +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go new file mode 100644 index 00000000..f3e81074 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/null.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package log + +import ( + "github.com/go-logr/logr" +) + +// NB: this is the same as the null logger logr/testing, +// but avoids accidentally adding the testing flags to +// all binaries. + +// NullLogSink is a logr.Logger that does nothing. +type NullLogSink struct{} + +var _ logr.LogSink = NullLogSink{} + +// Init implements logr.LogSink. +func (log NullLogSink) Init(logr.RuntimeInfo) { +} + +// Info implements logr.InfoLogger. +func (NullLogSink) Info(_ int, _ string, _ ...interface{}) { + // Do nothing. +} + +// Enabled implements logr.InfoLogger. +func (NullLogSink) Enabled(level int) bool { + return false +} + +// Error implements logr.Logger. +func (NullLogSink) Error(_ error, _ string, _ ...interface{}) { + // Do nothing. +} + +// WithName implements logr.Logger. +func (log NullLogSink) WithName(_ string) logr.LogSink { + return log +} + +// WithValues implements logr.Logger. +func (log NullLogSink) WithValues(_ ...interface{}) logr.LogSink { + return log +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go new file mode 100644 index 00000000..e9522632 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/log/warning_handler.go @@ -0,0 +1,76 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package log
+
+import (
+	"sync"
+
+	"github.com/go-logr/logr"
+)
+
+// KubeAPIWarningLoggerOptions controls the behavior
+// of a rest.WarningHandler constructed using NewKubeAPIWarningLogger().
+type KubeAPIWarningLoggerOptions struct {
+	// Deduplicate indicates a given warning message should only be written once.
+	// Setting this to true in a long-running process handling many warnings can
+	// result in increased memory use.
+	Deduplicate bool
+}
+
+// KubeAPIWarningLogger is a wrapper around
+// a provided logr.Logger that implements the
+// rest.WarningHandler interface.
+type KubeAPIWarningLogger struct {
+	// logger is used to log responses with the warning header
+	logger logr.Logger
+	// opts contain options controlling warning output
+	opts KubeAPIWarningLoggerOptions
+	// writtenLock guards written
+	writtenLock sync.Mutex
+	// used to keep track of already logged messages
+	// and help in de-duplication.
+	written map[string]struct{}
+}
+
+// HandleWarningHeader handles logging for responses from the API server that are
+// warnings with code being 299 and uses a logr.Logger for its logging purposes.
+func (l *KubeAPIWarningLogger) HandleWarningHeader(code int, agent string, message string) {
+	if code != 299 || len(message) == 0 {
+		return
+	}
+
+	if l.opts.Deduplicate {
+		l.writtenLock.Lock()
+		defer l.writtenLock.Unlock()
+
+		if _, alreadyLogged := l.written[message]; alreadyLogged {
+			return
+		}
+		l.written[message] = struct{}{}
+	}
+	l.logger.Info(message)
+}
+
+// NewKubeAPIWarningLogger returns an implementation of rest.WarningHandler that logs warnings
+// with code = 299 to the provided logr.Logger.
+func NewKubeAPIWarningLogger(l logr.Logger, opts KubeAPIWarningLoggerOptions) *KubeAPIWarningLogger {
+	h := &KubeAPIWarningLogger{logger: l, opts: opts}
+	if opts.Deduplicate {
+		h.written = map[string]struct{}{}
+	}
+	return h
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/doc.go
new file mode 100644
index 00000000..e498107e
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package predicate defines Predicates used by Controllers to filter Events before they are provided to EventHandlers.
+*/ +package predicate diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go new file mode 100644 index 00000000..8b0f3634 --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/predicate/predicate.go @@ -0,0 +1,383 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package predicate + +import ( + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/event" + logf "sigs.k8s.io/controller-runtime/pkg/internal/log" + "sigs.k8s.io/controller-runtime/pkg/runtime/inject" +) + +var log = logf.RuntimeLog.WithName("predicate").WithName("eventFilters") + +// Predicate filters events before enqueuing the keys. +type Predicate interface { + // Create returns true if the Create event should be processed + Create(event.CreateEvent) bool + + // Delete returns true if the Delete event should be processed + Delete(event.DeleteEvent) bool + + // Update returns true if the Update event should be processed + Update(event.UpdateEvent) bool + + // Generic returns true if the Generic event should be processed + Generic(event.GenericEvent) bool +} + +var _ Predicate = Funcs{} +var _ Predicate = ResourceVersionChangedPredicate{} +var _ Predicate = GenerationChangedPredicate{} +var _ Predicate = AnnotationChangedPredicate{} +var _ Predicate = or{} +var _ Predicate = and{} +var _ Predicate = not{} + +// Funcs is a function that implements Predicate. +type Funcs struct { + // Create returns true if the Create event should be processed + CreateFunc func(event.CreateEvent) bool + + // Delete returns true if the Delete event should be processed + DeleteFunc func(event.DeleteEvent) bool + + // Update returns true if the Update event should be processed + UpdateFunc func(event.UpdateEvent) bool + + // Generic returns true if the Generic event should be processed + GenericFunc func(event.GenericEvent) bool +} + +// Create implements Predicate. +func (p Funcs) Create(e event.CreateEvent) bool { + if p.CreateFunc != nil { + return p.CreateFunc(e) + } + return true +} + +// Delete implements Predicate. +func (p Funcs) Delete(e event.DeleteEvent) bool { + if p.DeleteFunc != nil { + return p.DeleteFunc(e) + } + return true +} + +// Update implements Predicate. +func (p Funcs) Update(e event.UpdateEvent) bool { + if p.UpdateFunc != nil { + return p.UpdateFunc(e) + } + return true +} + +// Generic implements Predicate. +func (p Funcs) Generic(e event.GenericEvent) bool { + if p.GenericFunc != nil { + return p.GenericFunc(e) + } + return true +} + +// NewPredicateFuncs returns a predicate funcs that applies the given filter function +// on CREATE, UPDATE, DELETE and GENERIC events. For UPDATE events, the filter is applied +// to the new object. 
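A sketch of both construction styles documented above; the namespace filter and the callback choices are illustrative:

```go
package example

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

// skipKubeSystem drops every event for objects in kube-system, on all four
// event kinds, via NewPredicateFuncs.
var skipKubeSystem = predicate.NewPredicateFuncs(func(o client.Object) bool {
	return o.GetNamespace() != "kube-system"
})

// deletesOnly is a Funcs literal: callbacks left nil default to true, so only
// the explicitly-false ones filter anything out (here, everything but deletes).
var deletesOnly = predicate.Funcs{
	CreateFunc:  func(event.CreateEvent) bool { return false },
	UpdateFunc:  func(event.UpdateEvent) bool { return false },
	GenericFunc: func(event.GenericEvent) bool { return false },
}
```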
+func NewPredicateFuncs(filter func(object client.Object) bool) Funcs { + return Funcs{ + CreateFunc: func(e event.CreateEvent) bool { + return filter(e.Object) + }, + UpdateFunc: func(e event.UpdateEvent) bool { + return filter(e.ObjectNew) + }, + DeleteFunc: func(e event.DeleteEvent) bool { + return filter(e.Object) + }, + GenericFunc: func(e event.GenericEvent) bool { + return filter(e.Object) + }, + } +} + +// ResourceVersionChangedPredicate implements a default update predicate function on resource version change. +type ResourceVersionChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating resource version change. +func (ResourceVersionChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object to update", "event", e) + return false + } + + return e.ObjectNew.GetResourceVersion() != e.ObjectOld.GetResourceVersion() +} + +// GenerationChangedPredicate implements a default update predicate function on Generation change. +// +// This predicate will skip update events that have no change in the object's metadata.generation field. +// The metadata.generation field of an object is incremented by the API server when writes are made to the spec field of an object. +// This allows a controller to ignore update events where the spec is unchanged, and only the metadata and/or status fields are changed. +// +// For CustomResource objects the Generation is only incremented when the status subresource is enabled. +// +// Caveats: +// +// * The assumption that the Generation is incremented only on writing to the spec does not hold for all APIs. +// E.g For Deployment objects the Generation is also incremented on writes to the metadata.annotations field. +// For object types other than CustomResources be sure to verify which fields will trigger a Generation increment when they are written to. +// +// * With this predicate, any update events with writes only to the status field will not be reconciled. +// So in the event that the status block is overwritten or wiped by someone else the controller will not self-correct to restore the correct status. +type GenerationChangedPredicate struct { + Funcs +} + +// Update implements default UpdateEvent filter for validating generation change. +func (GenerationChangedPredicate) Update(e event.UpdateEvent) bool { + if e.ObjectOld == nil { + log.Error(nil, "Update event has no old object to update", "event", e) + return false + } + if e.ObjectNew == nil { + log.Error(nil, "Update event has no new object for update", "event", e) + return false + } + + return e.ObjectNew.GetGeneration() != e.ObjectOld.GetGeneration() +} + +// AnnotationChangedPredicate implements a default update predicate function on annotation change. +// +// This predicate will skip update events that have no change in the object's annotation. 
+// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example:
+//
+//	Controller.Watch(
+//		&source.Kind{Type: v1.MyCustomKind},
+//		&handler.EnqueueRequestForObject{},
+//		predicate.Or(predicate.GenerationChangedPredicate{}, predicate.AnnotationChangedPredicate{}))
+//
+// This is mostly useful for controllers that need to trigger both when the resource's generation is incremented
+// (i.e., when the resource's .spec changes) and when an annotation changes (e.g., for a staging/alpha API).
+type AnnotationChangedPredicate struct {
+	Funcs
+}
+
+// Update implements default UpdateEvent filter for validating annotation change.
+func (AnnotationChangedPredicate) Update(e event.UpdateEvent) bool {
+	if e.ObjectOld == nil {
+		log.Error(nil, "Update event has no old object to update", "event", e)
+		return false
+	}
+	if e.ObjectNew == nil {
+		log.Error(nil, "Update event has no new object for update", "event", e)
+		return false
+	}
+
+	return !reflect.DeepEqual(e.ObjectNew.GetAnnotations(), e.ObjectOld.GetAnnotations())
+}
+
+// LabelChangedPredicate implements a default update predicate function on label change.
+//
+// This predicate will skip update events that have no change in the object's labels.
+// It is intended to be used in conjunction with the GenerationChangedPredicate, as in the following example:
+//
+//	Controller.Watch(
+//		&source.Kind{Type: v1.MyCustomKind},
+//		&handler.EnqueueRequestForObject{},
+//		predicate.Or(predicate.GenerationChangedPredicate{}, predicate.LabelChangedPredicate{}))
+//
+// This is helpful when the object's labels carry extra specification information beyond the object's spec, and the
+// controller should be triggered by any valid specification change (not only in the spec, but also in the labels).
+type LabelChangedPredicate struct {
+	Funcs
+}
+
+// Update implements default UpdateEvent filter for checking label change.
+func (LabelChangedPredicate) Update(e event.UpdateEvent) bool {
+	if e.ObjectOld == nil {
+		log.Error(nil, "Update event has no old object to update", "event", e)
+		return false
+	}
+	if e.ObjectNew == nil {
+		log.Error(nil, "Update event has no new object for update", "event", e)
+		return false
+	}
+
+	return !reflect.DeepEqual(e.ObjectNew.GetLabels(), e.ObjectOld.GetLabels())
+}
+
+// And returns a composite predicate that implements a logical AND of the predicates passed to it.
+func And(predicates ...Predicate) Predicate {
+	return and{predicates}
+}
+
+type and struct {
+	predicates []Predicate
+}
+
+func (a and) InjectFunc(f inject.Func) error {
+	for _, p := range a.predicates {
+		if err := f(p); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (a and) Create(e event.CreateEvent) bool {
+	for _, p := range a.predicates {
+		if !p.Create(e) {
+			return false
+		}
+	}
+	return true
+}
+
+func (a and) Update(e event.UpdateEvent) bool {
+	for _, p := range a.predicates {
+		if !p.Update(e) {
+			return false
+		}
+	}
+	return true
+}
+
+func (a and) Delete(e event.DeleteEvent) bool {
+	for _, p := range a.predicates {
+		if !p.Delete(e) {
+			return false
+		}
+	}
+	return true
+}
+
+func (a and) Generic(e event.GenericEvent) bool {
+	for _, p := range a.predicates {
+		if !p.Generic(e) {
+			return false
+		}
+	}
+	return true
+}
+
+// Or returns a composite predicate that implements a logical OR of the predicates passed to it.
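In practice, composed predicates are usually wired up through the builder package; a sketch, assuming controller-runtime's `ctrl` alias and `pkg/builder`, with the ConfigMap type purely illustrative:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/builder"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// setup reconciles only when the spec generation or the labels change,
// exactly the pairing the comments above recommend.
func setup(mgr ctrl.Manager, r reconcile.Reconciler) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&corev1.ConfigMap{}, builder.WithPredicates(
			predicate.Or(
				predicate.GenerationChangedPredicate{},
				predicate.LabelChangedPredicate{},
			),
		)).
		Complete(r)
}
```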
+func Or(predicates ...Predicate) Predicate { + return or{predicates} +} + +type or struct { + predicates []Predicate +} + +func (o or) InjectFunc(f inject.Func) error { + for _, p := range o.predicates { + if err := f(p); err != nil { + return err + } + } + return nil +} + +func (o or) Create(e event.CreateEvent) bool { + for _, p := range o.predicates { + if p.Create(e) { + return true + } + } + return false +} + +func (o or) Update(e event.UpdateEvent) bool { + for _, p := range o.predicates { + if p.Update(e) { + return true + } + } + return false +} + +func (o or) Delete(e event.DeleteEvent) bool { + for _, p := range o.predicates { + if p.Delete(e) { + return true + } + } + return false +} + +func (o or) Generic(e event.GenericEvent) bool { + for _, p := range o.predicates { + if p.Generic(e) { + return true + } + } + return false +} + +// Not returns a predicate that implements a logical NOT of the predicate passed to it. +func Not(predicate Predicate) Predicate { + return not{predicate} +} + +type not struct { + predicate Predicate +} + +func (n not) InjectFunc(f inject.Func) error { + return f(n.predicate) +} + +func (n not) Create(e event.CreateEvent) bool { + return !n.predicate.Create(e) +} + +func (n not) Update(e event.UpdateEvent) bool { + return !n.predicate.Update(e) +} + +func (n not) Delete(e event.DeleteEvent) bool { + return !n.predicate.Delete(e) +} + +func (n not) Generic(e event.GenericEvent) bool { + return !n.predicate.Generic(e) +} + +// LabelSelectorPredicate constructs a Predicate from a LabelSelector. +// Only objects matching the LabelSelector will be admitted. +func LabelSelectorPredicate(s metav1.LabelSelector) (Predicate, error) { + selector, err := metav1.LabelSelectorAsSelector(&s) + if err != nil { + return Funcs{}, err + } + return NewPredicateFuncs(func(o client.Object) bool { + return selector.Matches(labels.Set(o.GetLabels())) + }), nil +} diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/doc.go new file mode 100644 index 00000000..d221dd7b --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package reconcile defines the Reconciler interface to implement Kubernetes APIs. Reconciler is provided +to Controllers at creation time as the API implementation. +*/ +package reconcile diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go new file mode 100644 index 00000000..8285e2ca --- /dev/null +++ b/vendor/sigs.k8s.io/controller-runtime/pkg/reconcile/reconcile.go @@ -0,0 +1,102 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reconcile
+
+import (
+	"context"
+	"time"
+
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// Result contains the result of a Reconciler invocation.
+type Result struct {
+	// Requeue tells the Controller to requeue the reconcile key. Defaults to false.
+	Requeue bool
+
+	// RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration.
+	// Implies that Requeue is true; there is no need to set Requeue to true at the same time as RequeueAfter.
+	RequeueAfter time.Duration
+}
+
+// IsZero returns true if this result is empty.
+func (r *Result) IsZero() bool {
+	if r == nil {
+		return true
+	}
+	return *r == Result{}
+}
+
+// Request contains the information necessary to reconcile a Kubernetes object. This includes the
+// information to uniquely identify the object - its Name and Namespace. It does NOT contain information about
+// any specific Event or the object contents itself.
+type Request struct {
+	// NamespacedName is the name and namespace of the object to reconcile.
+	types.NamespacedName
+}
+
+/*
+Reconciler implements a Kubernetes API for a specific Resource by Creating, Updating or Deleting Kubernetes
+objects, or by making changes to systems external to the cluster (e.g. cloud providers, GitHub, etc.).
+
+reconcile implementations compare the state specified in an object by a user against the actual cluster state,
+and then perform operations to make the actual cluster state reflect the state specified by the user.
+
+Typically, reconcile is triggered by a Controller in response to cluster Events (e.g. Creating, Updating,
+Deleting Kubernetes objects) or external Events (GitHub Webhooks, polling external sources, etc.).
+
+Example reconcile logic:
+
+* Read an object and all the Pods it owns.
+* Observe that the object spec specifies 5 replicas but actual cluster contains only 1 Pod replica.
+* Create 4 Pods and set their OwnerReferences to the object.
+
+reconcile may be implemented as either a type:
+
+	type reconciler struct {}
+
+	func (reconciler) Reconcile(ctx context.Context, o reconcile.Request) (reconcile.Result, error) {
+		// Implement business logic of reading and writing objects here
+		return reconcile.Result{}, nil
+	}
+
+Or as a function:
+
+	reconcile.Func(func(ctx context.Context, o reconcile.Request) (reconcile.Result, error) {
+		// Implement business logic of reading and writing objects here
+		return reconcile.Result{}, nil
+	})
+
+Reconciliation is level-based, meaning action isn't driven off changes in individual Events, but instead is
+driven by actual cluster state read from the apiserver or a local cache.
+For example, if responding to a Pod Delete Event, the Request won't contain that a Pod was deleted;
+instead, the reconcile function observes this when reading the cluster state and seeing the Pod as missing.
+*/
+type Reconciler interface {
+	// Reconcile performs a full reconciliation for the object referred to by the Request.
+	// The Controller will requeue the Request to be processed again if an error is non-nil or
+	// Result.Requeue is true, otherwise upon completion it will remove the work from the queue.
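+	//
+	// A sketch of a typical return (the readiness condition is illustrative;
+	// time is the standard library package imported above):
+	//
+	//	if !ready {
+	//		return Result{RequeueAfter: 30 * time.Second}, nil // check again later
+	//	}
+	//	return Result{}, nil // success; do not requeue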
+	Reconcile(context.Context, Request) (Result, error)
+}
+
+// Func is a function that implements the Reconciler interface.
+type Func func(context.Context, Request) (Result, error)
+
+var _ Reconciler = Func(nil)
+
+// Reconcile implements Reconciler.
+func (r Func) Reconcile(ctx context.Context, o Request) (Result, error) { return r(ctx, o) }
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go
new file mode 100644
index 00000000..17c60895
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+/*
+Package inject defines interfaces and functions for propagating dependencies from a ControllerManager to
+the components registered with it. Dependencies are propagated to Reconciler, Source, EventHandler and Predicate
+objects which implement the Injectable interfaces.
+*/
+package inject
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go
new file mode 100644
index 00000000..c8c56ba8
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/runtime/inject/inject.go
@@ -0,0 +1,164 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package inject is used by a Manager to inject types into Sources, EventHandlers, Predicates, and Reconcilers.
+// Deprecated: Use manager.Options fields directly. This package will be removed in v0.10.
+package inject
+
+import (
+	"github.com/go-logr/logr"
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/rest"
+
+	"sigs.k8s.io/controller-runtime/pkg/cache"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Cache is used by the ControllerManager to inject Cache into Sources, EventHandlers, Predicates, and
+// Reconcilers.
+type Cache interface {
+	InjectCache(cache cache.Cache) error
+}
+
+// CacheInto will set informers on i and return the result if it implements Cache. Returns
+// false if i does not implement Cache.
+func CacheInto(c cache.Cache, i interface{}) (bool, error) {
+	if s, ok := i.(Cache); ok {
+		return true, s.InjectCache(c)
+	}
+	return false, nil
+}
+
+// APIReader is used by the Manager to inject the APIReader into necessary types.
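+// A component opts in to injection by implementing the interface; the Manager
+// then hands it the dependency via the corresponding *Into helper. A sketch
+// (mySource is illustrative, not part of this package):
+//
+//	type mySource struct{ reader client.Reader }
+//
+//	func (s *mySource) InjectAPIReader(r client.Reader) error {
+//		s.reader = r
+//		return nil
+//	}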
+type APIReader interface {
+	InjectAPIReader(client.Reader) error
+}
+
+// APIReaderInto will set APIReader on i and return the result if it implements APIReader.
+// Returns false if i does not implement APIReader.
+func APIReaderInto(reader client.Reader, i interface{}) (bool, error) {
+	if s, ok := i.(APIReader); ok {
+		return true, s.InjectAPIReader(reader)
+	}
+	return false, nil
+}
+
+// Config is used by the ControllerManager to inject Config into Sources, EventHandlers, Predicates, and
+// Reconcilers.
+type Config interface {
+	InjectConfig(*rest.Config) error
+}
+
+// ConfigInto will set config on i and return the result if it implements Config. Returns
+// false if i does not implement Config.
+func ConfigInto(config *rest.Config, i interface{}) (bool, error) {
+	if s, ok := i.(Config); ok {
+		return true, s.InjectConfig(config)
+	}
+	return false, nil
+}
+
+// Client is used by the ControllerManager to inject client into Sources, EventHandlers, Predicates, and
+// Reconcilers.
type Client interface {
+	InjectClient(client.Client) error
+}
+
+// ClientInto will set client on i and return the result if it implements Client. Returns
+// false if i does not implement Client.
+func ClientInto(client client.Client, i interface{}) (bool, error) {
+	if s, ok := i.(Client); ok {
+		return true, s.InjectClient(client)
+	}
+	return false, nil
+}
+
+// Scheme is used by the ControllerManager to inject Scheme into Sources, EventHandlers, Predicates, and
+// Reconcilers.
+type Scheme interface {
+	InjectScheme(scheme *runtime.Scheme) error
+}
+
+// SchemeInto will set scheme on i and return the result if it implements Scheme. Returns
+// false if i does not implement Scheme.
+func SchemeInto(scheme *runtime.Scheme, i interface{}) (bool, error) {
+	if is, ok := i.(Scheme); ok {
+		return true, is.InjectScheme(scheme)
+	}
+	return false, nil
+}
+
+// Stoppable is used by the ControllerManager to inject stop channel into Sources,
+// EventHandlers, Predicates, and Reconcilers.
+type Stoppable interface {
+	InjectStopChannel(<-chan struct{}) error
+}
+
+// StopChannelInto will set stop channel on i and return the result if it implements Stoppable.
+// Returns false if i does not implement Stoppable.
+func StopChannelInto(stop <-chan struct{}, i interface{}) (bool, error) {
+	if s, ok := i.(Stoppable); ok {
+		return true, s.InjectStopChannel(stop)
+	}
+	return false, nil
+}
+
+// Mapper is used to inject the rest mapper to components that may need it.
+type Mapper interface {
+	InjectMapper(meta.RESTMapper) error
+}
+
+// MapperInto will set the rest mapper on i and return the result if it implements Mapper.
+// Returns false if i does not implement Mapper.
+func MapperInto(mapper meta.RESTMapper, i interface{}) (bool, error) {
+	if m, ok := i.(Mapper); ok {
+		return true, m.InjectMapper(mapper)
+	}
+	return false, nil
+}
+
+// Func injects dependencies into i.
+type Func func(i interface{}) error
+
+// Injector is used by the ControllerManager to inject Func into Controllers.
+type Injector interface {
+	InjectFunc(f Func) error
+}
+
+// InjectorInto will set f on i and return the result if it implements Injector. Returns
+// false if i does not implement Injector.
+func InjectorInto(f Func, i interface{}) (bool, error) {
+	if ii, ok := i.(Injector); ok {
+		return true, ii.InjectFunc(f)
+	}
+	return false, nil
+}
+
+// Logger is used to inject Loggers into components that need them
+// and don't otherwise have opinions.
+type Logger interface {
+	InjectLogger(l logr.Logger) error
+}
+
+// LoggerInto will set the logger on the given object if it implements inject.Logger,
+// returning true if InjectLogger was called, and false otherwise.
+func LoggerInto(l logr.Logger, i interface{}) (bool, error) {
+	if injectable, wantsLogger := i.(Logger); wantsLogger {
+		return true, injectable.InjectLogger(l)
+	}
+	return false, nil
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go
new file mode 100644
index 00000000..55ebe217
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/scheme/scheme.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package scheme contains utilities for gradually building Schemes,
+// which contain information associating Go types with Kubernetes
+// groups, versions, and kinds.
+//
+// Each API group should define a utility function
+// called AddToScheme for adding its types to a Scheme:
+//
+//	// in package myapigroupv1...
+//	var (
+//		SchemeGroupVersion = schema.GroupVersion{Group: "my.api.group", Version: "v1"}
+//		SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion}
+//		AddToScheme = SchemeBuilder.AddToScheme
+//	)
+//
+//	func init() {
+//		SchemeBuilder.Register(&MyType{}, &MyTypeList{})
+//	}
+//
+// This is also true of the built-in Kubernetes types. Then, in the entrypoint for
+// your manager, assemble the scheme containing exactly the types you need,
+// panicking if scheme registration failed. For instance, if our controller needs
+// types from the core/v1 API group (e.g. Pod), plus types from my.api.group/v1:
+//
+//	var (
+//		scheme *runtime.Scheme = runtime.NewScheme()
+//	)
+//
+//	func init() {
+//		utilruntime.Must(myapigroupv1.AddToScheme(scheme))
+//		utilruntime.Must(kubernetesscheme.AddToScheme(scheme))
+//	}
+//
+//	func main() {
+//		mgr := controllers.NewManager(context.Background(), controllers.GetConfigOrDie(), manager.Options{
+//			Scheme: scheme,
+//		})
+//		// ...
+//	}
+package scheme
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// Builder builds a new Scheme for mapping go types to Kubernetes GroupVersionKinds.
+type Builder struct {
+	GroupVersion schema.GroupVersion
+	runtime.SchemeBuilder
+}
+
+// Register adds one or more objects to the SchemeBuilder so they can be added to a Scheme. Register mutates bld.
+func (bld *Builder) Register(object ...runtime.Object) *Builder {
+	bld.SchemeBuilder.Register(func(scheme *runtime.Scheme) error {
+		scheme.AddKnownTypes(bld.GroupVersion, object...)
+		metav1.AddToGroupVersion(scheme, bld.GroupVersion)
+		return nil
+	})
+	return bld
+}
+
+// RegisterAll registers all types from the Builder argument. RegisterAll mutates bld.
+func (bld *Builder) RegisterAll(b *Builder) *Builder {
+	bld.SchemeBuilder = append(bld.SchemeBuilder, b.SchemeBuilder...)
+ return bld +} + +// AddToScheme adds all registered types to s. +func (bld *Builder) AddToScheme(s *runtime.Scheme) error { + return bld.SchemeBuilder.AddToScheme(s) +} + +// Build returns a new Scheme containing the registered types. +func (bld *Builder) Build() (*runtime.Scheme, error) { + s := runtime.NewScheme() + return s, bld.AddToScheme(s) +} diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go index a6c41936..6a13cf2d 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/decode.go @@ -75,6 +75,8 @@ import ( // either be any string type, an integer, implement json.Unmarshaler, or // implement encoding.TextUnmarshaler. // +// If the JSON-encoded data contain a syntax error, Unmarshal returns a SyntaxError. +// // If a JSON value is not appropriate for a given target type, // or if a JSON number overflows the target type, Unmarshal // skips that field and completes the unmarshaling as best it can. @@ -85,14 +87,13 @@ import ( // // The JSON null value unmarshals into an interface, map, pointer, or slice // by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect +// “not present,” unmarshaling a JSON null into any other Go type has no effect // on the value and produces no error. // // When unmarshaling quoted strings, invalid UTF-8 or // invalid UTF-16 surrogate pairs are not treated as an error. // Instead, they are replaced by the Unicode replacement // character U+FFFD. -// func Unmarshal(data []byte, v any, opts ...UnmarshalOpt) error { // Check for well-formedness. // Avoids filling out half a data structure diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go index 1f5e3e44..5b67251f 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/encode.go @@ -77,31 +77,31 @@ import ( // // Examples of struct field tags and their meanings: // -// // Field appears in JSON as key "myName". -// Field int `json:"myName"` +// // Field appears in JSON as key "myName". +// Field int `json:"myName"` // -// // Field appears in JSON as key "myName" and -// // the field is omitted from the object if its value is empty, -// // as defined above. -// Field int `json:"myName,omitempty"` +// // Field appears in JSON as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `json:"myName,omitempty"` // -// // Field appears in JSON as key "Field" (the default), but -// // the field is skipped if empty. -// // Note the leading comma. -// Field int `json:",omitempty"` +// // Field appears in JSON as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `json:",omitempty"` // -// // Field is ignored by this package. -// Field int `json:"-"` +// // Field is ignored by this package. +// Field int `json:"-"` // -// // Field appears in JSON as key "-". -// Field int `json:"-,"` +// // Field appears in JSON as key "-". +// Field int `json:"-,"` // // The "string" option signals that a field is stored as JSON inside a // JSON-encoded string. It applies only to fields of string, floating point, // integer, or boolean types. 
This extra level of encoding is sometimes used // when communicating with JavaScript programs: // -// Int64String int64 `json:",string"` +// Int64String int64 `json:",string"` // // The key name will be used if it's a non-empty string consisting of // only Unicode letters, digits, and ASCII punctuation except quotation @@ -154,7 +154,6 @@ import ( // JSON cannot represent cyclic data structures and Marshal does not // handle them. Passing cyclic structures to Marshal will result in // an error. -// func Marshal(v any) ([]byte, error) { e := newEncodeState() @@ -784,7 +783,7 @@ func (me mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { if e.ptrLevel++; e.ptrLevel > startDetectingCyclesAfter { // We're a large number of nested ptrEncoder.encode calls deep; // start checking if we've run into a pointer cycle. - ptr := v.Pointer() + ptr := v.UnsafePointer() if _, ok := e.ptrSeen[ptr]; ok { e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) } @@ -877,9 +876,9 @@ func (se sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) { // Here we use a struct to memorize the pointer to the first element of the slice // and its length. ptr := struct { - ptr uintptr + ptr interface{} // always an unsafe.Pointer, but avoids a dependency on package unsafe len int - }{v.Pointer(), v.Len()} + }{v.UnsafePointer(), v.Len()} if _, ok := e.ptrSeen[ptr]; ok { e.error(&UnsupportedValueError{v, fmt.Sprintf("encountered a cycle via %s", v.Type())}) } diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go index 9e170127..ab249b2b 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/fold.go @@ -24,8 +24,9 @@ const ( // 4) simpleLetterEqualFold, no specials, no non-letters. // // The letters S and K are special because they map to 3 runes, not just 2: -// * S maps to s and to U+017F 'ſ' Latin small letter long s -// * k maps to K and to U+212A 'K' Kelvin sign +// - S maps to s and to U+017F 'ſ' Latin small letter long s +// - k maps to K and to U+212A 'K' Kelvin sign +// // See https://play.golang.org/p/tTxjOc0OGo // // The returned function is specialized for matching against s and diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go index dbaa821b..22fc6922 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/scanner.go @@ -27,6 +27,7 @@ func Valid(data []byte) bool { // checkValid verifies that data is valid JSON-encoded data. // scan is passed in for use by checkValid to avoid an allocation. +// checkValid returns nil or a SyntaxError. func checkValid(data []byte, scan *scanner) error { scan.reset() for _, c := range data { @@ -42,6 +43,7 @@ func checkValid(data []byte, scan *scanner) error { } // A SyntaxError is a description of a JSON syntax error. +// Unmarshal will return a SyntaxError if the JSON can't be parsed. 
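+// Callers can detect it with errors.As; a sketch (the variable names are
+// illustrative):
+//
+//	var serr *SyntaxError
+//	if err := Unmarshal(data, &v); errors.As(err, &serr) {
+//		log.Printf("invalid JSON at offset %d: %v", serr.Offset, err)
+//	}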
type SyntaxError struct { msg string // description of error Offset int64 // error occurred after reading Offset bytes diff --git a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go index 6775b4cf..1967755a 100644 --- a/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go +++ b/vendor/sigs.k8s.io/json/internal/golang/encoding/json/stream.go @@ -289,7 +289,6 @@ var _ Unmarshaler = (*RawMessage)(nil) // Number, for JSON numbers // string, for JSON string literals // nil, for JSON null -// type Token any */ diff --git a/vendor/sigs.k8s.io/json/json.go b/vendor/sigs.k8s.io/json/json.go index d3a42b42..e8f31b16 100644 --- a/vendor/sigs.k8s.io/json/json.go +++ b/vendor/sigs.k8s.io/json/json.go @@ -34,13 +34,13 @@ type Decoder interface { } // NewDecoderCaseSensitivePreserveInts returns a decoder that matches the behavior of encoding/json#NewDecoder, with the following changes: -// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) -// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. -// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if -// the JSON data does not contain a "." character and parses as an integer successfully and -// does not overflow int64. Otherwise, the number is unmarshaled as a float64. -// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, -// but will be recognizeable by this package's IsSyntaxError() function. +// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) +// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. +// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if +// the JSON data does not contain a "." character and parses as an integer successfully and +// does not overflow int64. Otherwise, the number is unmarshaled as a float64. +// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, +// but will be recognizeable by this package's IsSyntaxError() function. func NewDecoderCaseSensitivePreserveInts(r io.Reader) Decoder { d := internaljson.NewDecoder(r) d.CaseSensitive() @@ -51,13 +51,13 @@ func NewDecoderCaseSensitivePreserveInts(r io.Reader) Decoder { // UnmarshalCaseSensitivePreserveInts parses the JSON-encoded data and stores the result in the value pointed to by v. // // UnmarshalCaseSensitivePreserveInts matches the behavior of encoding/json#Unmarshal, with the following changes: -// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) -// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. -// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if -// the JSON data does not contain a "." character and parses as an integer successfully and -// does not overflow int64. Otherwise, the number is unmarshaled as a float64. -// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, -// but will be recognizeable by this package's IsSyntaxError() function. 
+// - When unmarshaling into a struct, JSON keys must case-sensitively match `json` tag names (for tagged struct fields) +// or struct field names (for untagged struct fields), or they are treated as unknown fields and discarded. +// - When unmarshaling a number into an interface value, it is unmarshaled as an int64 if +// the JSON data does not contain a "." character and parses as an integer successfully and +// does not overflow int64. Otherwise, the number is unmarshaled as a float64. +// - If a syntax error is returned, it will not be of type encoding/json#SyntaxError, +// but will be recognizeable by this package's IsSyntaxError() function. func UnmarshalCaseSensitivePreserveInts(data []byte, v interface{}) error { return internaljson.Unmarshal( data,