diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml new file mode 100644 index 0000000000..a9a7d2ed8d --- /dev/null +++ b/.github/workflows/dependency-review.yml @@ -0,0 +1,18 @@ +name: 'Dependency Review' +on: [pull_request] +permissions: + contents: read + pull-requests: write +jobs: + dependency-review: + runs-on: ubuntu-latest + steps: + - name: 'Checkout Repository' + uses: actions/checkout@v4 + - name: Dependency Review + uses: actions/dependency-review-action@v4 + with: + comment-summary-in-pr: on-failure + # Licenses need to come from https://spdx.org/licenses/ + deny-licenses: GPL-1.0-only, GPL-1.0-or-later, GPL-2.0-only, GPL-2.0-or-later, GPL-3.0-only, GPL-3.0-or-later + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e085749b68..62daf171f2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,3 +1,15 @@ # Contributing to Flyte -For information related to contributing to Flyte, please check out the [Contributing to Flyte](https://docs.flyte.org/en/latest/community/contribute.html) section of the documentation. +For information related to contributing to Flyte, please check out the [Contributing to Flyte](https://docs.flyte.org/en/latest/community/contribute/index.html) section of the documentation. + +## Recommendation Order (For Beginners) +* Setup dev environment +* Read the following and run at least 5 examples. Pay close attention to the generated outputs, the Graph view, task + logs, etc. 
Repeat with as many examples as you need to have an initial understanding of what an execution looks like: + * https://docs.flyte.org/en/latest/user_guide/introduction.html + * https://docs.flyte.org/en/latest/flytesnacks/userguide.html +* Finish reading the [Concepts](https://docs.flyte.org/en/latest/user_guide/concepts/main_concepts/index.html) +* Finish reading the [Control Plane](https://docs.flyte.org/en/latest/user_guide/concepts/control_plane/index.html) +* Finish reading the [Component Architecture](https://docs.flyte.org/en/latest/user_guide/concepts/component_architecture/index.html) +* Choose 2 good first issues from the following and start solving them with the knowledge you have gained. +* Familiarize yourself with using [ImageSpec to push images to localhost for development](https://docs.flyte.org/en/latest/user_guide/customizing_dependencies/imagespec.html#image-spec-example) diff --git a/charts/flyte-binary/README.md b/charts/flyte-binary/README.md index e702a7fcd7..e7df1018db 100644 --- a/charts/flyte-binary/README.md +++ b/charts/flyte-binary/README.md @@ -116,6 +116,8 @@ Chart for basic single Flyte executable deployment | flyte-core-components.admin.disableClusterResourceManager | bool | `false` | | | flyte-core-components.admin.disableScheduler | bool | `false` | | | flyte-core-components.admin.disabled | bool | `false` | | +| flyte-core-components.admin.seedProjectsWithDetails[0].description | string | `"Default project setup."` | | +| flyte-core-components.admin.seedProjectsWithDetails[0].name | string | `"flytesnacks"` | | | flyte-core-components.admin.seedProjects[0] | string | `"flytesnacks"` | | | flyte-core-components.dataCatalog.disabled | bool | `false` | | | flyte-core-components.propeller.disableWebhook | bool | `false` | | diff --git a/charts/flyte-binary/values.yaml b/charts/flyte-binary/values.yaml index 38509251a2..eee01d16c6 100644 --- a/charts/flyte-binary/values.yaml +++ b/charts/flyte-binary/values.yaml @@ -20,6 +20,14 @@ 
flyte-core-components: # seedProjects flyte projects to create by default seedProjects: - flytesnacks + # seedProjectsWithDetails flyte projects to create by default with description + # If there is an overlap between seedProjects and seedProjectsWithDetails, + # the description provided in seedProjectsWithDetails will take precedence. + # For seedProjects without a corresponding description in seedProjectsWithDetails, + # a default description will be auto-generated for the project. + seedProjectsWithDetails: + - name: flytesnacks + description: Default project setup. # propeller Configuration to disable propeller or any of its components propeller: # disabled Disables flytepropeller diff --git a/charts/flyte-core/README.md b/charts/flyte-core/README.md index 3fa8d2dc2a..6aed892810 100644 --- a/charts/flyte-core/README.md +++ b/charts/flyte-core/README.md @@ -55,10 +55,25 @@ helm install gateway bitnami/contour -n flyte | Key | Type | Default | Description | |-----|------|---------|-------------| -| cloud_events.aws.region | string | `"us-east-2"` | | +| cloud_events.aws | object | `{"region":"us-east-2"}` | Configuration for sending cloud events to AWS SNS | | cloud_events.enable | bool | `false` | | | cloud_events.eventsPublisher.eventTypes[0] | string | `"all"` | | | cloud_events.eventsPublisher.topicName | string | `"arn:aws:sns:us-east-2:123456:123-my-topic"` | | +| cloud_events.gcp | object | `{"region":"us-east1"}` | Configuration for sending cloud events to GCP Pub Sub | +| cloud_events.kafka | object | `{"brokers":["mybroker:443"],"saslConfig":{"enabled":false,"handshake":true,"mechanism":"PLAIN","password":"","passwordPath":"","user":"kafka"},"tlsConfig":{"certPath":"/etc/ssl/certs/kafka-client.crt","enabled":false,"keyPath":"/etc/ssl/certs/kafka-client.key"},"version":"3.7.0"}` | Configuration for sending cloud events to Kafka | +| cloud_events.kafka.brokers | list | `["mybroker:443"]` | The kafka brokers to talk to | +| cloud_events.kafka.saslConfig | 
object | `{"enabled":false,"handshake":true,"mechanism":"PLAIN","password":"","passwordPath":"","user":"kafka"}` | SASL based authentication | +| cloud_events.kafka.saslConfig.enabled | bool | `false` | Whether to use SASL authentication | +| cloud_events.kafka.saslConfig.handshake | bool | `true` | Whether to send the SASL handshake first | +| cloud_events.kafka.saslConfig.mechanism | string | `"PLAIN"` | Which SASL mechanism to use. Defaults to PLAIN | +| cloud_events.kafka.saslConfig.password | string | `""` | The password for the kafka user | +| cloud_events.kafka.saslConfig.passwordPath | string | `""` | Optional mount path of file containing the kafka password. | +| cloud_events.kafka.saslConfig.user | string | `"kafka"` | The kafka user | +| cloud_events.kafka.tlsConfig | object | `{"certPath":"/etc/ssl/certs/kafka-client.crt","enabled":false,"keyPath":"/etc/ssl/certs/kafka-client.key"}` | Certificate based authentication | +| cloud_events.kafka.tlsConfig.certPath | string | `"/etc/ssl/certs/kafka-client.crt"` | Path to the client certificate | +| cloud_events.kafka.tlsConfig.enabled | bool | `false` | Whether to use certificate based authentication or TLS | +| cloud_events.kafka.tlsConfig.keyPath | string | `"/etc/ssl/certs/kafka-client.key"` | Path to the client private key | +| cloud_events.kafka.version | string | `"3.7.0"` | The version of Kafka | | cloud_events.type | string | `"aws"` | | | cluster_resource_manager | object | 
`{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"nodeSelector":{},"podAnnotations":{},"podEnv":{},"podLabels":{},"prometheus":{"enabled":false,"path":"/metrics","port":10254},"resources":{},"service_account_name":"flyteadmin","standaloneDeployment":false,"templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory }}\n"}]}` | Configuration for the Cluster resource manager component. This is an optional component, that enables automatic cluster configuration. This is useful to set default quotas, manage namespaces etc that map to a project/domain | | cluster_resource_manager.config | object | `{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}}` | Configmap for ClusterResource parameters | @@ -230,7 +245,6 @@ helm install gateway bitnami/contour -n flyte | flytepropeller.additionalVolumeMounts | list | `[]` | Appends additional volume mounts to the main container's spec. 
May include template values. | | flytepropeller.additionalVolumes | list | `[]` | Appends additional volumes to the deployment spec. May include template values. | | flytepropeller.affinity | object | `{}` | affinity for Flytepropeller deployment | -| flytepropeller.cacheSizeMbs | int | `0` | | | flytepropeller.clusterName | string | `""` | Defines the cluster name used in events sent to Admin | | flytepropeller.configPath | string | `"/etc/flyte/config/*.yaml"` | Default regex string for searching configuration files | | flytepropeller.createCRDs | bool | `true` | Whether to install the flyteworkflows CRD with helm | @@ -292,7 +306,7 @@ helm install gateway bitnami/contour -n flyte | sparkoperator.enabled | bool | `false` | - enable or disable Sparkoperator deployment installation | | sparkoperator.plugin_config | object | `{"plugins":{"spark":{"spark-config-default":[{"spark.hadoop.fs.s3a.aws.credentials.provider":"com.amazonaws.auth.DefaultAWSCredentialsProviderChain"},{"spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version":"2"},{"spark.kubernetes.allocation.batch.size":"50"},{"spark.hadoop.fs.s3a.acl.default":"BucketOwnerFullControl"},{"spark.hadoop.fs.s3n.impl":"org.apache.hadoop.fs.s3a.S3AFileSystem"},{"spark.hadoop.fs.AbstractFileSystem.s3n.impl":"org.apache.hadoop.fs.s3a.S3A"},{"spark.hadoop.fs.s3.impl":"org.apache.hadoop.fs.s3a.S3AFileSystem"},{"spark.hadoop.fs.AbstractFileSystem.s3.impl":"org.apache.hadoop.fs.s3a.S3A"},{"spark.hadoop.fs.s3a.impl":"org.apache.hadoop.fs.s3a.S3AFileSystem"},{"spark.hadoop.fs.AbstractFileSystem.s3a.impl":"org.apache.hadoop.fs.s3a.S3A"},{"spark.hadoop.fs.s3a.multipart.threshold":"536870912"},{"spark.blacklist.enabled":"true"},{"spark.blacklist.timeout":"5m"},{"spark.task.maxfailures":"8"}]}}}` | Spark plugin configuration | | sparkoperator.plugin_config.plugins.spark.spark-config-default | list | 
`[{"spark.hadoop.fs.s3a.aws.credentials.provider":"com.amazonaws.auth.DefaultAWSCredentialsProviderChain"},{"spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version":"2"},{"spark.kubernetes.allocation.batch.size":"50"},{"spark.hadoop.fs.s3a.acl.default":"BucketOwnerFullControl"},{"spark.hadoop.fs.s3n.impl":"org.apache.hadoop.fs.s3a.S3AFileSystem"},{"spark.hadoop.fs.AbstractFileSystem.s3n.impl":"org.apache.hadoop.fs.s3a.S3A"},{"spark.hadoop.fs.s3.impl":"org.apache.hadoop.fs.s3a.S3AFileSystem"},{"spark.hadoop.fs.AbstractFileSystem.s3.impl":"org.apache.hadoop.fs.s3a.S3A"},{"spark.hadoop.fs.s3a.impl":"org.apache.hadoop.fs.s3a.S3AFileSystem"},{"spark.hadoop.fs.AbstractFileSystem.s3a.impl":"org.apache.hadoop.fs.s3a.S3A"},{"spark.hadoop.fs.s3a.multipart.threshold":"536870912"},{"spark.blacklist.enabled":"true"},{"spark.blacklist.timeout":"5m"},{"spark.task.maxfailures":"8"}]` | Spark default configuration | -| storage | object | `{"bucketName":"my-s3-bucket","custom":{},"enableMultiContainer":false,"gcs":null,"limits":{"maxDownloadMBs":10},"s3":{"accessKey":"","authType":"iam","region":"us-east-1","secretKey":""},"type":"sandbox"}` | ---------------------------------------------------- STORAGE SETTINGS | +| storage | object | `{"bucketName":"my-s3-bucket","cache":{"maxSizeMBs":0,"targetGCPercent":70},"custom":{},"enableMultiContainer":false,"gcs":null,"limits":{"maxDownloadMBs":10},"s3":{"accessKey":"","authType":"iam","region":"us-east-1","secretKey":""},"type":"sandbox"}` | ---------------------------------------------------- STORAGE SETTINGS | | storage.bucketName | string | `"my-s3-bucket"` | bucketName defines the storage bucket flyte will use. Required for all types except for sandbox. | | storage.custom | object | `{}` | Settings for storage type custom. See https://github.com/graymeta/stow for supported storage providers/settings. 
| | storage.enableMultiContainer | bool | `false` | toggles multi-container storage config | diff --git a/charts/flyte-core/templates/_helpers.tpl b/charts/flyte-core/templates/_helpers.tpl index f7b50c0b29..87a7615177 100755 --- a/charts/flyte-core/templates/_helpers.tpl +++ b/charts/flyte-core/templates/_helpers.tpl @@ -237,4 +237,7 @@ storage: enable-multicontainer: {{ .Values.storage.enableMultiContainer }} limits: maxDownloadMBs: {{ .Values.storage.limits.maxDownloadMBs }} + cache: + max_size_mbs: {{ .Values.storage.cache.maxSizeMBs }} + target_gc_percent: {{ .Values.storage.cache.targetGCPercent }} {{- end }} diff --git a/charts/flyte-core/templates/propeller/configmap.yaml b/charts/flyte-core/templates/propeller/configmap.yaml index 3e1b94ef2c..cceb748274 100644 --- a/charts/flyte-core/templates/propeller/configmap.yaml +++ b/charts/flyte-core/templates/propeller/configmap.yaml @@ -47,11 +47,6 @@ data: {{- end }} {{- end }} storage.yaml: | {{ tpl (include "storage" .) $ | nindent 4 }} - cache.yaml: | - storage: - cache: - max_size_mbs: {{ .Values.flytepropeller.cacheSizeMbs }} - target_gc_percent: 70 {{- with .Values.configmap.task_logs }} task_logs.yaml: | {{ tpl (toYaml .) $ | nindent 4 }} {{- end }} diff --git a/charts/flyte-core/values-eks.yaml b/charts/flyte-core/values-eks.yaml index 5a1cc1b94d..904a30bc78 100644 --- a/charts/flyte-core/values-eks.yaml +++ b/charts/flyte-core/values-eks.yaml @@ -102,7 +102,6 @@ flytepropeller: cpu: 1 ephemeral-storage: 1Gi memory: 2Gi - cacheSizeMbs: 1024 # -- Sets priorityClassName for propeller pod(s). 
priorityClassName: "system-cluster-critical" affinity: @@ -191,6 +190,8 @@ storage: bucketName: "{{ .Values.userSettings.bucketName }}" s3: region: "{{ .Values.userSettings.accountRegion }}" + cache: + maxSizeMBs: 1024 db: datacatalog: diff --git a/charts/flyte-core/values-keycloak-idp-flyteclients-without-browser.yaml b/charts/flyte-core/values-keycloak-idp-flyteclients-without-browser.yaml index 961611b56c..1e088ea4b7 100644 --- a/charts/flyte-core/values-keycloak-idp-flyteclients-without-browser.yaml +++ b/charts/flyte-core/values-keycloak-idp-flyteclients-without-browser.yaml @@ -215,7 +215,6 @@ flytepropeller: cpu: 10m ephemeral-storage: 50Mi memory: 100Mi - cacheSizeMbs: 0 # -- Default regex string for searching configuration files configPath: /etc/flyte/config/*.yaml @@ -399,6 +398,8 @@ storage: # -- default limits being applied to storage config limits: maxDownloadMBs: 10 + cache: + maxSizeMBs: 0 # Database configuration(These are the values for a pgdb instance with hostname of postgres-flyte and postgres/password creds) db: diff --git a/charts/flyte-core/values.yaml b/charts/flyte-core/values.yaml index 33ef574690..9faaed731a 100755 --- a/charts/flyte-core/values.yaml +++ b/charts/flyte-core/values.yaml @@ -319,7 +319,6 @@ flytepropeller: cpu: 10m ephemeral-storage: 50Mi memory: 100Mi - cacheSizeMbs: 0 # -- Error reporting terminationMessagePolicy: FallbackToLogsOnError # -- Default regex string for searching configuration files @@ -610,6 +609,9 @@ storage: # -- default limits being applied to storage config limits: maxDownloadMBs: 10 + cache: + maxSizeMBs: 0 + targetGCPercent: 70 # Database configuration db: @@ -944,15 +946,48 @@ external_events: # an SNS topic (or gcp equivalent) cloud_events: enable: false - type: aws - aws: - region: us-east-2 eventsPublisher: # Make sure this is not a fifo queue. Admin does not yet support # writing to fifo sns topics. topicName: "arn:aws:sns:us-east-2:123456:123-my-topic" eventTypes: - all # Or workflow, node, task. 
Or "*" + type: aws + # -- Configuration for sending cloud events to AWS SNS + aws: + region: us-east-2 + # -- Configuration for sending cloud events to GCP Pub Sub + gcp: + region: us-east1 + # -- Configuration for sending cloud events to Kafka + kafka: + # -- The version of Kafka + version: "3.7.0" + # -- The kafka brokers to talk to + brokers: + - mybroker:443 + # -- SASL based authentication + saslConfig: + # -- Whether to use SASL authentication + enabled: false + # -- The kafka user + user: kafka + # -- The password for the kafka user + password: "" + # -- Optional mount path of file containing the kafka password. + passwordPath: "" + # -- Whether to send the SASL handshake first + handshake: true + # -- Which SASL mechanism to use. Defaults to PLAIN + mechanism: PLAIN + # -- Certificate based authentication + tlsConfig: + # -- Whether to use certificate based authentication or TLS + enabled: false + # -- Path to the client certificate + certPath: /etc/ssl/certs/kafka-client.crt + # -- Path to the client private key + keyPath: /etc/ssl/certs/kafka-client.key # -- Configuration for the Cluster resource manager component. This is an optional component, that enables automatic # cluster configuration. This is useful to set default quotas, manage namespaces etc that map to a project/domain diff --git a/charts/flyte/README.md b/charts/flyte/README.md index 679923d8cd..944c624ab6 100644 --- a/charts/flyte/README.md +++ b/charts/flyte/README.md @@ -71,7 +71,7 @@ helm upgrade -f values-sandbox.yaml flyte . 
| contour.tolerations | list | `[]` | tolerations for Contour deployment | | daskoperator | object | `{"enabled":false}` | Optional: Dask Plugin using the Dask Operator | | daskoperator.enabled | bool | `false` | - enable or disable the dask operator deployment installation | -| flyte | object | `{"cluster_resource_manager":{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory 
}}\n"}]},"common":{"databaseSecret":{"name":"","secretManifest":{}},"flyteNamespaceTemplate":{"enabled":false},"ingress":{"albSSLRedirect":false,"annotations":{"nginx.ingress.kubernetes.io/app-root":"/console"},"enabled":true,"host":"","separateGrpcIngress":false,"separateGrpcIngressAnnotations":{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"},"tls":{"enabled":false},"webpackHMR":true}},"configmap":{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.2","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s
","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service","echo"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" 
}}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}},"datacatalog":{"affinity":{},"configPath":"/etc/datacatalog/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/datacatalog","tag":"v1.13.2"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"NodePort"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"db":{"admin":{"database":{"dbname":"flyteadmin","host":"postgres","port":5432,"username":"postgres"}},"datacatalog":{"database":{"dbname":"datacatalog","host":"postgres","port":5432,"username":"postgres"}}},"deployRedoc":true,"flyteadmin":{"additionalVolumeMounts":[],"additionalVolumes":[],"affinity":{},"configPath":"/etc/flyte/config/*.yaml","env":[],"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteadmin","tag":"v1.13.2"},"initialProjects":["flytesnacks","flytetester","flyteexamples"],"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"loadBalancerSourceRanges":[],"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flyteconsole":{"affinity":{},"ga":{"enabled":true,"tracking_id":"G-0QW4DJWJ20"},"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteconsole","tag":"v1.17.1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","memory":"275Mi"},"requests
":{"cpu":"10m","memory":"250Mi"}},"service":{"annotations":{},"type":"ClusterIP"},"tolerations":[]},"flytepropeller":{"affinity":{},"cacheSizeMbs":0,"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytepropeller","tag":"v1.13.2"},"manager":false,"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"200m","ephemeral-storage":"100Mi","memory":"200Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flytescheduler":{"affinity":{},"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytescheduler","tag":"v1.13.2"},"nodeSelector":{},"podAnnotations":{},"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"storage":{"bucketName":"my-s3-bucket","custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"},"webhook":{"enabled":true,"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]}},"workflow_notifications":{"config":{},"enabled":false},"workflow_scheduler":{"enabled":true,"type":"native"}}` | ------------------------------------------------------------------- Core System settings This section consists of Core components of Flyte and their deployment settings. 
This includes FlyteAdmin service, Datacatalog, FlytePropeller and Flyteconsole | +| flyte | object | `{"cluster_resource_manager":{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory 
}}\n"}]},"common":{"databaseSecret":{"name":"","secretManifest":{}},"flyteNamespaceTemplate":{"enabled":false},"ingress":{"albSSLRedirect":false,"annotations":{"nginx.ingress.kubernetes.io/app-root":"/console"},"enabled":true,"host":"","separateGrpcIngress":false,"separateGrpcIngressAnnotations":{"nginx.ingress.kubernetes.io/backend-protocol":"GRPC"},"tls":{"enabled":false},"webpackHMR":true}},"configmap":{"adminServer":{"auth":{"appAuth":{"thirdPartyConfig":{"flyteClient":{"clientId":"flytectl","redirectUri":"http://localhost:53593/callback","scopes":["offline","all"]}}},"authorizedUris":["https://localhost:30081","http://flyteadmin:80","http://flyteadmin.flyte.svc.cluster.local:80"],"userAuth":{"openId":{"baseUrl":"https://accounts.google.com","clientId":"657465813211-6eog7ek7li5k7i7fvgv2921075063hpe.apps.googleusercontent.com","scopes":["profile","openid"]}}},"flyteadmin":{"eventVersion":2,"metadataStoragePrefix":["metadata","admin"],"metricsScope":"flyte:","profilerPort":10254,"roleNameKey":"iam.amazonaws.com/role","testing":{"host":"http://flyteadmin"}},"server":{"grpc":{"port":8089},"httpPort":8088,"security":{"allowCors":true,"allowedHeaders":["Content-Type","flyte-authorization"],"allowedOrigins":["*"],"secure":false,"useAuth":false}}},"catalog":{"catalog-cache":{"endpoint":"datacatalog:89","insecure":true,"type":"datacatalog"}},"console":{"BASE_URL":"/console","CONFIG_DIR":"/etc/flyte/config"},"copilot":{"plugins":{"k8s":{"co-pilot":{"image":"cr.flyte.org/flyteorg/flytecopilot:v1.13.2","name":"flyte-copilot-","start-timeout":"30s"}}}},"core":{"propeller":{"downstream-eval-duration":"30s","enable-admin-launcher":true,"leader-election":{"enabled":true,"lease-duration":"15s","lock-config-map":{"name":"propeller-leader","namespace":"flyte"},"renew-deadline":"10s","retry-period":"2s"},"limit-namespace":"all","max-workflow-retries":30,"metadata-prefix":"metadata/propeller","metrics-prefix":"flyte","prof-port":10254,"queue":{"batch-size":-1,"batching-interval":"2s
","queue":{"base-delay":"5s","capacity":1000,"max-delay":"120s","rate":100,"type":"maxof"},"sub-queue":{"capacity":100,"rate":10,"type":"bucket"},"type":"batch"},"rawoutput-prefix":"s3://my-s3-bucket/","workers":4,"workflow-reeval-duration":"30s"},"webhook":{"certDir":"/etc/webhook/certs","serviceName":"flyte-pod-webhook"}},"datacatalogServer":{"application":{"grpcPort":8089,"grpcServerReflection":true,"httpPort":8080},"datacatalog":{"metrics-scope":"datacatalog","profiler-port":10254,"storage-prefix":"metadata/datacatalog"}},"domain":{"domains":[{"id":"development","name":"development"},{"id":"staging","name":"staging"},{"id":"production","name":"production"}]},"enabled_plugins":{"tasks":{"task-plugins":{"default-for-task-types":{"container":"container","container_array":"k8s-array","sensor":"agent-service","sidecar":"sidecar"},"enabled-plugins":["container","sidecar","k8s-array","agent-service","echo"]}}},"k8s":{"plugins":{"k8s":{"default-cpus":"100m","default-env-from-configmaps":[],"default-env-from-secrets":[],"default-env-vars":[{"FLYTE_AWS_ENDPOINT":"http://minio.flyte:9000"},{"FLYTE_AWS_ACCESS_KEY_ID":"minio"},{"FLYTE_AWS_SECRET_ACCESS_KEY":"miniostorage"}],"default-memory":"200Mi"}}},"logger":{"logger":{"level":5,"show-source":true}},"remoteData":{"remoteData":{"region":"us-east-1","scheme":"local","signedUrls":{"durationMinutes":3}}},"resource_manager":{"propeller":{"resourcemanager":{"redis":null,"type":"noop"}}},"task_logs":{"plugins":{"logs":{"cloudwatch-enabled":false,"kubernetes-enabled":true,"kubernetes-template-uri":"http://localhost:30082/#/log/{{ \"{{\" }} .namespace {{ \"}}\" }}/{{ \"{{\" }} .podName {{ \"}}\" }}/pod?namespace={{ \"{{\" }} .namespace {{ \"}}\" 
}}"}}},"task_resource_defaults":{"task_resources":{"defaults":{"cpu":"100m","memory":"200Mi","storage":"5Mi"},"limits":{"cpu":2,"gpu":1,"memory":"1Gi","storage":"20Mi"}}}},"datacatalog":{"affinity":{},"configPath":"/etc/datacatalog/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/datacatalog","tag":"v1.13.2"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"NodePort"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"db":{"admin":{"database":{"dbname":"flyteadmin","host":"postgres","port":5432,"username":"postgres"}},"datacatalog":{"database":{"dbname":"datacatalog","host":"postgres","port":5432,"username":"postgres"}}},"deployRedoc":true,"flyteadmin":{"additionalVolumeMounts":[],"additionalVolumes":[],"affinity":{},"configPath":"/etc/flyte/config/*.yaml","env":[],"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteadmin","tag":"v1.13.2"},"initialProjects":["flytesnacks","flytetester","flyteexamples"],"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"loadBalancerSourceRanges":[],"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flyteconsole":{"affinity":{},"ga":{"enabled":true,"tracking_id":"G-0QW4DJWJ20"},"image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flyteconsole","tag":"v1.17.1"},"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"500m","memory":"275Mi"},"requests
":{"cpu":"10m","memory":"250Mi"}},"service":{"annotations":{},"type":"ClusterIP"},"tolerations":[]},"flytepropeller":{"affinity":{},"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytepropeller","tag":"v1.13.2"},"manager":false,"nodeSelector":{},"podAnnotations":{},"replicaCount":1,"resources":{"limits":{"cpu":"200m","ephemeral-storage":"100Mi","memory":"200Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"flytescheduler":{"affinity":{},"configPath":"/etc/flyte/config/*.yaml","image":{"pullPolicy":"IfNotPresent","repository":"cr.flyte.org/flyteorg/flytescheduler","tag":"v1.13.2"},"nodeSelector":{},"podAnnotations":{},"resources":{"limits":{"cpu":"250m","ephemeral-storage":"100Mi","memory":"500Mi"},"requests":{"cpu":"10m","ephemeral-storage":"50Mi","memory":"50Mi"}},"secrets":{},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]},"tolerations":[]},"storage":{"bucketName":"my-s3-bucket","cache":{"maxSizeMBs":0,"targetGCPercent":70},"custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"},"webhook":{"enabled":true,"service":{"annotations":{"projectcontour.io/upstream-protocol.h2c":"grpc"},"type":"ClusterIP"},"serviceAccount":{"annotations":{},"create":true,"imagePullSecrets":[]}},"workflow_notifications":{"config":{},"enabled":false},"workflow_scheduler":{"enabled":true,"type":"native"}}` | ------------------------------------------------------------------- Core System settings This section consists of Core components of Flyte and their deployment settings. 
This includes FlyteAdmin service, Datacatalog, FlytePropeller and Flyteconsole | | flyte.cluster_resource_manager | object | `{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"service_account_name":"flyteadmin","templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory }}\n"}]}` | Configuration for the Cluster resource manager component. This is an optional component, that enables automatic cluster configuration. This is useful to set default quotas, manage namespaces etc that map to a project/domain | | flyte.cluster_resource_manager.config.cluster_resources | object | `{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refresh":"5m","refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}` | ClusterResource parameters Refer to the [structure](https://pkg.go.dev/github.com/lyft/flyteadmin@v0.3.37/pkg/runtime/interfaces#ClusterResourceConfig) to customize. 
| | flyte.cluster_resource_manager.config.cluster_resources.standaloneDeployment | bool | `false` | Starts the cluster resource manager in standalone mode with requisite auth credentials to call flyteadmin service endpoints | @@ -185,7 +185,7 @@ helm upgrade -f values-sandbox.yaml flyte . | flyte.flytescheduler.serviceAccount.create | bool | `true` | Should a service account be created for Flytescheduler | | flyte.flytescheduler.serviceAccount.imagePullSecrets | list | `[]` | ImagePullSecrets to automatically assign to the service account | | flyte.flytescheduler.tolerations | list | `[]` | tolerations for Flytescheduler deployment | -| flyte.storage | object | `{"bucketName":"my-s3-bucket","custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"}` | ---------------------------------------------------- STORAGE SETTINGS | +| flyte.storage | object | `{"bucketName":"my-s3-bucket","cache":{"maxSizeMBs":0,"targetGCPercent":70},"custom":{},"gcs":null,"s3":{"region":"us-east-1"},"type":"sandbox"}` | ---------------------------------------------------- STORAGE SETTINGS | | flyte.storage.bucketName | string | `"my-s3-bucket"` | bucketName defines the storage bucket flyte will use. Required for all types except for sandbox. | | flyte.storage.custom | object | `{}` | Settings for storage type custom. See https://github.com/graymeta/stow for supported storage providers/settings. | | flyte.storage.gcs | string | `nil` | settings for storage type gcs | diff --git a/charts/flyte/values.yaml b/charts/flyte/values.yaml index cfdb31ae93..8231f5bda1 100755 --- a/charts/flyte/values.yaml +++ b/charts/flyte/values.yaml @@ -190,7 +190,6 @@ flyte: cpu: 10m ephemeral-storage: 50Mi memory: 50Mi - cacheSizeMbs: 0 # -- Default regex string for searching configuration files configPath: /etc/flyte/config/*.yaml @@ -333,6 +332,9 @@ flyte: # serviceAccountKey: "" # -- Settings for storage type custom. See https://github.com/graymeta/stow for supported storage providers/settings. 
custom: {} + cache: + maxSizeMBs: 0 + targetGCPercent: 70 # Database configuration db: diff --git a/cmd/single/config.go b/cmd/single/config.go index adbabe7ae5..28cdfdafc0 100644 --- a/cmd/single/config.go +++ b/cmd/single/config.go @@ -1,6 +1,9 @@ package single -import "github.com/flyteorg/flyte/flytestdlib/config" +import ( + adminRepositoriesConfig "github.com/flyteorg/flyte/flyteadmin/pkg/repositories/config" + "github.com/flyteorg/flyte/flytestdlib/config" +) //go:generate pflags Config --default-var=DefaultConfig @@ -21,10 +24,11 @@ type Propeller struct { } type Admin struct { - Disabled bool `json:"disabled" pflag:",Disables flyteadmin in the single binary mode"` - DisableScheduler bool `json:"disableScheduler" pflag:",Disables Native scheduler in the single binary mode"` - DisableClusterResourceManager bool `json:"disableClusterResourceManager" pflag:",Disables Cluster resource manager"` - SeedProjects []string `json:"seedProjects" pflag:",flyte projects to create by default."` + Disabled bool `json:"disabled" pflag:",Disables flyteadmin in the single binary mode"` + DisableScheduler bool `json:"disableScheduler" pflag:",Disables Native scheduler in the single binary mode"` + DisableClusterResourceManager bool `json:"disableClusterResourceManager" pflag:",Disables Cluster resource manager"` + SeedProjects []string `json:"seedProjects" pflag:",flyte projects to create by default."` + SeedProjectsWithDetails []adminRepositoriesConfig.SeedProject `json:"seedProjectsWithDetails" pflag:",,Detailed configuration for Flyte projects to be created by default."` } type DataCatalog struct { diff --git a/cmd/single/root.go b/cmd/single/root.go index 1665b1841f..348e1eb557 100644 --- a/cmd/single/root.go +++ b/cmd/single/root.go @@ -8,10 +8,12 @@ import ( "github.com/flyteorg/flyte/flytestdlib/logger" - "github.com/flyteorg/flyte/flytestdlib/config" - "github.com/flyteorg/flyte/flytestdlib/config/viper" "github.com/spf13/cobra" "github.com/spf13/pflag" + + 
"github.com/flyteorg/flyte/flytestdlib/config" + "github.com/flyteorg/flyte/flytestdlib/config/viper" + _ "github.com/flyteorg/flyte/flytestdlib/promutils" ) var ( diff --git a/cmd/single/start.go b/cmd/single/start.go index 1683fad4e1..f9e38a9626 100644 --- a/cmd/single/start.go +++ b/cmd/single/start.go @@ -22,6 +22,7 @@ import ( datacatalog "github.com/flyteorg/flyte/datacatalog/pkg/rpc/datacatalogservice" "github.com/flyteorg/flyte/flyteadmin/pkg/clusterresource" "github.com/flyteorg/flyte/flyteadmin/pkg/common" + adminRepositoriesConfig "github.com/flyteorg/flyte/flyteadmin/pkg/repositories/config" "github.com/flyteorg/flyte/flyteadmin/pkg/runtime" adminServer "github.com/flyteorg/flyte/flyteadmin/pkg/server" "github.com/flyteorg/flyte/flyteadmin/plugins" @@ -75,8 +76,9 @@ func startAdmin(ctx context.Context, cfg Admin) error { if len(cfg.SeedProjects) != 0 { projects = cfg.SeedProjects } - logger.Infof(ctx, "Seeding default projects...", projects) - if err := adminServer.SeedProjects(ctx, projects); err != nil { + seedProjects := adminRepositoriesConfig.MergeSeedProjectsWithUniqueNames(projects, cfg.SeedProjectsWithDetails) + logger.Infof(ctx, "Seeding default projects... 
%v", seedProjects) + if err := adminServer.SeedProjects(ctx, seedProjects); err != nil { return err } diff --git a/datacatalog/go.mod b/datacatalog/go.mod index 74424496bf..53fc2ce90f 100644 --- a/datacatalog/go.mod +++ b/datacatalog/go.mod @@ -38,7 +38,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/coocood/freecache v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect @@ -86,7 +86,6 @@ require ( github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-sqlite3 v1.14.17 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -96,10 +95,10 @@ require ( github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.53.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cast v1.4.1 // indirect @@ -119,9 +118,9 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect go.opentelemetry.io/proto/otlp v1.1.0 // indirect 
golang.org/x/crypto v0.25.0 // indirect - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.22.0 // indirect golang.org/x/term v0.22.0 // indirect @@ -132,7 +131,7 @@ require ( google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/datacatalog/go.sum b/datacatalog/go.sum index 0346b153a4..b4c75df142 100644 --- a/datacatalog/go.sum +++ b/datacatalog/go.sum @@ -73,8 +73,8 @@ github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= 
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -301,8 +301,6 @@ github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -330,15 +328,15 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod 
h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -439,8 +437,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw= +golang.org/x/exp 
v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -509,8 +507,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -749,8 +747,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod 
h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/deployment/eks/flyte_aws_scheduler_helm_generated.yaml b/deployment/eks/flyte_aws_scheduler_helm_generated.yaml index a5aca206ba..d7cb3500d6 100644 --- a/deployment/eks/flyte_aws_scheduler_helm_generated.yaml +++ b/deployment/eks/flyte_aws_scheduler_helm_generated.yaml @@ -189,6 +189,9 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 + cache: + max_size_mbs: 1024 + target_gc_percent: 70 task_resource_defaults.yaml: | task_resources: defaults: @@ -398,6 +401,9 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 + cache: + max_size_mbs: 1024 + target_gc_percent: 70 --- # Source: flyte-core/templates/propeller/configmap.yaml apiVersion: v1 @@ -512,8 +518,6 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 - cache.yaml: | - storage: cache: max_size_mbs: 1024 target_gc_percent: 70 @@ -857,7 +861,7 @@ spec: template: metadata: annotations: - configChecksum: "618a516ca42e8bbe5222a76f7865a0a444b6048002d7fcc06144c9188f3fd3d" + configChecksum: "c943b200cd0bed97fe456c0c713dd79cdc4e22133495cac89db3fc55e9b79c7" labels: app.kubernetes.io/name: flyteadmin app.kubernetes.io/instance: flyte @@ -1177,7 +1181,7 @@ spec: template: metadata: annotations: - configChecksum: "c2a15ce5dc2fa465986d6006f93450723da58166b3ad5ee35a91cb37d5c39da" + configChecksum: "ded28f3a68d22eb8e5af14a44cc0d14326f10060405268aac5a3665fb86c8bc" labels: app.kubernetes.io/name: datacatalog app.kubernetes.io/instance: flyte @@ 
-1279,7 +1283,7 @@ spec: template: metadata: annotations: - configChecksum: "6c8b21f7f9e96d92cfc0932e5a4289e969380662d96d3e6728a142bf01291c1" + configChecksum: "6572aa999f8e6842b4dba120e12e6ccb8cdfa506373de2a267b62a63146ccde" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1363,7 +1367,7 @@ spec: app.kubernetes.io/name: flyte-pod-webhook app.kubernetes.io/version: v1.13.2 annotations: - configChecksum: "6c8b21f7f9e96d92cfc0932e5a4289e969380662d96d3e6728a142bf01291c1" + configChecksum: "6572aa999f8e6842b4dba120e12e6ccb8cdfa506373de2a267b62a63146ccde" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: diff --git a/deployment/eks/flyte_helm_controlplane_generated.yaml b/deployment/eks/flyte_helm_controlplane_generated.yaml index 6d2d760b8d..60ca7d1720 100644 --- a/deployment/eks/flyte_helm_controlplane_generated.yaml +++ b/deployment/eks/flyte_helm_controlplane_generated.yaml @@ -170,6 +170,9 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 + cache: + max_size_mbs: 1024 + target_gc_percent: 70 task_resource_defaults.yaml: | task_resources: defaults: @@ -364,6 +367,9 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 + cache: + max_size_mbs: 1024 + target_gc_percent: 70 --- # Source: flyte-core/templates/flytescheduler/configmap.yaml apiVersion: v1 @@ -560,7 +566,7 @@ spec: template: metadata: annotations: - configChecksum: "5ce6f593fb92c9a6fd183825231d187471b5f10fe948f601f6d5b56edd02b51" + configChecksum: "391e8e126d669f751ac1a03de0b45fe7969a0fe58f3dfead9bb7be1b5d951ff" labels: app.kubernetes.io/name: flyteadmin app.kubernetes.io/instance: flyte @@ -880,7 +886,7 @@ spec: template: metadata: annotations: - configChecksum: "c2a15ce5dc2fa465986d6006f93450723da58166b3ad5ee35a91cb37d5c39da" + configChecksum: "ded28f3a68d22eb8e5af14a44cc0d14326f10060405268aac5a3665fb86c8bc" labels: app.kubernetes.io/name: datacatalog app.kubernetes.io/instance: flyte @@ -982,7 +988,7 @@ spec: template: metadata: annotations: 
- configChecksum: "5ce6f593fb92c9a6fd183825231d187471b5f10fe948f601f6d5b56edd02b51" + configChecksum: "391e8e126d669f751ac1a03de0b45fe7969a0fe58f3dfead9bb7be1b5d951ff" labels: app.kubernetes.io/name: flytescheduler app.kubernetes.io/instance: flyte diff --git a/deployment/eks/flyte_helm_dataplane_generated.yaml b/deployment/eks/flyte_helm_dataplane_generated.yaml index d2071582f5..682d1cef01 100644 --- a/deployment/eks/flyte_helm_dataplane_generated.yaml +++ b/deployment/eks/flyte_helm_dataplane_generated.yaml @@ -176,8 +176,6 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 - cache.yaml: | - storage: cache: max_size_mbs: 1024 target_gc_percent: 70 @@ -430,7 +428,7 @@ spec: template: metadata: annotations: - configChecksum: "6c8b21f7f9e96d92cfc0932e5a4289e969380662d96d3e6728a142bf01291c1" + configChecksum: "6572aa999f8e6842b4dba120e12e6ccb8cdfa506373de2a267b62a63146ccde" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -514,7 +512,7 @@ spec: app.kubernetes.io/name: flyte-pod-webhook app.kubernetes.io/version: v1.13.2 annotations: - configChecksum: "6c8b21f7f9e96d92cfc0932e5a4289e969380662d96d3e6728a142bf01291c1" + configChecksum: "6572aa999f8e6842b4dba120e12e6ccb8cdfa506373de2a267b62a63146ccde" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: diff --git a/deployment/eks/flyte_helm_generated.yaml b/deployment/eks/flyte_helm_generated.yaml index 61d81ec1e6..5e0ae72ec2 100644 --- a/deployment/eks/flyte_helm_generated.yaml +++ b/deployment/eks/flyte_helm_generated.yaml @@ -201,6 +201,9 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 + cache: + max_size_mbs: 1024 + target_gc_percent: 70 task_resource_defaults.yaml: | task_resources: defaults: @@ -395,6 +398,9 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 + cache: + max_size_mbs: 1024 + target_gc_percent: 70 --- # Source: flyte-core/templates/flytescheduler/configmap.yaml apiVersion: v1 @@ -543,8 +549,6 @@ data: 
enable-multicontainer: false limits: maxDownloadMBs: 10 - cache.yaml: | - storage: cache: max_size_mbs: 1024 target_gc_percent: 70 @@ -888,7 +892,7 @@ spec: template: metadata: annotations: - configChecksum: "5ce6f593fb92c9a6fd183825231d187471b5f10fe948f601f6d5b56edd02b51" + configChecksum: "391e8e126d669f751ac1a03de0b45fe7969a0fe58f3dfead9bb7be1b5d951ff" labels: app.kubernetes.io/name: flyteadmin app.kubernetes.io/instance: flyte @@ -1208,7 +1212,7 @@ spec: template: metadata: annotations: - configChecksum: "c2a15ce5dc2fa465986d6006f93450723da58166b3ad5ee35a91cb37d5c39da" + configChecksum: "ded28f3a68d22eb8e5af14a44cc0d14326f10060405268aac5a3665fb86c8bc" labels: app.kubernetes.io/name: datacatalog app.kubernetes.io/instance: flyte @@ -1310,7 +1314,7 @@ spec: template: metadata: annotations: - configChecksum: "5ce6f593fb92c9a6fd183825231d187471b5f10fe948f601f6d5b56edd02b51" + configChecksum: "391e8e126d669f751ac1a03de0b45fe7969a0fe58f3dfead9bb7be1b5d951ff" labels: app.kubernetes.io/name: flytescheduler app.kubernetes.io/instance: flyte @@ -1409,7 +1413,7 @@ spec: template: metadata: annotations: - configChecksum: "6c8b21f7f9e96d92cfc0932e5a4289e969380662d96d3e6728a142bf01291c1" + configChecksum: "6572aa999f8e6842b4dba120e12e6ccb8cdfa506373de2a267b62a63146ccde" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1493,7 +1497,7 @@ spec: app.kubernetes.io/name: flyte-pod-webhook app.kubernetes.io/version: v1.13.2 annotations: - configChecksum: "6c8b21f7f9e96d92cfc0932e5a4289e969380662d96d3e6728a142bf01291c1" + configChecksum: "6572aa999f8e6842b4dba120e12e6ccb8cdfa506373de2a267b62a63146ccde" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: diff --git a/deployment/gcp/flyte_helm_controlplane_generated.yaml b/deployment/gcp/flyte_helm_controlplane_generated.yaml index 12f144132f..29367a5b37 100644 --- a/deployment/gcp/flyte_helm_controlplane_generated.yaml +++ b/deployment/gcp/flyte_helm_controlplane_generated.yaml @@ -175,6 +175,9 @@ 
data: enable-multicontainer: false limits: maxDownloadMBs: 10 + cache: + max_size_mbs: 0 + target_gc_percent: 70 task_resource_defaults.yaml: | task_resources: defaults: @@ -377,6 +380,9 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 + cache: + max_size_mbs: 0 + target_gc_percent: 70 --- # Source: flyte-core/templates/flytescheduler/configmap.yaml apiVersion: v1 @@ -575,7 +581,7 @@ spec: template: metadata: annotations: - configChecksum: "0705f122f2535babec96a6083827c3e6d27e6e9b0e460b4d07292c858079ac7" + configChecksum: "20a517901c6b6f01f47e968fa15ca51f6d9522e728ecace8b48553eb428cde6" labels: app.kubernetes.io/name: flyteadmin app.kubernetes.io/instance: flyte @@ -895,7 +901,7 @@ spec: template: metadata: annotations: - configChecksum: "905a2a911a85dbf8d4f1dc3be24b9c4fd7bb46481db0e174274d6aea6129b4c" + configChecksum: "537b12b49584e5eb9da85bbf0d8d8d21d8edce0560b0b53f595485f2cdb1cb6" labels: app.kubernetes.io/name: datacatalog app.kubernetes.io/instance: flyte @@ -997,7 +1003,7 @@ spec: template: metadata: annotations: - configChecksum: "0705f122f2535babec96a6083827c3e6d27e6e9b0e460b4d07292c858079ac7" + configChecksum: "20a517901c6b6f01f47e968fa15ca51f6d9522e728ecace8b48553eb428cde6" labels: app.kubernetes.io/name: flytescheduler app.kubernetes.io/instance: flyte diff --git a/deployment/gcp/flyte_helm_dataplane_generated.yaml b/deployment/gcp/flyte_helm_dataplane_generated.yaml index b3f9bdc12d..8196b38520 100644 --- a/deployment/gcp/flyte_helm_dataplane_generated.yaml +++ b/deployment/gcp/flyte_helm_dataplane_generated.yaml @@ -179,10 +179,8 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 - cache.yaml: | - storage: cache: - max_size_mbs: 1024 + max_size_mbs: 0 target_gc_percent: 70 task_logs.yaml: | plugins: @@ -438,7 +436,7 @@ spec: template: metadata: annotations: - configChecksum: "0056f7638fe13bd9187bfaf4011fde4cfbecd13c152ba443bba7fe25ca4777a" + configChecksum: "8562f7f608d4936e13f6ad70c18c7c095068e742243e7f380f89694d2182110" 
prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -521,7 +519,7 @@ spec: app.kubernetes.io/name: flyte-pod-webhook app.kubernetes.io/version: v1.13.2 annotations: - configChecksum: "0056f7638fe13bd9187bfaf4011fde4cfbecd13c152ba443bba7fe25ca4777a" + configChecksum: "8562f7f608d4936e13f6ad70c18c7c095068e742243e7f380f89694d2182110" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: diff --git a/deployment/gcp/flyte_helm_generated.yaml b/deployment/gcp/flyte_helm_generated.yaml index 2d0a658271..ce1f64c1df 100644 --- a/deployment/gcp/flyte_helm_generated.yaml +++ b/deployment/gcp/flyte_helm_generated.yaml @@ -206,6 +206,9 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 + cache: + max_size_mbs: 0 + target_gc_percent: 70 task_resource_defaults.yaml: | task_resources: defaults: @@ -408,6 +411,9 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 + cache: + max_size_mbs: 0 + target_gc_percent: 70 --- # Source: flyte-core/templates/flytescheduler/configmap.yaml apiVersion: v1 @@ -559,10 +565,8 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 - cache.yaml: | - storage: cache: - max_size_mbs: 1024 + max_size_mbs: 0 target_gc_percent: 70 task_logs.yaml: | plugins: @@ -911,7 +915,7 @@ spec: template: metadata: annotations: - configChecksum: "0705f122f2535babec96a6083827c3e6d27e6e9b0e460b4d07292c858079ac7" + configChecksum: "20a517901c6b6f01f47e968fa15ca51f6d9522e728ecace8b48553eb428cde6" labels: app.kubernetes.io/name: flyteadmin app.kubernetes.io/instance: flyte @@ -1231,7 +1235,7 @@ spec: template: metadata: annotations: - configChecksum: "905a2a911a85dbf8d4f1dc3be24b9c4fd7bb46481db0e174274d6aea6129b4c" + configChecksum: "537b12b49584e5eb9da85bbf0d8d8d21d8edce0560b0b53f595485f2cdb1cb6" labels: app.kubernetes.io/name: datacatalog app.kubernetes.io/instance: flyte @@ -1333,7 +1337,7 @@ spec: template: metadata: annotations: - configChecksum: 
"0705f122f2535babec96a6083827c3e6d27e6e9b0e460b4d07292c858079ac7" + configChecksum: "20a517901c6b6f01f47e968fa15ca51f6d9522e728ecace8b48553eb428cde6" labels: app.kubernetes.io/name: flytescheduler app.kubernetes.io/instance: flyte @@ -1432,7 +1436,7 @@ spec: template: metadata: annotations: - configChecksum: "0056f7638fe13bd9187bfaf4011fde4cfbecd13c152ba443bba7fe25ca4777a" + configChecksum: "8562f7f608d4936e13f6ad70c18c7c095068e742243e7f380f89694d2182110" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -1515,7 +1519,7 @@ spec: app.kubernetes.io/name: flyte-pod-webhook app.kubernetes.io/version: v1.13.2 annotations: - configChecksum: "0056f7638fe13bd9187bfaf4011fde4cfbecd13c152ba443bba7fe25ca4777a" + configChecksum: "8562f7f608d4936e13f6ad70c18c7c095068e742243e7f380f89694d2182110" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: diff --git a/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml b/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml index 2704a2eac6..6fafa61550 100644 --- a/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml +++ b/deployment/sandbox-binary/flyte_sandbox_binary_helm_generated.yaml @@ -92,6 +92,9 @@ data: disabled: false seedProjects: - flytesnacks + seedProjectsWithDetails: + - description: Default project setup. 
+ name: flytesnacks dataCatalog: disabled: false propeller: @@ -360,7 +363,7 @@ spec: app.kubernetes.io/instance: flyte app.kubernetes.io/component: flyte-binary annotations: - checksum/configuration: faaefbd3b3b2ddfd4e718bd77c02c632c75e7111dad0a6e25dc415dc88add73f + checksum/configuration: 886440a42b3eeec802cfe60d37885f69e35ffd83e53e625b3c877da5e8c7eb38 checksum/configuration-secret: d5d93f4e67780b21593dc3799f0f6682aab0765e708e4020939975d14d44f929 checksum/cluster-resource-templates: 7dfa59f3d447e9c099b8f8ffad3af466fecbc9cf9f8c97295d9634254a55d4ae spec: diff --git a/deployment/sandbox/flyte_helm_generated.yaml b/deployment/sandbox/flyte_helm_generated.yaml index 811d77e049..22b4855352 100644 --- a/deployment/sandbox/flyte_helm_generated.yaml +++ b/deployment/sandbox/flyte_helm_generated.yaml @@ -331,6 +331,9 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 + cache: + max_size_mbs: 0 + target_gc_percent: 70 task_resource_defaults.yaml: | task_resources: defaults: @@ -517,6 +520,9 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 + cache: + max_size_mbs: 0 + target_gc_percent: 70 --- # Source: flyte/charts/flyte/templates/flytescheduler/configmap.yaml apiVersion: v1 @@ -682,8 +688,6 @@ data: enable-multicontainer: false limits: maxDownloadMBs: 10 - cache.yaml: | - storage: cache: max_size_mbs: 0 target_gc_percent: 70 @@ -6695,7 +6699,7 @@ spec: template: metadata: annotations: - configChecksum: "88625d852360c42642190e21751f32f7dd9501ce3d479fa68b86478995ff689" + configChecksum: "f2d2bbea27b58cc5a73da30eb8aeb56fc41863f4eba2bfe407da2e97a6372e8" labels: app.kubernetes.io/name: flyteadmin app.kubernetes.io/instance: flyte @@ -6997,7 +7001,7 @@ spec: template: metadata: annotations: - configChecksum: "ccdd0d27618b8053a8ae11046fd2b84b9a397144dd81c7113f398cddf001397" + configChecksum: "0df67e720160bb897fcb950f39eede7efbd668c770872d171469df8dc1dd70f" labels: app.kubernetes.io/name: datacatalog app.kubernetes.io/instance: flyte @@ -7088,7 
+7092,7 @@ spec: template: metadata: annotations: - configChecksum: "88625d852360c42642190e21751f32f7dd9501ce3d479fa68b86478995ff689" + configChecksum: "f2d2bbea27b58cc5a73da30eb8aeb56fc41863f4eba2bfe407da2e97a6372e8" labels: app.kubernetes.io/name: flytescheduler app.kubernetes.io/instance: flyte @@ -7183,7 +7187,7 @@ spec: template: metadata: annotations: - configChecksum: "eda2e0a9fc32f46471061f47df324b50fe3ded8a90c0ac18a75755c50eb80b7" + configChecksum: "7ab9aee83ad8109354235eee7f46c3f091d7c70cd55157a195f4997d247f933" prometheus.io/path: "/metrics" prometheus.io/port: "10254" labels: @@ -7259,7 +7263,7 @@ spec: app.kubernetes.io/name: flyte-pod-webhook app.kubernetes.io/version: v1.13.2 annotations: - configChecksum: "eda2e0a9fc32f46471061f47df324b50fe3ded8a90c0ac18a75755c50eb80b7" + configChecksum: "7ab9aee83ad8109354235eee7f46c3f091d7c70cd55157a195f4997d247f933" prometheus.io/path: "/metrics" prometheus.io/port: "10254" spec: diff --git a/deployment/stats/prometheus/flyteuser-dashboard.json b/deployment/stats/prometheus/flyteuser-dashboard.json index 55c7ad5851..36eb2bb7bf 100644 --- a/deployment/stats/prometheus/flyteuser-dashboard.json +++ b/deployment/stats/prometheus/flyteuser-dashboard.json @@ -12,7 +12,7 @@ "annotations": { "list": [] }, - "description": "Flyte User Dashboard. This is great to get a birds-eye and drill down view of executions in your Flyte cluster. Useful for the user.", + "description": "Flyte User Dashboard. 
It's designed to give an overview of execution status and resource consumption.", "editable": false, "gnetId": null, "graphTooltip": 0, @@ -40,7 +40,8 @@ "thresholds": { "mode": "absolute", "steps": [] - } + }, + "unit": "" } }, "fill": 1, @@ -96,7 +97,7 @@ "targets": [ { "datasource": null, - "expr": "sum(rate(flyte:propeller:all:workflow:accepted{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}[5m]))", + "expr": "avg(flyte:propeller:all:workflow:accepted{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"})", "format": "time_series", "hide": false, "instant": false, @@ -104,7 +105,7 @@ "intervalFactor": 2, "legendFormat": "", "metric": "", - "query": "sum(rate(flyte:propeller:all:workflow:accepted{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}[5m]))", + "query": "avg(flyte:propeller:all:workflow:accepted{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"})", "refId": "A", "step": 10, "target": "" @@ -113,7 +114,7 @@ "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Accepted Workflow", + "title": "Accepted Workflows (avg)", "tooltip": { "msResolution": true, "shared": true, @@ -167,7 +168,8 @@ "thresholds": { "mode": "absolute", "steps": [] - } + }, + "unit": "" } }, "fill": 1, @@ -240,7 +242,7 @@ "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Successful Workflow", + "title": "Workflow success rate", "tooltip": { "msResolution": true, "shared": true, @@ -294,7 +296,8 @@ "thresholds": { "mode": "absolute", "steps": [] - } + }, + "unit": "" } }, "fill": 1, @@ -367,7 +370,7 @@ "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Failed Workflow", + "title": "Workflow failure rate", "tooltip": { "msResolution": true, "shared": true, @@ -421,7 +424,8 @@ "thresholds": { "mode": "absolute", "steps": [] - } + }, + "unit": "" } }, "fill": 1, @@ -477,7 +481,7 @@ "targets": [ { "datasource": null, - "expr": 
"sum(rate(flyte:propeller:all:workflow:workflow_aborted{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}[5m]))", + "expr": "avg_over_time(flyte:propeller:all:workflow:workflow_aborted{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}[5m])", "format": "time_series", "hide": false, "instant": false, @@ -485,7 +489,7 @@ "intervalFactor": 2, "legendFormat": "", "metric": "", - "query": "sum(rate(flyte:propeller:all:workflow:workflow_aborted{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}[5m]))", + "query": "avg_over_time(flyte:propeller:all:workflow:workflow_aborted{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}[5m])", "refId": "A", "step": 10, "target": "" @@ -494,7 +498,7 @@ "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Aborted Workflow", + "title": "Aborted Workflows (avg)", "tooltip": { "msResolution": true, "shared": true, @@ -513,7 +517,7 @@ "yaxes": [ { "decimals": null, - "format": "ops", + "format": "short", "label": null, "logBase": 1, "max": null, @@ -536,8 +540,6 @@ } }, { - "aliasColors": {}, - "bars": false, "cacheTimeout": null, "datasource": "${DS_PROM}", "description": null, @@ -547,64 +549,84 @@ "defaults": { "thresholds": { "mode": "absolute", - "steps": [] + "steps": [ + { + "color": "green", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "red", + "index": 1, + "line": true, + "op": "gt", + "value": 80.0, + "yaxis": "left" + } + ] } } }, - "fill": 1, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, "gridPos": null, "height": null, "hideTimeOverride": false, "id": 5, "interval": null, - "isNew": true, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - 
"sort": null, - "sortDesc": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, "links": [], "maxDataPoints": 100, "maxPerRow": null, "minSpan": null, - "nullPointMode": "connected", "options": { - "alertThreshold": true, - "dataLinks": [] + "displayMode": "lcd", + "fieldOptions": { + "calcs": [ + "mean" + ], + "defaults": { + "decimals": null, + "links": [], + "max": 100, + "min": 0, + "title": null, + "unit": "s" + }, + "limit": null, + "mappings": [], + "override": {}, + "thresholds": [ + { + "color": "green", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "red", + "index": 1, + "line": true, + "op": "gt", + "value": 80.0, + "yaxis": "left" + } + ], + "values": false + }, + "orientation": "horizontal", + "showThresholdLabels": false, + "showThresholdMarkers": true }, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", "repeat": null, "repeatDirection": null, - "seriesOverrides": [], "span": 2, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": null, - "expr": "sum(flyte:propeller:all:workflow:success_duration_ms{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}) by (quantile)", + "expr": "(avg(flyte:propeller:all:workflow:success_duration_ms{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}) by(quantile))/1000", "format": "time_series", "hide": false, "instant": false, @@ -612,59 +634,20 @@ "intervalFactor": 2, "legendFormat": "", "metric": "", - "query": "sum(flyte:propeller:all:workflow:success_duration_ms{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}) by (quantile)", + "query": "(avg(flyte:propeller:all:workflow:success_duration_ms{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}) by(quantile))/1000", "refId": "A", "step": 10, "target": "" } ], - "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Successful workflow execution time by Quantile", - 
"tooltip": { - "msResolution": true, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, + "title": "Successful wf execution duration by quantile", "transformations": [], "transparent": false, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "decimals": null, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": 0 - } + "type": "bargauge" }, { - "aliasColors": {}, - "bars": false, "cacheTimeout": null, "datasource": "${DS_PROM}", "description": null, @@ -674,191 +657,84 @@ "defaults": { "thresholds": { "mode": "absolute", - "steps": [] + "steps": [ + { + "color": "green", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "red", + "index": 1, + "line": true, + "op": "gt", + "value": 80.0, + "yaxis": "left" + } + ] } } }, - "fill": 1, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" - }, "gridPos": null, "height": null, "hideTimeOverride": false, "id": 6, "interval": null, - "isNew": true, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "sort": null, - "sortDesc": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, "links": [], "maxDataPoints": 100, "maxPerRow": null, "minSpan": null, - "nullPointMode": "connected", "options": { - "alertThreshold": true, - "dataLinks": [] - }, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", - "repeat": null, - "repeatDirection": null, - 
"seriesOverrides": [], - "span": 2, - "stack": false, - "steppedLine": false, - "targets": [ - { - "datasource": null, - "expr": "sum(flyte:propeller:all:workflow:failure_duration_ms{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}) by (quantile)", - "format": "time_series", - "hide": false, - "instant": false, - "interval": "", - "intervalFactor": 2, - "legendFormat": "", - "metric": "", - "query": "sum(flyte:propeller:all:workflow:failure_duration_ms{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}) by (quantile)", - "refId": "A", - "step": 10, - "target": "" - } - ], - "thresholds": [], - "timeFrom": null, - "timeShift": null, - "title": "Failed workflow execution time by Quantile", - "tooltip": { - "msResolution": true, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, - "transformations": [], - "transparent": false, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true + "displayMode": "lcd", + "fieldOptions": { + "calcs": [ + "mean" + ], + "defaults": { + "decimals": null, + "links": [], + "max": 100, + "min": 0, + "title": null, + "unit": "s" + }, + "limit": null, + "mappings": [], + "override": {}, + "thresholds": [ + { + "color": "green", + "index": 0, + "line": true, + "op": "gt", + "value": "null", + "yaxis": "left" + }, + { + "color": "red", + "index": 1, + "line": true, + "op": "gt", + "value": 80.0, + "yaxis": "left" + } + ], + "values": false }, - { - "decimals": null, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": 0 - } - }, - { - "aliasColors": {}, - "bars": false, - "cacheTimeout": null, - "datasource": "${DS_PROM}", - "description": null, - "editable": true, - "error": false, - "fieldConfig": { - "defaults": { - 
"thresholds": { - "mode": "absolute", - "steps": [] - } - } - }, - "fill": 1, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" + "orientation": "horizontal", + "showThresholdLabels": false, + "showThresholdMarkers": true }, - "gridPos": null, - "height": null, - "hideTimeOverride": false, - "id": 7, - "interval": null, - "isNew": true, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "sort": null, - "sortDesc": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, - "links": [], - "maxDataPoints": 100, - "maxPerRow": null, - "minSpan": null, - "nullPointMode": "connected", - "options": { - "alertThreshold": true, - "dataLinks": [] - }, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", "repeat": null, "repeatDirection": null, - "seriesOverrides": [], "span": 2, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": null, - "expr": "sum(flyte:propeller:all:node:queueing_latency_ms{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}) by (quantile)", + "expr": "(avg(flyte:propeller:all:workflow:failure_duration_ms{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}) by(quantile))/1000", "format": "time_series", "hide": false, "instant": false, @@ -866,55 +742,18 @@ "intervalFactor": 2, "legendFormat": "", "metric": "", - "query": "sum(flyte:propeller:all:node:queueing_latency_ms{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}) by (quantile)", + "query": "(avg(flyte:propeller:all:workflow:failure_duration_ms{project=~\"$project\", domain=~\"$domain\", wf=~\"$workflow\"}) by(quantile))/1000", "refId": "A", "step": 10, "target": "" } ], - "thresholds": [], "timeFrom": null, "timeShift": null, - 
"title": "Node queuing latency by Quantile", - "tooltip": { - "msResolution": true, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, + "title": "Failed wf execution duration by quantile", "transformations": [], "transparent": false, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "ms", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "decimals": null, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": 0 - } + "type": "bargauge" } ], "repeat": null, @@ -939,7 +778,8 @@ "thresholds": { "mode": "absolute", "steps": [] - } + }, + "unit": "" } }, "fill": 1, @@ -952,7 +792,7 @@ "gridPos": null, "height": null, "hideTimeOverride": false, - "id": 8, + "id": 7, "interval": null, "isNew": true, "legend": { @@ -1001,7 +841,7 @@ "instant": false, "interval": "", "intervalFactor": 2, - "legendFormat": "max cpu", + "legendFormat": "CPU limit", "metric": "", "query": "kube_resourcequota{resource=\"limits.cpu\", namespace=\"$project-$domain\", type=\"hard\"}", "refId": "A", @@ -1016,7 +856,7 @@ "instant": false, "interval": "", "intervalFactor": 2, - "legendFormat": "used cpu", + "legendFormat": "CPU requested", "metric": "", "query": "kube_resourcequota{resource=\"limits.cpu\", namespace=\"$project-$domain\", type=\"used\"}", "refId": "B", @@ -1027,7 +867,7 @@ "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "CPU Limits vs usage", + "title": "CPU Limit vs requested by namespace", "tooltip": { "msResolution": true, "shared": true, @@ -1046,7 +886,7 @@ "yaxes": [ { "decimals": null, - "format": "ops", + "format": "short", "label": null, "logBase": 1, "max": null, @@ -1081,7 +921,8 @@ "thresholds": { "mode": "absolute", "steps": [] - } + }, + "unit": "" } }, "fill": 1, @@ -1094,7 +935,7 @@ "gridPos": null, 
"height": null, "hideTimeOverride": false, - "id": 9, + "id": 8, "interval": null, "isNew": true, "legend": { @@ -1137,30 +978,30 @@ "targets": [ { "datasource": null, - "expr": "kube_resourcequota{resource=\"limits.memory\", namespace=\"$project-$domain\", type=\"hard\"}", + "expr": "(kube_resourcequota{resource=\"limits.memory\", namespace=\"$project-$domain\", type=\"hard\"})*9.5367e-7", "format": "time_series", "hide": false, "instant": false, "interval": "", "intervalFactor": 2, - "legendFormat": "max mem", + "legendFormat": "Memory limit (MiB)", "metric": "", - "query": "kube_resourcequota{resource=\"limits.memory\", namespace=\"$project-$domain\", type=\"hard\"}", + "query": "(kube_resourcequota{resource=\"limits.memory\", namespace=\"$project-$domain\", type=\"hard\"})*9.5367e-7", "refId": "A", "step": 10, "target": "" }, { "datasource": null, - "expr": "kube_resourcequota{resource=\"limits.memory\", namespace=\"$project-$domain\", type=\"used\"}", + "expr": "(kube_resourcequota{resource=\"limits.memory\", namespace=\"$project-$domain\", type=\"used\"})*9.5367e-7", "format": "time_series", "hide": false, "instant": false, "interval": "", "intervalFactor": 2, - "legendFormat": "used mem", + "legendFormat": "Memory requested (MiB)", "metric": "", - "query": "kube_resourcequota{resource=\"limits.memory\", namespace=\"$project-$domain\", type=\"used\"}", + "query": "(kube_resourcequota{resource=\"limits.memory\", namespace=\"$project-$domain\", type=\"used\"})*9.5367e-7", "refId": "B", "step": 10, "target": "" @@ -1169,7 +1010,7 @@ "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Mem Limits vs usage", + "title": "Memory limit vs requested by namespace (MiB)", "tooltip": { "msResolution": true, "shared": true, @@ -1188,7 +1029,7 @@ "yaxes": [ { "decimals": null, - "format": "ops", + "format": "short", "label": null, "logBase": 1, "max": null, @@ -1213,7 +1054,7 @@ ], "repeat": null, "showTitle": true, - "title": "Kubernetes Quota Usage stats" + 
"title": "Kubernetes Resource Quota Usage" }, { "collapse": true, @@ -1233,7 +1074,8 @@ "thresholds": { "mode": "absolute", "steps": [] - } + }, + "unit": "" } }, "fill": 1, @@ -1246,7 +1088,7 @@ "gridPos": null, "height": null, "hideTimeOverride": false, - "id": 10, + "id": 9, "interval": null, "isNew": true, "legend": { @@ -1289,7 +1131,7 @@ "targets": [ { "datasource": null, - "expr": "sum(kube_pod_container_status_waiting * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !=\"\",namespace=~\"$project-$domain\",label_workflow_name=~\"$workflow\"}) by (namespace, label_execution_id, label_task_name, label_node_id, label_workflow_name) > 0", + "expr": "sum(kube_pod_status_phase{phase=\"Pending\"} * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_workflow_name=~\"$workflow\"}) by (namespace, label_task_name, label_node_id, label_workflow_name) > 0", "format": "time_series", "hide": false, "instant": false, @@ -1297,7 +1139,7 @@ "intervalFactor": 2, "legendFormat": "", "metric": "", - "query": "sum(kube_pod_container_status_waiting * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !=\"\",namespace=~\"$project-$domain\",label_workflow_name=~\"$workflow\"}) by (namespace, label_execution_id, label_task_name, label_node_id, label_workflow_name) > 0", + "query": "sum(kube_pod_status_phase{phase=\"Pending\"} * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_workflow_name=~\"$workflow\"}) by (namespace, label_task_name, label_node_id, label_workflow_name) > 0", "refId": "A", "step": 10, "target": "" @@ -1306,7 +1148,7 @@ "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Pending tasks", + "title": "Pending Tasks", "tooltip": { "msResolution": true, "shared": true, @@ -1348,8 +1190,6 @@ } }, { - "aliasColors": 
{}, - "bars": false, "cacheTimeout": null, "datasource": "${DS_PROM}", "description": null, @@ -1357,66 +1197,80 @@ "error": false, "fieldConfig": { "defaults": { + "color": { + "fixedColor": "none", + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "scaleDistribution": { + "type": "linear" + } + }, + "mappings": [], "thresholds": { "mode": "absolute", - "steps": [] + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] } - } - }, - "fill": 1, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "overrides": [] }, "gridPos": null, "height": null, "hideTimeOverride": false, - "id": 11, + "id": 10, "interval": null, - "isNew": true, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "sort": null, - "sortDesc": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, "links": [], "maxDataPoints": 100, "maxPerRow": null, "minSpan": null, - "nullPointMode": "connected", "options": { - "alertThreshold": true, - "dataLinks": [] + "barRadius": 0.0, + "barWidth": 0.97, + "groupWidth": 0.7, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "orientation": "auto", + "showValue": "true", + "stacking": "none", + "tooltip": { + "mode": "single", + "sort": "none" + }, + "xTickLabelRotation": 0, + "xTickLabelSpacing": 0 }, - "percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", "repeat": null, "repeatDirection": null, - 
"seriesOverrides": [], "span": 4, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": null, - "expr": "(100 * max(container_memory_rss{image!=\"\"} * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !=\"\",namespace=~\"$project-$domain\",label_workflow_name=~\"$workflow\"} * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name) / max(kube_pod_container_resource_limits_memory_bytes{container!=\"\"} * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !=\"\"} * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name)) > 0", + "expr": "(100 * (max(container_memory_working_set_bytes{container!=\"\"} * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels{namespace=~\"$project-$domain\",label_workflow_name=~\"$workflow\"} * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_task_name, label_node_id, label_workflow_name) / max(cluster:namespace:pod_memory:active:kube_pod_container_resource_limits{container!=\"\"} * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_task_name, label_node_id, label_workflow_name))) > 0", "format": "time_series", "hide": false, "instant": false, @@ -1424,59 +1278,20 @@ "intervalFactor": 2, "legendFormat": "", "metric": "", - "query": "(100 * max(container_memory_rss{image!=\"\"} * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id 
!=\"\",namespace=~\"$project-$domain\",label_workflow_name=~\"$workflow\"} * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name) / max(kube_pod_container_resource_limits_memory_bytes{container!=\"\"} * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !=\"\"} * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name)) > 0", + "query": "(100 * (max(container_memory_working_set_bytes{container!=\"\"} * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels{namespace=~\"$project-$domain\",label_workflow_name=~\"$workflow\"} * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_task_name, label_node_id, label_workflow_name) / max(cluster:namespace:pod_memory:active:kube_pod_container_resource_limits{container!=\"\"} * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_task_name, label_node_id, label_workflow_name))) > 0", "refId": "A", "step": 10, "target": "" } ], - "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "Memory Usage Percentage", - "tooltip": { - "msResolution": true, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, + "title": "Memory Usage per Task(%)", "transformations": [], "transparent": false, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - }, - { - "decimals": null, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": 
null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": 0 - } + "type": "barchart" }, { - "aliasColors": {}, - "bars": false, "cacheTimeout": null, "datasource": "${DS_PROM}", "description": null, @@ -1484,66 +1299,80 @@ "error": false, "fieldConfig": { "defaults": { + "color": { + "fixedColor": "none", + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "fillOpacity": 80, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineWidth": 1, + "scaleDistribution": { + "type": "linear" + } + }, + "mappings": [], "thresholds": { "mode": "absolute", - "steps": [] + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] } - } - }, - "fill": 1, - "grid": { - "threshold1": null, - "threshold1Color": "rgba(216, 200, 27, 0.27)", - "threshold2": null, - "threshold2Color": "rgba(234, 112, 112, 0.22)" + }, + "overrides": [] }, "gridPos": null, "height": null, "hideTimeOverride": false, - "id": 12, + "id": 11, "interval": null, - "isNew": true, - "legend": { - "alignAsTable": false, - "avg": false, - "current": false, - "hideEmpty": false, - "hideZero": false, - "max": false, - "min": false, - "rightSide": false, - "show": true, - "sideWidth": null, - "sort": null, - "sortDesc": false, - "total": false, - "values": false - }, - "lines": true, - "linewidth": 2, "links": [], "maxDataPoints": 100, "maxPerRow": null, "minSpan": null, - "nullPointMode": "connected", "options": { - "alertThreshold": true, - "dataLinks": [] + "barRadius": 0.0, + "barWidth": 0.97, + "groupWidth": 0.7, + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "orientation": "auto", + "showValue": "true", + "stacking": "none", + "tooltip": { + "mode": "single", + "sort": "none" + }, + "xTickLabelRotation": 0, + "xTickLabelSpacing": 0 }, - 
"percentage": false, - "pointradius": 5, - "points": false, - "renderer": "flot", "repeat": null, "repeatDirection": null, - "seriesOverrides": [], "span": 4, - "stack": false, - "steppedLine": false, "targets": [ { "datasource": null, - "expr": "(100* sum(rate(container_cpu_usage_seconds_total{image!=\"\"}[2m]) * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !=\"\",namespace=~\"$project-$domain\",label_workflow_name=~\"$workflow\"} * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name) / sum(kube_pod_container_resource_limits_cpu_cores{container!=\"\"} * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !=\"\"} * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name)) > 0", + "expr": "(100 * (sum(rate(container_cpu_usage_seconds_total{image!=\"\"}[2m]) * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels{namespace=~\"$project-$domain\",label_workflow_name=~\"$workflow\"} * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_task_name, label_node_id, label_workflow_name) / sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits{container!=\"\"} * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_task_name, label_node_id, label_workflow_name))) > 0", "format": "time_series", "hide": false, "instant": false, @@ -1551,55 +1380,18 @@ "intervalFactor": 2, "legendFormat": "", "metric": "", - "query": "(100* sum(rate(container_cpu_usage_seconds_total{image!=\"\"}[2m]) * on(pod) 
group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !=\"\",namespace=~\"$project-$domain\",label_workflow_name=~\"$workflow\"} * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name) / sum(kube_pod_container_resource_limits_cpu_cores{container!=\"\"} * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !=\"\"} * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name)) > 0", + "query": "(100 * (sum(rate(container_cpu_usage_seconds_total{image!=\"\"}[2m]) * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels{namespace=~\"$project-$domain\",label_workflow_name=~\"$workflow\"} * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_task_name, label_node_id, label_workflow_name) / sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits{container!=\"\"} * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels * on(pod) group_left(phase) kube_pod_status_phase{phase=\"Running\"}) by (namespace, pod, label_task_name, label_node_id, label_workflow_name))) > 0", "refId": "A", "step": 10, "target": "" } ], - "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "CPU Usage Percentage", - "tooltip": { - "msResolution": true, - "shared": true, - "sort": 0, - "value_type": "cumulative" - }, + "title": "CPU Usage per Task(%)", "transformations": [], "transparent": false, - "type": "graph", - "xaxis": { - "mode": "time", - "name": null, - "show": true, - "values": [] - }, - "yaxes": [ - { - "decimals": null, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": 
true - }, - { - "decimals": null, - "format": "short", - "label": null, - "logBase": 1, - "max": null, - "min": null, - "show": true - } - ], - "yaxis": { - "align": false, - "alignLevel": 0 - } + "type": "barchart" } ], "repeat": null, @@ -1624,7 +1416,8 @@ "thresholds": { "mode": "absolute", "steps": [] - } + }, + "unit": "" } }, "fill": 1, @@ -1637,7 +1430,7 @@ "gridPos": null, "height": null, "hideTimeOverride": false, - "id": 13, + "id": 12, "interval": null, "isNew": true, "legend": { @@ -1697,7 +1490,7 @@ "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "User errors", + "title": "User error rate", "tooltip": { "msResolution": true, "shared": true, @@ -1751,7 +1544,8 @@ "thresholds": { "mode": "absolute", "steps": [] - } + }, + "unit": "" } }, "fill": 1, @@ -1764,7 +1558,7 @@ "gridPos": null, "height": null, "hideTimeOverride": false, - "id": 14, + "id": 13, "interval": null, "isNew": true, "legend": { @@ -1824,7 +1618,7 @@ "thresholds": [], "timeFrom": null, "timeShift": null, - "title": "System errors", + "title": "System error rate", "tooltip": { "msResolution": true, "shared": true, @@ -1868,7 +1662,7 @@ ], "repeat": null, "showTitle": true, - "title": "Error (System vs user)" + "title": "Error (System vs User)" } ], "schemaVersion": 12, @@ -1971,6 +1765,7 @@ }, "timepicker": { "hidden": false, + "nowDelay": null, "refresh_intervals": [ "5s", "10s", diff --git a/docker/sandbox-bundled/manifests/complete-agent.yaml b/docker/sandbox-bundled/manifests/complete-agent.yaml index 4b0ef1cd98..028f719e71 100644 --- a/docker/sandbox-bundled/manifests/complete-agent.yaml +++ b/docker/sandbox-bundled/manifests/complete-agent.yaml @@ -444,6 +444,9 @@ data: disabled: false seedProjects: - flytesnacks + seedProjectsWithDetails: + - description: Default project setup. 
+ name: flytesnacks dataCatalog: disabled: false propeller: @@ -816,7 +819,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: cmRzbzQ4N3RQaWhuMk00OA== + haSharedSecret: SlI1TDFkTXBMaThuc0hlSQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1247,7 +1250,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: dc6e26fec37cad413a92bf06f2840ea1e497284312275ff06e22b152dee1566b + checksum/configuration: a823eaadac5f3a4358c8acf628ebeb3719f88312af520d2c253de2579dff262d checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1413,7 +1416,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 51528951e92c2bf712bbde990941593aae1fcf72144a1fe944c312ddad86e161 + checksum/secret: ffc8aa05a602edd8f9b1d7ef35aa1cc5e383bceb9b91307eef99e86f53e13d4e labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/complete.yaml b/docker/sandbox-bundled/manifests/complete.yaml index a44f3dad5d..c8b8e1c93a 100644 --- a/docker/sandbox-bundled/manifests/complete.yaml +++ b/docker/sandbox-bundled/manifests/complete.yaml @@ -433,6 +433,9 @@ data: disabled: false seedProjects: - flytesnacks + seedProjectsWithDetails: + - description: Default project setup. 
+ name: flytesnacks dataCatalog: disabled: false propeller: @@ -798,7 +801,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: T1I2Q2tTcmREVG15MldGUQ== + haSharedSecret: YjdMdE9yejJzZ2xXSDFBRQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1196,7 +1199,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: a6f3ea502338c626b7824453ce7dc8b6fcd441d68865c075e2e74d797bc607fa + checksum/configuration: c2649df6bcb523f120c73b0fdeec5d9516f555eab12e4eae78b04dea2cf2abae checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1362,7 +1365,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: d723e395edc0fd2f221b9088efffe0d1f4dfabdef9892065fdabe12233362cf5 + checksum/secret: 956ac1b58c049a630c94605eedaba7ba9de3fc01233701ef403ab4bf24fe2a7a labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/dev.yaml b/docker/sandbox-bundled/manifests/dev.yaml index d87f3a1642..1038da1f64 100644 --- a/docker/sandbox-bundled/manifests/dev.yaml +++ b/docker/sandbox-bundled/manifests/dev.yaml @@ -499,7 +499,7 @@ metadata: --- apiVersion: v1 data: - haSharedSecret: ZnltNHNiZ01NRFNkb1RlMA== + haSharedSecret: YUpzb25xNTM1eml3Rmpueg== proxyPassword: "" proxyUsername: "" kind: Secret @@ -934,7 +934,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: eeab364c20a0e8ad5a1526ccd7ddbd1d5a442087e7267c4d761279102b81be21 + checksum/secret: 2720f13bd64051a7acb512e59e426b9f6c5f6c3c7d1d9a3a423e2df4cf9bab46 labels: app: docker-registry release: flyte-sandbox diff --git a/docs/community/contribute/contribute_docs.md b/docs/community/contribute/contribute_docs.md index 3b5d996abf..f97152032b 100644 
--- a/docs/community/contribute/contribute_docs.md +++ b/docs/community/contribute/contribute_docs.md @@ -57,3 +57,66 @@ Deployment and API docs mostly use reStructured Text. For more information, see You can cross-reference multiple Python modules, functions, classes, methods, and global data in documentations. For more information, see the [Sphinx documentation](https://www.sphinx-doc.org/en/master/usage/restructuredtext/domains.html#cross-referencing-python-objects). +### Quickstart + +Flyte Documentation is primarily maintained in two locations: [flyte](https://github.com/flyteorg/flyte) and [flytesnacks](https://github.com/flyteorg/flytesnacks). + +#### Tips +The following are some tips to include various content: +* **Images** + Flyte maintains all static resources in [static-resources-repo](https://github.com/flyteorg/static-resources). + You should upload your images to this repo and open the PR, and then refer to the image in the documentation. + Notice that the image URL should be in the format `https://raw.githubusercontent.com/flyteorg/static-resources//`. +* **Source code references (Link format)**
 + `.rst` example: + ```{code-block} + .. raw:: html + + <a href="https://github.com/flyteorg//blob//#L-L">View source code on GitHub</a> + ``` + + `.md` example: + ```{code-block} + [View source code on GitHub](https://github.com/flyteorg//blob//#L-L) + ``` +* **Source code references (Embedded format)**
 + `.rst` example: + ```{code-block} + .. rli:: https://raw.githubusercontent.com/flyteorg/// + :lines: - + ``` + + `.md` example: + ````{code-block} + ```{rli} https://raw.githubusercontent.com/flyteorg/// + :lines: - + ``` + ```` + +This way, the nested code block is properly displayed without breaking the Markdown structure. + +#### Open a pull request +[This is an example PR](https://github.com/flyteorg/flyte/pull/5844) + +Each time you update your PR, it triggers the CI build, so there’s no need to build the docs locally. Flyte uses the CI process `"docs/readthedocs.org:flyte"`, which builds the documentation after each PR. +Be sure to include the following CI-build preview link in your PR description so reviewers can easily preview the changes: +```{code-block} +https://flyte--.org.readthedocs.build/en//.html +``` +The relative path is based on the `docs` directory. +For example, if the full path is `flyte/docs/user_guide/advanced_composition/chaining_flyte_entities.md`, then the relative path would be `user_guide/advanced_composition/chaining_flyte_entities` + `.html`. + +#### Important note +In the `flytesnacks` repository, most Python comments using `# xxxx` are not imported into the documentation. +You may notice some overlap between `flytesnacks` and `flyte` docs, but what is displayed primarily comes from the `flyte` repository. + +Otherwise, take care of the following points: +````{important} +* Make sure `:lines:` are aligned correctly. +* Use gitsha to specify the example code instead of using master branch or relative path, as this ensures 100% accuracy. +* Build the documentation by submitting a PR instead of building it locally. +* For `flytesnacks`, run `make fmt` before submitting the PR. +* Before uploading commits, use `git commit -s` to sign off. This step is often forgotten during the first submission. +* Run `codespell` on the modified files to check for any spelling mistakes before pushing. 
+* When using reference code or images, use gitsha along with GitHub raw content links. +```` diff --git a/docs/deployment/configuration/general.rst b/docs/deployment/configuration/general.rst index 0b97f8b8ff..c68f2a8be6 100644 --- a/docs/deployment/configuration/general.rst +++ b/docs/deployment/configuration/general.rst @@ -66,7 +66,7 @@ Notice how in this example we are defining a new PodTemplate inline, which allow `V1PodSpec `__ and also define the name of the primary container, labels, and annotations. -The term compile-time here refers to the fact that the pod template definition is part of the `TaskSpec `__. +The term compile-time here refers to the fact that the pod template definition is part of the `TaskSpec `__. ******************** Runtime PodTemplates @@ -88,7 +88,7 @@ initializes a K8s informer internally to track system PodTemplate updates `aware `__ of the latest PodTemplate definitions in the K8s environment. You can find this setting in `FlytePropeller `__ -config map, which is not set by default. +config map, which is not set by default. An example configuration is: @@ -101,14 +101,14 @@ An example configuration is: image: "cr.flyte.org/flyteorg/flytecopilot:v0.0.15" start-timeout: "30s" default-pod-template-name: - + Create a PodTemplate resource ============================= -Flyte recognizes PodTemplate definitions with the ``default-pod-template-name`` at two granularities. +Flyte recognizes PodTemplate definitions with the ``default-pod-template-name`` at two granularities. 1. A system-wide configuration can be created in the same namespace that - FlytePropeller is running in (typically `flyte`). + FlytePropeller is running in (typically `flyte`). 2. PodTemplates can be applied from the same namespace that the Pod will be created in. FlytePropeller always favors the PodTemplate with the more specific namespace. 
For example, a Pod created in the ``flytesnacks-development`` @@ -128,6 +128,9 @@ as the base container configuration for all primary containers. If both containe names exist in the default PodTemplate, Flyte first applies the default configuration, followed by the primary configuration. +Note: Init containers can be configured with similar granularity using "default-init" +and "primary-init" init container names. + The ``containers`` field is required in each k8s PodSpec. If no default configuration is desired, specifying a container with a name other than "default" or "primary" (for example, "noop") is considered best practice. Since Flyte only @@ -193,7 +196,7 @@ where you start the Pod. An example PodTemplate is shown: .. code-block:: yaml - + apiVersion: v1 kind: PodTemplate metadata: @@ -217,7 +220,7 @@ In addition, the K8s plugin configuration in FlytePropeller defines the default Pod Labels, Annotations, and enables the host networking. .. code-block:: yaml - + plugins: k8s: default-labels: @@ -230,7 +233,7 @@ Pod Labels, Annotations, and enables the host networking. To construct a Pod, FlytePropeller initializes a Pod definition using the default PodTemplate. This definition is applied to the K8s plugin configuration values, and any task-specific configuration is overlaid. During the process, when lists -are merged, values are appended and when maps are merged, the values are overridden. +are merged, values are appended and when maps are merged, the values are overridden. The resultant Pod using the above default PodTemplate and K8s Plugin configuration is shown: .. code-block:: yaml diff --git a/docs/deployment/configuration/monitoring.rst b/docs/deployment/configuration/monitoring.rst index 48239288f4..1e9e26763e 100644 --- a/docs/deployment/configuration/monitoring.rst +++ b/docs/deployment/configuration/monitoring.rst @@ -5,7 +5,7 @@ Monitoring .. tags:: Infrastructure, Advanced -.. 
tip:: The Flyte core team publishes and maintains Grafana dashboards built using Prometheus data sources, which can be found `here `__. +.. tip:: The Flyte core team publishes and maintains Grafana dashboards built using Prometheus data sources. You can import them to your Grafana instance from the `Grafana marketplace `__. Metrics for Executions ====================== @@ -87,53 +87,81 @@ Flyte Backend is written in Golang and exposes stats using Prometheus. The stats Both ``flyteadmin`` and ``flytepropeller`` are instrumented to expose metrics. To visualize these metrics, Flyte provides three Grafana dashboards, each with a different focus: -- **User-facing dashboards**: Dashboards that can be used to triage/investigate/observe performance and characteristics of workflows and tasks. - The user-facing dashboard is published under ID `13980 `__ in the Grafana marketplace. +- **User-facing dashboard**: it can be used to investigate performance and characteristics of workflow and task executions. It's published under ID `22146 `__ in the Grafana marketplace. - **System Dashboards**: Dashboards that are useful for the system maintainer to investigate the status and performance of their Flyte deployments. These are further divided into: - - `DataPlane/FlytePropeller `__: execution engine status and performance. - - `ControlPlane/Flyteadmin `__: API-level monitoring. + - Data plane (``flytepropeller``) - `21719 `__: execution engine status and performance. + - Control plane (``flyteadmin``) - `21720 `__: API-level monitoring. -The corresponding JSON files for each dashboard are also located at ``deployment/stats/prometheus``. +The corresponding JSON files for each dashboard are also located in the ``flyte`` repository at `deployment/stats/prometheus `__. .. note:: The dashboards are basic dashboards and do not include all the metrics exposed by Flyte. Feel free to use the scripts provided `here `__ to improve and -hopefully- contribute the improved dashboards. 
-How to use the dashboards -~~~~~~~~~~~~~~~~~~~~~~~~~ - -1. We recommend installing and configuring the Prometheus operator as described in `their docs `__. -This is especially true if you plan to use the Service Monitors provided by the `flyte-core `__ Helm chart. - -2. Enable the Prometheus instance to use Service Monitors in the namespace where Flyte is running, configuring the following keys in the ``prometheus`` resource: - -.. code-block:: yaml - - spec: - serviceMonitorSelector: {} - serviceMonitorNamespaceSelector: {} - -.. note:: - - The above example configuration lets Prometheus use any ``ServiceMonitor`` in any namespace in the cluster. Adjust the configuration to reduce the scope if needed. - -3. Once you have installed and configured the Prometheus operator, enable the Service Monitors in the Helm chart by configuring the following keys in your ``values`` file: - -.. code-block:: yaml - - flyteadmin: - serviceMonitor: - enabled: true - - flytepropeller: - serviceMonitor: - enabled: true - +Setup instructions +~~~~~~~~~~~~~~~~~~ + +The dashboards rely on a working Prometheus deployment with access to your Kubernetes cluster and Flyte pods. +Additionally, the user dashboard uses metrics that come from ``kube-state-metrics``. Both of these requirements can be fulfilled by installing the `kube-prometheus-stack `__. + +Once the prerequisites are in place, follow the instructions in this section to configure metrics scraping for the corresponding Helm chart: + +.. tabs:: + + .. group-tab:: flyte-core + + Save the following in a ``flyte-monitoring-overrides.yaml`` file and run a ``helm upgrade`` operation pointing to that ``--values`` file: + + .. 
 code-block:: yaml + + flyteadmin: + serviceMonitor: + enabled: true + labels: + release: kube-prometheus-stack #This is particular to the kube-prometheus-stack + selectorLabels: + - app.kubernetes.io/name: flyteadmin + flytepropeller: + serviceMonitor: + enabled: true + labels: + release: kube-prometheus-stack + selectorLabels: + - app.kubernetes.io/name: flytepropeller + service: + enabled: true + + The above configuration enables the ``serviceMonitor`` that Prometheus can then use to automatically discover services and scrape metrics from them. + + .. group-tab:: flyte-binary + + Save the following in a ``flyte-monitoring-overrides.yaml`` file and run a ``helm upgrade`` operation pointing to that ``--values`` file: + + .. code-block:: yaml + + configuration: + inline: + propeller: + prof-port: 10254 + metrics-prefix: "flyte:" + scheduler: + profilerPort: 10254 + metricsScope: "flyte:" + flyteadmin: + profilerPort: 10254 + service: + extraPorts: + - name: http-metrics + protocol: TCP + port: 10254 + + The above configuration enables the ``serviceMonitor`` that Prometheus can then use to automatically discover services and scrape metrics from them. + .. note:: By default, the ``ServiceMonitor`` is configured with a ``scrapeTimeout`` of 30s and ``interval`` of 60s. You can customize these values if needed. -With the above configuration in place you should be able to import the dashboards in your Grafana instance. +With the above configuration completed, you should be able to import the dashboards in your Grafana instance. 
diff --git a/docs/deployment/configuration/performance.rst b/docs/deployment/configuration/performance.rst index 8c9c31030d..db4af7ea98 100644 --- a/docs/deployment/configuration/performance.rst +++ b/docs/deployment/configuration/performance.rst @@ -270,7 +270,7 @@ The hash shard Strategy, denoted by ``type: Hash`` in the configuration below, u type: Hash # use the "hash" shard strategy shard-count: 4 # the total number of shards -The project and domain shard strategies, denoted by ``type: project`` and ``type: domain`` respectively, use the Flyte workflow project and domain metadata to shard Flyte workflows. These shard strategies are configured using a ``per-shard-mapping`` option, which is a list of IDs. Each element in the ``per-shard-mapping`` list defines a new shard, and the ID list assigns responsibility for the specified IDs to that shard. A shard configured as a single wildcard ID (i.e. ``*``) is responsible for all IDs that are not covered by other shards. Only a single shard may be configured with a wildcard ID and, on that shard, there must be only one ID, namely the wildcard. +The project and domain shard strategies, denoted by ``type: Project`` and ``type: Domain`` respectively, use the Flyte workflow project and domain metadata to shard Flyte workflows. These shard strategies are configured using a ``per-shard-mapping`` option, which is a list of IDs. Each element in the ``per-shard-mapping`` list defines a new shard, and the ID list assigns responsibility for the specified IDs to that shard. A shard configured as a single wildcard ID (i.e. ``*``) is responsible for all IDs that are not covered by other shards. Only a single shard may be configured with a wildcard ID and, on that shard, there must be only one ID, namely the wildcard. .. code-block:: yaml @@ -281,7 +281,7 @@ The project and domain shard strategies, denoted by ``type: project`` and ``type # pod and scanning configuration redacted # ... 
shard: - type: project # use the "project" shard strategy + type: Project # use the "Project" shard strategy per-shard-mapping: # a list of per shard mappings - one shard is created for each element - ids: # the list of ids to be managed by the first shard - flytesnacks @@ -298,7 +298,7 @@ The project and domain shard strategies, denoted by ``type: project`` and ``type # pod and scanning configuration redacted # ... shard: - type: domain # use the "domain" shard strategy + type: Domain # use the "Domain" shard strategy per-shard-mapping: # a list of per shard mappings - one shard is created for each element - ids: # the list of ids to be managed by the first shard - production diff --git a/docs/user_guide/advanced_composition/conditionals.md b/docs/user_guide/advanced_composition/conditionals.md index 3afca88772..27fb05357b 100644 --- a/docs/user_guide/advanced_composition/conditionals.md +++ b/docs/user_guide/advanced_composition/conditionals.md @@ -18,9 +18,9 @@ To clone and run the example code on this page, see the [Flytesnacks repo][flyte To begin, import the necessary libraries. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py -:lines: 1-3 +:lines: 1-4 ``` ## Simple branch @@ -29,9 +29,9 @@ In this example, we introduce two tasks, `calculate_circle_circumference` and `calculate_circle_area`. The workflow dynamically chooses between these tasks based on whether the input falls within the fraction range (0-1) or not. 
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py -:lines: 11-37 +:lines: 12-38 ``` ## Multiple branches @@ -40,7 +40,7 @@ We establish an `if` condition with multiple branches, which will result in a fa It's important to note that any `conditional` statement in Flyte is expected to be complete, meaning that all possible branches must be accounted for. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :pyobject: shape_properties_with_multiple_branches ``` @@ -55,9 +55,9 @@ a convention also observed in other libraries. ## Consuming the output of a conditional Here, we write a task that consumes the output returned by a `conditional`. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py -:lines: 66-81 +:lines: 67-85 ``` ## Using the output of a previous task in a conditional @@ -66,9 +66,9 @@ You can check if a boolean returned from the previous task is `True`, but unary operations are not supported directly. 
Instead, use the `is_true`, `is_false` and `is_none` methods on the result. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py -:lines: 89-119 +:lines: 93-123 ``` :::{note} @@ -79,7 +79,7 @@ Inputs and outputs are automatically encapsulated in a special object known as { ## Using boolean workflow inputs in a conditional You can directly pass a boolean to a workflow. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :pyobject: boolean_input_wf ``` @@ -92,9 +92,9 @@ This special object enables it to exhibit additional behavior. You can run the workflows locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py -:lines: 129-135 +:lines: 133-139 ``` ## Nested conditionals @@ -102,9 +102,9 @@ You can run the workflows locally as follows: You can nest conditional sections arbitrarily inside other conditional sections. However, these nested sections can only be in the `then` part of a `conditional` block. 
-```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py -:lines: 142-164 +:lines: 146-168 ``` ## Using the output of a task in a conditional @@ -112,16 +112,16 @@ However, these nested sections can only be in the `then` part of a `conditional` Let's write a fun workflow that triggers the `calculate_circle_circumference` task in the event of a "heads" outcome, and alternatively, runs the `calculate_circle_area` task in the event of a "tail" outcome. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py :pyobject: consume_task_output ``` You can run the workflow locally as follows: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py -:lines: 181-188 +:lines: 216-225 ``` ## Running a noop task in a conditional @@ -138,9 +138,9 @@ task-plugins: ``` ::: -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/89bf7bc7788802097904c5f9ffb75ba70ef980a6/examples/advanced_composition/advanced_composition/conditional.py +```{rli} 
https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py :caption: advanced_composition/conditional.py -:lines: 197-209 +:lines: 200-212 ``` ## Run the example on the Flyte cluster @@ -149,49 +149,49 @@ To run the provided workflows on the Flyte cluster, use the following commands: ``` pyflyte run --remote \ - https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py \ + https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py \ shape_properties --radius 3.0 ``` ``` pyflyte run --remote \ - https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py \ + https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py \ shape_properties_with_multiple_branches --radius 11.0 ``` ``` pyflyte run --remote \ - https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py \ + https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py \ shape_properties_accept_conditional_output --radius 0.5 ``` ``` pyflyte run --remote \ - https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py \ + https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py \ boolean_wf ``` ``` 
pyflyte run --remote \ - https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py \ + https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py \ boolean_input_wf --boolean_input ``` ``` pyflyte run --remote \ - https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py \ + https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py \ nested_conditions --radius 0.7 ``` ``` pyflyte run --remote \ - https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/advanced_composition/advanced_composition/conditional.py \ + https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py \ consume_task_output --radius 0.4 --seed 7 ``` ``` pyflyte run --remote \ - https://raw.githubusercontent.com/flyteorg/flytesnacks/89bf7bc7788802097904c5f9ffb75ba70ef980a6/examples/advanced_composition/advanced_composition/conditional.py \ + https://raw.githubusercontent.com/flyteorg/flytesnacks/656e63d1c8dded3e9e7161c7af6425e9fcd43f56/examples/advanced_composition/advanced_composition/conditional.py \ noop_in_conditional --radius 0.4 --seed 5 ``` diff --git a/docs/user_guide/concepts/component_architecture/flytepropeller_architecture.rst b/docs/user_guide/concepts/component_architecture/flytepropeller_architecture.rst index 6de45566f0..f564b9d12c 100644 --- a/docs/user_guide/concepts/component_architecture/flytepropeller_architecture.rst +++ b/docs/user_guide/concepts/component_architecture/flytepropeller_architecture.rst @@ -15,18 +15,20 @@ 
Introduction A Flyte :ref:`workflow ` is represented as a Directed Acyclic Graph (DAG) of interconnected Nodes. Flyte supports a robust collection of Node types to ensure diverse functionality. - ``TaskNodes`` support a plugin system to externally add system integrations. -- ``BranchNodes`` allow altering the control flow during runtime; pruning downstream evaluation paths based on input. +- ``BranchNodes`` allow altering the control flow during runtime; pruning downstream evaluation paths based on input. - ``DynamicNodes`` add nodes to the DAG. - ``WorkflowNodes`` allow embedding workflows within each other. -FlytePropeller is responsible for scheduling and tracking execution of Flyte workflows. It is implemented using a K8s controller that follows the reconciler pattern. +FlytePropeller is responsible for scheduling and tracking execution of Flyte workflows. It is implemented using a K8s controller that follows the reconciler pattern. .. image:: https://raw.githubusercontent.com/flyteorg/static-resources/main/common/reconciler-pattern.png In this scheme, resources are periodically evaluated and the goal is to transition from the observed state to a requested state. -In our case, workflows are the resources, whose desired stated (*workflow definition*) is expressed using Flyte's SDK. Workflows are iteratively evaluated to transition from the current state to success. During each evaluation loop, the current workflow state is established as the `phase of workflow nodes `__ and subsequent tasks, and FlytePropeller performs operations to transition this state to success. -The operations may include scheduling (or rescheduling) node executions, evaluating dynamic or branch nodes, etc. +In our case, workflows are the resources, whose desired stated (*workflow definition*) is expressed using Flyte's SDK. Workflows are iteratively evaluated to transition from the current state to success. 
+During each evaluation loop, the current workflow state is established as the `phase of workflow nodes `__ and subsequent tasks, +and FlytePropeller performs operations to transition this state to success. +The operations may include scheduling (or rescheduling) node executions, evaluating dynamic or branch nodes, etc. By using a simple yet robust mechanism, FlytePropeller can scale to manage a large number of concurrent workflows without significant performance degradation. @@ -43,36 +45,36 @@ FlyteAdmin is the common entry point, where initialization of FlyteWorkflow Cust FlyteWorkflow CRD / K8s Integration ----------------------------------- -Workflows in Flyte are maintained as `Custom Resource Definitions (CRDs) `__ in Kubernetes, which are stored in the backing ``etcd`` key-value store. Each workflow execution results in the creation of a new ``flyteworkflow`` CR (Custom Resource) which maintains its state for the duration of the execution. CRDs provide variable definitions to describe both resource specifications (``spec``) and status (``status``). The ``flyteworkflow`` CRD uses the ``spec`` subsection to detail the workflow DAG, embodying node dependencies, etc. +Workflows in Flyte are maintained as `Custom Resource Definitions (CRDs) `__ in Kubernetes, which are stored in the backing ``etcd`` key-value store. Each workflow execution results in the creation of a new ``flyteworkflow`` CR (Custom Resource) which maintains its state for the duration of the execution. CRDs provide variable definitions to describe both resource specifications (``spec``) and status (``status``). The ``flyteworkflow`` CRD uses the ``spec`` subsection to detail the workflow DAG, embodying node dependencies, etc. **Example** 1. Execute an `example workflow `__ on a remote Flyte cluster: -.. code-block:: bash +.. code-block:: bash pyflyte run --remote example.py training_workflow --hyperparameters '{"C": 0.4}' 2. 
Verify there's a new Custom Resource on the ``flytesnacks-development`` namespace (this is, the workflow belongs to the ``flytesnacks`` project and the ``development`` domain): -.. code-block:: bash +.. code-block:: bash kubectl get flyteworkflows.flyte.lyft.com -n flytesnacks-development Example output: -.. code-block:: bash +.. code-block:: bash NAME AGE - f7616dc75400f43e6920 3h42m + f7616dc75400f43e6920 3h42m 3. Describe the contents of the Custom Resource, for example the ``spec`` section: -.. code-block:: bash +.. code-block:: bash - kubectl describe flyteworkflows.flyte.lyft.com f7616dc75400f43e6920 -n flytesnacks-development + kubectl describe flyteworkflows.flyte.lyft.com f7616dc75400f43e6920 -n flytesnacks-development -.. code-block:: json +.. code-block:: json "spec": { "connections": { @@ -93,7 +95,7 @@ Example output: The status subsection tracks workflow metadata including overall workflow status, node/task phases, status/phase transition timestamps, etc. -.. code-block:: json +.. code-block:: json "status": { "dataDir": "gs://flyteontf-gcp-data-116223838137/metadata/propeller/flytesnacks-development-f7616dc75400f43e6920", @@ -123,7 +125,7 @@ The status subsection tracks workflow metadata including overall workflow status }, -K8s exposes a powerful controller/operator API that enables entities to track creation/updates over a specific resource type. FlytePropeller uses this API to track FlyteWorkflows, meaning every time an instance of the ``flyteworkflow`` CR is created/updated, the FlytePropeller instance is notified. +K8s exposes a powerful controller/operator API that enables entities to track creation/updates over a specific resource type. FlytePropeller uses this API to track FlyteWorkflows, meaning every time an instance of the ``flyteworkflow`` CR is created/updated, the FlytePropeller instance is notified. .. 
note:: @@ -138,7 +140,7 @@ FlytePropeller supports concurrent execution of multiple, unique workflows using The WorkQueue is a FIFO queue storing workflow ID strings that require a lookup to retrieve the FlyteWorkflow CR to ensure up-to-date status. A workflow may be added to the queue in a variety of circumstances: #. A new FlyteWorkflow CR is created or an existing instance is updated -#. The K8s Informer detects a workflow timeout or failed liveness check during its periodic resync operation on the FlyteWorkflow. +#. The K8s Informer detects a workflow timeout or failed liveness check during its periodic resync operation on the FlyteWorkflow. #. A FlytePropeller worker experiences an error during a processing loop #. The WorkflowExecutor observes a completed downstream node #. A NodeHandler observes state change and explicitly enqueues its owner. (For example, K8s pod informer observes completion of a task.) @@ -153,15 +155,15 @@ The WorkflowExecutor is responsible for handling high-level workflow operations. NodeExecutor ------------ -The NodeExecutor is executed on a single node, beginning with the workflow's start node. It traverses the workflow using a visitor pattern with a modified depth-first search (DFS), evaluating each node along the path. A few examples of node evaluation based on phase include: +The NodeExecutor is executed on a single node, beginning with the workflow's start node. It traverses the workflow using a visitor pattern with a modified depth-first search (DFS), evaluating each node along the path. A few examples of node evaluation based on phase include: * Successful nodes are skipped * Unevaluated nodes are queued for processing -* Failed nodes may be reattempted up to a configurable threshold. +* Failed nodes may be reattempted up to a configurable threshold. There are many configurable parameters to tune evaluation criteria including max parallelism which restricts the number of nodes which may be scheduled concurrently. 
Additionally, nodes may be retried to ensure recoverability on failure. -Go to the `Optimizing Performance `__ section for more information on how to tune Propeller parameters. +Go to the `Optimizing Performance `__ section for more information on how to tune Propeller parameters. The NodeExecutor is also responsible for linking data readers/writers to facilitate data transfer between node executions. The data transfer process occurs automatically within Flyte, using efficient K8s events rather than a polling listener pattern which incurs more overhead. Relatively small amounts of data may be passed between nodes inline, but it is more common to pass data URLs to backing storage. A component of this is writing to and checking the data cache, which facilitates the reuse of previously completed evaluations. @@ -196,4 +198,3 @@ Every operation that Propeller performs makes use of a plugin. The following dia .. image:: https://raw.githubusercontent.com/flyteorg/static-resources/main/flyte/concepts/architecture/flytepropeller_plugins_architecture.png - \ No newline at end of file diff --git a/docs/user_guide/concepts/component_architecture/native_scheduler_architecture.rst b/docs/user_guide/concepts/component_architecture/native_scheduler_architecture.rst index a923403625..27f6b3e344 100644 --- a/docs/user_guide/concepts/component_architecture/native_scheduler_architecture.rst +++ b/docs/user_guide/concepts/component_architecture/native_scheduler_architecture.rst @@ -35,7 +35,7 @@ Components Schedule Management ------------------- -This component supports creation/activation and deactivation of schedules. Each schedule is tied to a launch plan and is versioned in a similar manner. The schedule is created or its state is changed to activated/deactivated whenever the `admin API `__ is invoked for it with `ACTIVE/INACTIVE state `__. This is done either through `flytectl `__ or through any other client that calls the GRPC API. 
+This component supports creation/activation and deactivation of schedules. Each schedule is tied to a launch plan and is versioned in a similar manner. The schedule is created or its state is changed to activated/deactivated whenever the `admin API `__ is invoked for it with `ACTIVE/INACTIVE state `__. This is done either through `flytectl `__ or through any other client that calls the GRPC API. The API is similar to a launchplan, ensuring that only one schedule is active for a given launchplan. diff --git a/docs/user_guide/concepts/control_plane/domains.rst b/docs/user_guide/concepts/control_plane/domains.rst index bb306924dd..878d8abc2f 100644 --- a/docs/user_guide/concepts/control_plane/domains.rst +++ b/docs/user_guide/concepts/control_plane/domains.rst @@ -5,9 +5,8 @@ Domains .. tags:: Basic, Glossary -Domains provide an abstraction to isolate resources and feature configuration for different -deployment environments. +Domains are fixed and unique at the global level, and provide an abstraction to isolate resources and feature configuration for different deployment environments. For example: We develop and deploy Flyte workflows in development, staging, and production. We configure Flyte domains with those names, and specify lower resource limits on the development and staging domains than production domains. -We also use domains to disable launch plans and schedules from development and staging domains, since those features are typically meant for production deployments. \ No newline at end of file +We also use domains to disable launch plans and schedules from development and staging domains, since those features are typically meant for production deployments. 
diff --git a/docs/user_guide/concepts/main_concepts/data_management.rst b/docs/user_guide/concepts/main_concepts/data_management.rst index 0d4edbd0a8..6bb6eee730 100644 --- a/docs/user_guide/concepts/main_concepts/data_management.rst +++ b/docs/user_guide/concepts/main_concepts/data_management.rst @@ -9,47 +9,47 @@ Understand How Flyte Handles Data Types of Data ============= -There are two parts to the data in Flyte: +In Flyte, data is categorized into metadata and raw data to optimize data handling and improve performance and security. -1. Metadata +* **Metadata**: Small values, like integers and strings, are treated as "stack parameters" (passed by value). This metadata is globally accessible to Flyte components (FlytePropeller, FlyteAdmin, and other running pods/jobs). Each entry is limited to 10MB and is passed directly between tasks. On top of that, metadata allows in-memory computations for branches, partial outputs, and composition of multiple outputs as input for other tasks. -* It consists of data about inputs to a task, and other artifacts. -* It is configured globally for FlytePropeller, FlyteAdmin etc., and the running pods/jobs need access to this bucket to get the data. +* **Raw data**: Larger data, such as files and dataframes, are treated as "heap parameters" (passed by reference). Flyte stores raw data in an object store (e.g., S3), uploading it on first use and passing only a reference thereafter. Tasks can then access this data via Flyte’s automated download or streaming, enabling efficient access to large datasets without needing to transfer full copies. -2. Raw data +*Source code reference for auto-offloading value sizes limitation:* -* It is the actual data (such as the Pandas DataFrame, Spark DataFrame, etc.). -* Raw data paths are unique for every execution, and the prefixes can be modified per execution. -* None of the Flyte control plane components would access the raw data. 
This provides great separation of data between the control plane and the data plane. +.. raw:: html -.. note: - Metadata and raw data can be present in entirely separate buckets. + View source code on GitHub +Data Flow and Security +~~~~~~~~~~~~~~~~~~~~~~ -Let us consider a simple Python task: +Flyte’s data separation avoids bottlenecks and security risks: + +* **Metadata** remains within Flyte’s control plane, making it accessible through the Flyte Console or CLI. +* **Raw Data** is accessible only by tasks, stored securely in an external blob store, preventing Flyte’s control plane from directly handling large data files. + +Moreover, a unique property of this separation is that all meta values are read by FlytePropeller engine and available on the FlyteConsole or CLI from the control plane. + +Example +~~~~~~~ + +Consider a basic Flyte task: .. code-block:: python - @task - def my_task(m: int, n: str, o: FlyteFile) -> pd.DataFrame: - ... + @task + def my_task(m: int, n: str, o: FlyteFile) -> pd.DataFrame: + ... -In the above code sample, ``m``, ``n``, ``o`` are inputs to the task. -``m`` of type ``int`` and ``n`` of type ``str`` are simple primitive types, while ``o`` is an arbitrarily sized file. -All of them from Flyte's point of view are ``data``. -The difference lies in how Flyte stores and passes each of these data items. -For every task that receives input, Flyte sends an **Inputs Metadata** object, which contains all the primitive or simple scalar values inlined, but in the case of -complex, large objects, they are offloaded and the `Metadata` simply stores a reference to the object. In our example, ``m`` and ``n`` are inlined while -``o`` and the output ``pd.DataFrame`` are offloaded to an object store, and their reference is captured in the metadata. +In this task, ``m``, ``n``, and ``o`` are inputs: ``m`` (int) and ``n`` (str) are simple types, while ``o`` is a large, arbitrarily sized file. 
+Flyte treats each differently: -`Flytekit TypeTransformers` make it possible to use complex objects as if they are available locally - just like persistent filehandles. But Flyte backend only deals with -the references. +* Metadata: Small values like ``m`` and ``n`` are inlined within Flyte’s metadata and passed directly between tasks. +* Raw data: Objects like ``o`` and the output pd.DataFrame are offloaded to an object store (e.g., S3), with only references retained in metadata. -Thus, primitive data types and references to large objects fall under Metadata - `Meta input` or `Meta output`, and the actual large object is known as **Raw data**. -A unique property of this separation is that all `meta values` are read by FlytePropeller engine and available on the FlyteConsole or CLI from the control plane. -`Raw` data is not read by any of the Flyte components and hence it is possible to store it in a completely separate blob storage or alternate stores, which can't be accessed by Flyte control plane components -but can be accessed by users's container/tasks. +Flytekit TypeTransformers make it possible to use complex objects as if they are available locally, just like persistent filehandles. However, the Flyte backend only deals with the references. Raw Data Prefix ~~~~~~~~~~~~~~~ @@ -57,22 +57,17 @@ Raw Data Prefix Every task can read/write its own data files. If ``FlyteFile`` or any natively supported type like ``pandas.DataFrame`` is used, Flyte will automatically offload and download data from the configured object-store paths. These paths are completely customizable per `LaunchPlan` or `Execution`. -- The default Rawoutput path (prefix in an object store like S3/GCS) can be configured during registration as shown in :std:ref:`flytectl_register_files`. +* The default Rawoutput path (prefix in an object store like S3/GCS) can be configured during registration as shown in :std:ref:`flytectl_register_files`. 
The argument ``--outputLocationPrefix`` allows us to set the destination directory for all the raw data produced. Flyte will create randomized folders in this path to store the data. -- To override the ``RawOutput`` path (prefix in an object store like S3/GCS), you can specify an alternate location when invoking a Flyte execution, as shown in the following screenshot of the LaunchForm in FlyteConsole: +* To override the ``RawOutput`` path (prefix in an object store like S3/GCS), + you can specify an alternate location when invoking a Flyte execution, as shown in the following screenshot of the LaunchForm in FlyteConsole: - .. image:: https://raw.githubusercontent.com/flyteorg/static-resources/main/flyte/concepts/data_movement/launch_raw_output.png + .. image:: https://raw.githubusercontent.com/flyteorg/static-resources/9cb3d56d7f3b88622749b41ff7ad2d3ebce92726/flyte/concepts/data_movement/launch_raw_output.png -- In the sandbox, the default Rawoutput-prefix is configured to be the root of the local bucket. Hence Flyte will write all the raw data (reference types like blob, file, df/schema/parquet, etc.) under a path defined by the execution. +* In the sandbox, the default Rawoutput-prefix is configured to be the root of the local bucket. + Hence Flyte will write all the raw data (reference types like blob, file, df/schema/parquet, etc.) under a path defined by the execution. -Metadata -~~~~~~~~ - -Metadata in Flyte is critical to enable the passing of data between tasks. It allows to perform in-memory computations for branches or send partial outputs from one task to another or compose outputs from multiple tasks into one input to be sent to a task. - -Thus, metadata is restricted due to its omnipresence. Each `meta output`/`input` cannot be larger than 1MB. If you have `List[int]`, it cannot be larger than 1MB, considering other input entities. In scenarios where large lists or strings need to be sent between tasks, file abstraction is preferred. 
- ``LiteralType`` & Literals ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -154,16 +149,86 @@ The illustration below explains how data flows from engine to the task and how t We could use fast metadata stores to speed up data movement or exploit locality. Between Flytepropeller and Tasks -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. image:: https://raw.githubusercontent.com/flyteorg/static-resources/main/flyte/concepts/data_movement/flyte_data_movement.png +.. image:: https://raw.githubusercontent.com/flyteorg/static-resources/9cb3d56d7f3b88622749b41ff7ad2d3ebce92726/flyte/concepts/data_movement/flyte_data_movement.png Between Tasks -~~~~~~~~~~~~~~ +~~~~~~~~~~~~~ + +.. image:: https://raw.githubusercontent.com/flyteorg/static-resources/9cb3d56d7f3b88622749b41ff7ad2d3ebce92726/flyte/concepts/data_movement/flyte_data_transfer.png + +Practical Example +~~~~~~~~~~~~~~~~~ -.. image:: https://raw.githubusercontent.com/flyteorg/static-resources/main/flyte/concepts/data_movement/flyte_data_transfer.png +Let's consider a simple example where we have some tasks that needs to operate huge dataframes. +The first task reads a file from the object store, shuffles the data, saves to local disk, and passes the path to the next task. + +.. code-block:: python + + @task() + def task_read_and_shuffle_file(input_file: FlyteFile) -> FlyteFile: + """ + Reads the input file as a DataFrame, shuffles the rows, and writes the shuffled DataFrame to a new file. + """ + input_file.download() + df = pd.read_csv(input_file.path) + + # Shuffle the DataFrame rows + shuffled_df = df.sample(frac=1).reset_index(drop=True) + + output_file_path = "data_shuffle.csv" + shuffled_df.to_csv(output_file_path, index=False) + + return FlyteFile(output_file_path) + ... + +The second task reads the file from the previous task, removes a column, saves to local disk, and returns the path. + +.. 
code-block:: python + + @task() + def task_remove_column(input_file: FlyteFile, column_name: str) -> FlyteFile: + """ + Reads the input file as a DataFrame, removes a specified column, and outputs it as a new file. + """ + input_file.download() + df = pd.read_csv(input_file.path) + + # remove column + if column_name in df.columns: + df = df.drop(columns=[column_name]) + + output_file_path = "data_finished.csv" + df.to_csv(output_file_path, index=False) + + return FlyteFile(output_file_path) + ... + +And here is the workflow: + +.. code-block:: python + + @workflow + def wf() -> FlyteFile: + existed_file = FlyteFile("s3://custom-bucket/data.csv") + shuffled_file = task_read_and_shuffle_file(input_file=existed_file) + result_file = task_remove_column(input_file=shuffled_file, column_name="County") + return result_file + ... + +This example shows how to access an existing file in a MinIO bucket from the Flyte Sandbox and pass it between tasks with ``FlyteFile``. +When a workflow outputs a local file as a ``FlyteFile``, Flyte automatically uploads it to MinIO and provides an S3 URL for downstream tasks, no manual uploads needed. Take a look at the following: + +First task output metadata: + +.. image:: https://raw.githubusercontent.com/flyteorg/static-resources/9cb3d56d7f3b88622749b41ff7ad2d3ebce92726/flyte/concepts/data_movement/flyte_data_movement_example_output.png + +Second task input metadata: + +.. image:: https://raw.githubusercontent.com/flyteorg/static-resources/9cb3d56d7f3b88622749b41ff7ad2d3ebce92726/flyte/concepts/data_movement/flyte_data_movement_example_input.png Bringing in Your Own Datastores for Raw Data ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -174,3 +239,10 @@ For example, it is theoretically possible to use S3 ``s3://`` for metadata and G But for Metadata, the data should be accessible to Flyte control plane. Data persistence is also pluggable. By default, it supports all major blob stores and uses an interface defined in Flytestdlib. 
+ +Deleting Raw Data in Your Own Datastores +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Flyte does not offer a direct function to delete raw data stored in external datastores like ``S3`` or ``GCS``. However, you can manage deletion by configuring a lifecycle policy within your datastore service. + +If caching is enabled in your Flyte ``task``, ensure that the ``max-cache-age`` is set to be shorter than the lifecycle policy in your datastore to prevent potential data inconsistency issues. \ No newline at end of file diff --git a/docs/user_guide/concepts/main_concepts/tasks.rst b/docs/user_guide/concepts/main_concepts/tasks.rst index 8e5cc7aaec..90d3e1f750 100644 --- a/docs/user_guide/concepts/main_concepts/tasks.rst +++ b/docs/user_guide/concepts/main_concepts/tasks.rst @@ -123,3 +123,36 @@ Caching/Memoization Flyte supports memoization of task outputs to ensure that identical invocations of a task are not executed repeatedly, thereby saving compute resources and execution time. For example, if you wish to run the same piece of code multiple times, you can reuse the output instead of re-computing it. For more information on memoization, refer to the :std:doc:`/user_guide/development_lifecycle/caching`. + +.. rubric:: Retries and Spot Instances + +Tasks can define a retry strategy to handle different types of failures: + +1. **System Retries**: Used for infrastructure-level failures outside of user control: + - Spot instance preemptions + - Network issues + - Service unavailability + - Hardware failures + +*Important*: When running on spot/interruptible instances, preemptions count against the system retry budget, not the user retry budget. The last retry attempt automatically runs on a non-preemptible instance to ensure task completion. + +2. **User Retries**: Specified in the ``@task`` decorator (via ``retries`` parameter), used for: + - Application-level errors + - Invalid input handling + - Business logic failures + +.. 
code-block:: python + + @task(retries=3) # Sets user retry budget to 3 + def my_task() -> None: + ... + +.. rubric:: Alternative Retry Behavior + +Starting from 1.10.0, Flyte offers a simplified retry behavior where both system and user retries count towards a single retry budget defined in the task decorator. To enable this: + +1. Set ``configmap.core.propeller.node-config.ignore-retry-cause`` to ``true`` in helm values +2. Define retries in the task decorator to set the total retry budget +3. The last retries will automatically run on non-spot instances + +This provides a simpler, more predictable retry behavior while maintaining reliability. \ No newline at end of file diff --git a/docs/user_guide/customizing_dependencies/imagespec.md b/docs/user_guide/customizing_dependencies/imagespec.md index d9bf8f24bf..0e66eadd95 100644 --- a/docs/user_guide/customizing_dependencies/imagespec.md +++ b/docs/user_guide/customizing_dependencies/imagespec.md @@ -218,3 +218,11 @@ You can also force push an image in the Python code by calling the `force_push() image = ImageSpec(registry="ghcr.io/flyteorg", packages=["pandas"]).force_push() ``` [flytesnacks]: https://github.com/flyteorg/flytesnacks/tree/master/examples/customizing_dependencies/ + +## Getting source files into ImageSpec +Typically, getting source code files into a task's image at run time on a live Flyte backend is done through the fast registration mechanism. + +However, if your `ImageSpec` constructor specifies a `source_root` and the `copy` argument is set to something other than `CopyFileDetection.NO_COPY`, then files will be copied regardless of fast registration status. +If the `source_root` and `copy` fields of an `ImageSpec` are left blank, then whether or not your source files are copied into the built `ImageSpec` image depends on whether or not you use fast registration. 
Please see [registering workflows](https://docs.flyte.org/en/latest/flyte_fundamentals/registering_workflows.html#containerizing-your-project) for the full explanation. + +Since files are sometimes copied into the built image, the tag that is published for an ImageSpec will change based on whether fast register is enabled, and the contents of any files copied. diff --git a/docs/user_guide/customizing_dependencies/raw_containers.md b/docs/user_guide/customizing_dependencies/raw_containers.md index ae72b652f3..857d23788f 100644 --- a/docs/user_guide/customizing_dependencies/raw_containers.md +++ b/docs/user_guide/customizing_dependencies/raw_containers.md @@ -15,9 +15,9 @@ Refer to the raw protocol to understand how to leverage this. To clone and run the example code on this page, see the [Flytesnacks repo][flytesnacks]. ``` -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/customizing_dependencies/customizing_dependencies/raw_container.py +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/1f4256464615a788c40d95dfe36f120407a2826c/examples/customizing_dependencies/customizing_dependencies/raw_container.py :caption: customizing_dependencies/raw_container.py -:lines: 1-5 +:lines: 1-6 ``` ## Container tasks @@ -31,15 +31,17 @@ is `calculate_ellipse_area_shell`. This name has to be unique in the entire proj `inputs` and `outputs` specify the interface for the task; thus it should be an ordered dictionary of typed input and output variables. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/customizing_dependencies/customizing_dependencies/raw_container.py +[Cache](https://docs.flyte.org/en/latest/user_guide/development_lifecycle/caching.html) can be enabled in a `ContainerTask` by configuring the cache settings in the `TaskMetadata` in the `metadata` parameter. 
+ +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/1f4256464615a788c40d95dfe36f120407a2826c/examples/customizing_dependencies/customizing_dependencies/raw_container.py :caption: customizing_dependencies/raw_container.py -:lines: 15-112 +:lines: 16-118 ``` As can be seen in this example, `ContainerTask`s can be interacted with like normal Python functions, whose inputs correspond to the declared input variables. All data returned by the tasks are consumed and logged by a Flyte task. -```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/customizing_dependencies/customizing_dependencies/raw_container.py +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/1f4256464615a788c40d95dfe36f120407a2826c/examples/customizing_dependencies/customizing_dependencies/raw_container.py :caption: customizing_dependencies/raw_container.py :pyobject: wf ``` diff --git a/docs/user_guide/data_types_and_io/flytedirectory.md b/docs/user_guide/data_types_and_io/flytedirectory.md index 121a7d9b67..4ad2316ded 100644 --- a/docs/user_guide/data_types_and_io/flytedirectory.md +++ b/docs/user_guide/data_types_and_io/flytedirectory.md @@ -86,4 +86,21 @@ You can run the workflow locally as follows: :lines: 94-114 ``` + +## Streaming support + +Flyte `1.5` introduced support for streaming `FlyteDirectory` types via the `fsspec` library. +The `FlyteDirectory` streaming feature enables efficient streaming and handling of entire directories, simplifying operations involving multiple files. + +:::{note} +This feature is marked as experimental. We'd love feedback on the API! +::: + +Here is a simple example: you can accept a `FlyteDirectory` as an input, walk through it and copy the files to another `FlyteDirectory` one by one. 
+ +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/ddce0448141ea6d2cb148df52bf408874adb15ad/examples/data_types_and_io/data_types_and_io/file_streaming.py +:caption: data_types_and_io/file_streaming.py +:lines: 23-33 +``` + [flytesnacks]: https://github.com/flyteorg/flytesnacks/tree/master/examples/data_types_and_io/ diff --git a/docs/user_guide/data_types_and_io/flytefile.md b/docs/user_guide/data_types_and_io/flytefile.md index e9c02e2132..76dc0f6be8 100644 --- a/docs/user_guide/data_types_and_io/flytefile.md +++ b/docs/user_guide/data_types_and_io/flytefile.md @@ -8,9 +8,9 @@ Files are one of the most fundamental entities that users of Python work with, and they are fully supported by Flyte. In the IDL, they are known as -[Blob](https://github.com/flyteorg/flyteidl/blob/master/protos/flyteidl/core/literals.proto#L33) +[Blob](https://github.com/flyteorg/flyte/blob/master/flyteidl/protos/flyteidl/core/literals.proto#L33) literals which are backed by the -[blob type](https://github.com/flyteorg/flyteidl/blob/master/protos/flyteidl/core/types.proto#L47). +[blob type](https://github.com/flyteorg/flyte/blob/master/flyteidl/protos/flyteidl/core/types.proto#L73). Let's assume our mission here is pretty simple. We download a few CSV file links, read them with the python built-in {py:class}`csv.DictReader` function, @@ -90,4 +90,20 @@ You can enable type validation if you have the [python-magic](https://pypi.org/p Currently, type validation is only supported on the `Mac OS` and `Linux` platforms. ::: +## Streaming support + +Flyte `1.5` introduced support for streaming `FlyteFile` types via the `fsspec` library. +This integration enables efficient, on-demand access to remote files, eliminating the need for fully downloading them to local storage. + +:::{note} +This feature is marked as experimental. We'd love feedback on the API! 
+::: + +Here is a simple example of removing some columns from a CSV file and writing the result to a new file: + +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/ddce0448141ea6d2cb148df52bf408874adb15ad/examples/data_types_and_io/data_types_and_io/file_streaming.py +:caption: data_types_and_io/file_streaming.py +:lines: 8-20 +``` + [flytesnacks]: https://github.com/flyteorg/flytesnacks/tree/master/examples/data_types_and_io/ diff --git a/docs/user_guide/data_types_and_io/index.md b/docs/user_guide/data_types_and_io/index.md index d03df92804..3280054696 100644 --- a/docs/user_guide/data_types_and_io/index.md +++ b/docs/user_guide/data_types_and_io/index.md @@ -148,4 +148,5 @@ accessing_attributes pytorch_type enum_type pickle_type +tensorflow_type ``` diff --git a/docs/user_guide/data_types_and_io/structureddataset.md b/docs/user_guide/data_types_and_io/structureddataset.md index e4eed0a956..9a82610590 100644 --- a/docs/user_guide/data_types_and_io/structureddataset.md +++ b/docs/user_guide/data_types_and_io/structureddataset.md @@ -39,7 +39,7 @@ To begin, import the dependencies for the example: ```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 1-18 +:lines: 1-19 ``` Define a task that returns a Pandas DataFrame. @@ -68,7 +68,7 @@ First, initialize column types you want to extract from the `StructuredDataset`. ```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 30-31 +:lines: 31-32 ``` Define a task that opens a structured dataset by calling `all()`. 
@@ -78,7 +78,7 @@ For instance, you can use ``pa.Table`` to convert the Pandas DataFrame to a PyAr ```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 41-51 +:lines: 42-52 ``` The code may result in runtime failures if the columns do not match. @@ -91,7 +91,7 @@ and enable the CSV serialization by annotating the structured dataset with the C ```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 57-71 +:lines: 58-72 ``` ## Storage driver and location @@ -230,14 +230,14 @@ and the byte format, which in this case is `PARQUET`. ```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 127-129 +:lines: 128-130 ``` You can now use `numpy.ndarray` to deserialize the parquet file to NumPy and serialize a task's output (NumPy array) to a parquet file. ```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 134-149 +:lines: 135-148 ``` :::{note} @@ -248,7 +248,7 @@ You can run the code locally as follows: ```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 153-157 +:lines: 152-156 ``` ### The nested typed columns @@ -261,7 +261,7 @@ Nested field StructuredDataset should be run when flytekit version > 1.11.0. 
```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/69dbe4840031a85d79d9ded25f80397c6834752d/examples/data_types_and_io/data_types_and_io/structured_dataset.py :caption: data_types_and_io/structured_dataset.py -:lines: 159-270 +:lines: 158-285 ``` [flytesnacks]: https://github.com/flyteorg/flytesnacks/tree/master/examples/data_types_and_io/ diff --git a/docs/user_guide/data_types_and_io/tensorflow_type.md b/docs/user_guide/data_types_and_io/tensorflow_type.md new file mode 100644 index 0000000000..a68ce5ecaf --- /dev/null +++ b/docs/user_guide/data_types_and_io/tensorflow_type.md @@ -0,0 +1,83 @@ +(tensorflow_type)= + +# TensorFlow types + +```{eval-rst} +.. tags:: MachineLearning, Basic +``` + +This document outlines the TensorFlow types available in Flyte, which facilitate the integration of TensorFlow models and datasets in Flyte workflows. + +### Import necessary libraries and modules +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/refs/heads/master/examples/data_types_and_io/data_types_and_io/tensorflow_type.py +:caption: data_types_and_io/tensorflow_type.py +:lines: 2-14 +``` + +## Tensorflow model +Flyte supports the TensorFlow SavedModel format for serializing and deserializing `tf.keras.Model` instances. The `TensorFlowModelTransformer` is responsible for handling these transformations. + +### Transformer +- **Name:** TensorFlow Model +- **Class:** `TensorFlowModelTransformer` +- **Python Type:** `tf.keras.Model` +- **Blob Format:** `TensorFlowModel` +- **Dimensionality:** `MULTIPART` + +### Usage +The `TensorFlowModelTransformer` allows you to save a TensorFlow model to a remote location and retrieve it later in your Flyte workflows. + +```{note} +To clone and run the example code on this page, see the [Flytesnacks repo][flytesnacks]. 
+```
+```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/refs/heads/master/examples/data_types_and_io/data_types_and_io/tensorflow_type.py
+:caption: data_types_and_io/tensorflow_type.py
+:lines: 16-33
+```
+
+## TFRecord files
+Flyte supports TFRecord files through the `TFRecordFile` type, which can handle serialized TensorFlow records. The `TensorFlowRecordFileTransformer` manages the conversion of TFRecord files to and from Flyte literals.
+
+### Transformer
+- **Name:** TensorFlow Record File
+- **Class:** `TensorFlowRecordFileTransformer`
+- **Blob Format:** `TensorFlowRecord`
+- **Dimensionality:** `SINGLE`
+
+### Usage
+The `TensorFlowRecordFileTransformer` enables you to work with single TFRecord files, making it easy to read and write data in TensorFlow's TFRecord format.
+
+```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/refs/heads/master/examples/data_types_and_io/data_types_and_io/tensorflow_type.py
+:caption: data_types_and_io/tensorflow_type.py
+:lines: 35-45
+```
+
+## TFRecord directories
+Flyte supports directories containing multiple TFRecord files through the `TFRecordsDirectory` type. The `TensorFlowRecordsDirTransformer` manages the conversion of TFRecord directories to and from Flyte literals.
+
+### Transformer
+- **Name:** TensorFlow Record Directory
+- **Class:** `TensorFlowRecordsDirTransformer`
+- **Python Type:** `TFRecordsDirectory`
+- **Blob Format:** `TensorFlowRecord`
+- **Dimensionality:** `MULTIPART`
+
+### Usage
+The `TensorFlowRecordsDirTransformer` allows you to work with directories of TFRecord files, which is useful for handling large datasets that are split across multiple files.
+ +#### Example +```{rli} https://raw.githubusercontent.com/flyteorg/flytesnacks/refs/heads/master/examples/data_types_and_io/data_types_and_io/tensorflow_type.py +:caption: data_types_and_io/tensorflow_type.py +:lines: 47-56 +``` + +## Configuration class: `TFRecordDatasetConfig` +The `TFRecordDatasetConfig` class is a data structure used to configure the parameters for creating a `tf.data.TFRecordDataset`, which allows for efficient reading of TFRecord files. This class uses the `DataClassJsonMixin` for easy JSON serialization. + +### Attributes +- **compression_type**: (Optional) Specifies the compression method used for the TFRecord files. Possible values include an empty string (no compression), "ZLIB", or "GZIP". +- **buffer_size**: (Optional) Defines the size of the read buffer in bytes. If not set, defaults will be used based on the local or remote file system. +- **num_parallel_reads**: (Optional) Determines the number of files to read in parallel. A value greater than one outputs records in an interleaved order. +- **name**: (Optional) Assigns a name to the operation for easier identification in the pipeline. + +This configuration is crucial for optimizing the reading process of TFRecord datasets, especially when dealing with large datasets or when specific performance tuning is required. diff --git a/docs/user_guide/development_lifecycle/caching.md b/docs/user_guide/development_lifecycle/caching.md index 7fc4237ec6..ea6a5af574 100644 --- a/docs/user_guide/development_lifecycle/caching.md +++ b/docs/user_guide/development_lifecycle/caching.md @@ -19,15 +19,23 @@ Let's watch a brief explanation of caching and a demo in this video, followed by ``` +### Input Caching + +In Flyte, input caching allows tasks to automatically cache the input data required for execution. This feature is particularly useful in scenarios where tasks may need to be re-executed, such as during retries due to failures or when manually triggered by users. 
By caching input data, Flyte optimizes workflow performance and resource usage, preventing unnecessary recomputation of task inputs. + +### Output Caching + +Output caching in Flyte allows users to cache the results of tasks to avoid redundant computations. This feature is especially valuable for tasks that perform expensive or time-consuming operations where the results are unlikely to change frequently. + There are four parameters and one command-line flag related to caching. ## Parameters * `cache`(`bool`): Enables or disables caching of the workflow, task, or launch plan. By default, caching is disabled to avoid unintended consequences when caching executions with side effects. -To enable caching set `cache=True`. +To enable caching, set `cache=True`. * `cache_version` (`str`): Part of the cache key. -A change to this parameter will invalidate the cache. +Changing this version number tells Flyte to ignore previous cached results and run the task again if the task's function has changed. This allows you to explicitly indicate when a change has been made to the task that should invalidate any existing cached results. Note that this is not the only change that will invalidate the cache (see below). Also, note that you can manually trigger cache invalidation per execution using the [`overwrite-cache` flag](#overwrite-cache-flag). @@ -35,7 +43,7 @@ Also, note that you can manually trigger cache invalidation per execution using When enabled, Flyte ensures that a single instance of the task is run before any other instances that would otherwise run concurrently. This allows the initial instance to cache its result and lets the later instances reuse the resulting cached outputs. Cache serialization is disabled by default. -* `cache_ignore_input_vars` (`Tuple[str, ...]`): Input variables that should not be included when calculating hash for cache. By default, no input variables are ignored. This parameter only applies to task serialization. 
+* `cache_ignore_input_vars` (`Tuple[str, ...]`): Input variables that Flyte should ignore when deciding if a task’s result can be reused (hash calculation). By default, no input variables are ignored. This parameter only applies to task serialization. Task caching parameters can be specified at task definition time within `@task` decorator or at task invocation time using `with_overrides` method. @@ -127,7 +135,7 @@ Task executions can be cached across different versions of the task because a ch ### How does local caching work? -The flytekit package uses the [diskcache](https://github.com/grantjenks/python-diskcache) package, specifically [diskcache.Cache](http://www.grantjenks.com/docs/diskcache/tutorial.html#cache), to aid in the memoization of task executions. The results of local task executions are stored under `~/.flyte/local-cache/` and cache keys are composed of **Cache Version**, **Task Signature**, and **Task Input Values**. +Flyte uses a tool called [diskcache](https://github.com/grantjenks/python-diskcache), specifically [diskcache.Cache](http://www.grantjenks.com/docs/diskcache/tutorial.html#cache), to save task results so they don’t need to be recomputed if the same task is executed again, a technique known as ``memoization``. The results of local task executions are stored under `~/.flyte/local-cache/` and cache keys are composed of **Cache Version**, **Task Signature**, and **Task Input Values**. Similar to the remote case, a local cache entry for a task will be invalidated if either the `cache_version` or the task signature is modified. In addition, the local cache can also be emptied by running the following command: `pyflyte local-cache clear`, which essentially obliterates the contents of the `~/.flyte/local-cache/` directory. To disable the local cache, you can set the `local.cache_enabled` config option (e.g. by setting the environment variable `FLYTE_LOCAL_CACHE_ENABLED=False`). 
diff --git a/docs/user_guide/flyte_fundamentals/optimizing_tasks.md b/docs/user_guide/flyte_fundamentals/optimizing_tasks.md index 00b3c693f1..5be50448ea 100644 --- a/docs/user_guide/flyte_fundamentals/optimizing_tasks.md +++ b/docs/user_guide/flyte_fundamentals/optimizing_tasks.md @@ -52,16 +52,26 @@ represents the cache key. Learn more in the {ref}`User Guide float: @@ -70,10 +80,28 @@ def compute_mean(data: List[float]) -> float: return sum(data) / len(data) ``` -```{note} -Retries only take effect when running a task on a Flyte cluster. -See {ref}`Fault Tolerance ` for details on the types of errors that will be retried. -``` + +- **System Errors**: Managed at the platform level through settings like `max-node-retries-system-failures` in the FlytePropeller configuration. This setting helps manage retries without requiring changes to the task code. + + Additionally, the `interruptible-failure-threshold` option in the node-config key defines how many system-level retries are considered interruptible. This is particularly useful for tasks running on preemptible instances. + + For more details, refer to the [Flyte Propeller Configuration](https://docs.flyte.org/en/latest/deployment/configuration/generated/flytepropeller_config.html#config-nodeconfig). + + +### Interruptible Tasks and Map Tasks + +Tasks marked as interruptible can be preempted and retried without counting against the USER error budget. This is useful for tasks running on preemptible compute resources like spot instances. + +For map tasks, the interruptible behavior aligns with that of regular tasks. The `retries` field in the task annotation is not necessary for handling SYSTEM errors, as these are managed by the platform's configuration. Alternatively, the USER budget is set by defining retries in the task decorator. + +Map Tasks: The behavior of interruptible tasks extends seamlessly to map tasks. 
The platform's configuration manages SYSTEM errors, ensuring consistency across task types without additional task-level settings. + +### Advanced Retry Policies + +Flyte supports advanced configurations that allow more granular control over retry behavior, such as specifying the number of retries that can be interruptible. This advanced setup helps in finely tuning the task executions based on the criticality and resource availability. + +For a deeper dive into configuring retries and understanding their impact, see the [Fault Tolerance](https://docs.flyte.org/en/latest/concepts/fault-tolerance.html) section in the Flyte documentation. + ## Timeouts @@ -245,6 +273,47 @@ the resources that you need. In this case, that need is distributed training, but Flyte also provides integrations for {ref}`Spark `, {ref}`Ray `, {ref}`MPI `, {ref}`Snowflake `, and more. +## Retries and Spot Instances + +When running tasks on spot/interruptible instances, it's important to understand how retries work: + +```python +from flytekit import task + +@task( + retries=3, # User retry budget + interruptible=True # Enables running on spot instances +) +def my_task() -> None: + ... +``` + +### Default Retry Behavior +- Spot instance preemptions count against the system retry budget (not user retries) +- The last system retry automatically runs on a non-preemptible instance +- User retries (specified in `@task` decorator) are only used for application errors + +### Simplified Retry Behavior +Flyte also offers a simplified retry model where both system and user retries count towards a single budget: + +```python +@task( + retries=5, # Total retry budget for both system and user errors + interruptible=True +) +def my_task() -> None: + ... +``` + +To enable this behavior: +1. Set `configmap.core.propeller.node-config.ignore-retry-cause=true` in platform config +2. Define total retry budget in task decorator +3. 
Last retries automatically run on non-spot instances + +Choose the retry model that best fits your use case: +- Default: Separate budgets for system vs user errors +- Simplified: Single retry budget with guaranteed completion + Even though Flyte itself is a powerful compute engine and orchestrator for data engineering, machine learning, and analytics, perhaps you have existing code that leverages other platforms. Flyte recognizes the pain of migrating code, diff --git a/docs/user_guide/flyte_fundamentals/registering_workflows.md b/docs/user_guide/flyte_fundamentals/registering_workflows.md index 1c53a78020..f178d5a53c 100644 --- a/docs/user_guide/flyte_fundamentals/registering_workflows.md +++ b/docs/user_guide/flyte_fundamentals/registering_workflows.md @@ -72,7 +72,7 @@ run it with the supplied arguments. As you can see from the expected output, you can visit the link to the Flyte console to see the progress of your running execution. -You may also run `run --remote --copy-all`, which is very similar to the above command. As the name suggests, this will copy the source tree rooted at the top-level `__init__.py` file. With this strategy, any modules discoverable on the `PYTHONPATH` will be importable. +You may also run `run --remote --copy all`, which is very similar to the above command. As the name suggests, this will copy the source tree rooted at the top-level `__init__.py` file. With this strategy, any modules discoverable on the `PYTHONPATH` will be importable. ```{note} `pyflyte run` supports Flyte workflows that import any other user-defined modules that @@ -260,6 +260,13 @@ metadata/configuration, it's more secure if they're private. Learn more about how to pull private image in the {ref}`User Guide `. ``` +##### Relationship between ImageSpec and fast registration +The `ImageSpec` construct available in flytekit also has a mechanism to copy files into the image being built. 
Its behavior depends on the type of registration used: +* If fast register is used, then it's assumed that you don't also want to copy source files into the built image. +* If fast register is not used (which is the default for `pyflyte package`, or if `pyflyte register --copy none` is specified), then it's assumed that you do want source files copied into the built image. + +If your `ImageSpec` constructor specifies a `source_root` and the `copy` argument is set to something other than `CopyFileDetection.NO_COPY`, then files will be copied regardless of fast registration status. + #### Package your project with `pyflyte package` You can package your project with the `pyflyte package` command like so: @@ -288,7 +295,7 @@ entities compiled as protobuf files that you can register with multiple Flyte clusters. ````{note} -Like `pyflyte register`, can also specify multiple workflow directories, like: +You can specify multiple workflow directories using the following command: ```{prompt} bash $ pyflyte --pkgs --pkgs package ... @@ -297,6 +304,12 @@ pyflyte --pkgs --pkgs package ... This is useful in cases where you want to register two different Flyte projects that you maintain in a single place. + +If you encounter a ``ModuleNotFoundError`` when packaging, use the `--source` option to include the correct source paths. For instance: + +```{prompt} bash $ +pyflyte --pkgs package --source ./src -f +``` ```` #### Register with `flytectl register` @@ -358,6 +371,17 @@ two GitHub actions that facilitates this: of Flyte packages, for example, the `.tgz` archives that are created by `pyflyte package`. +### Some CI/CD best practices + +In case Flyte workflows are registered on each commit in your build pipelines, you can consider the following recommendations and approach: + +- **Versioning Strategy** : Determining the version of the build for different types of commits makes them consistent and identifiable. 
For commits on feature branches, use `-` and for the ones on main branches, use `main-`. Use version numbers for the released (tagged) versions. + +- **Workflow Serialization and Registration** : Workflows should be serialized and registered based on the versioning of the build and the container image. Depending on whether the build is for a feature branch or main, the registration domain should be adjusted accordingly. For more context, please visit the [Registering workflows](https://docs.flyte.org/en/latest/user_guide/flyte_fundamentals/registering_workflows.html) page. + +- **Container Image Specification** : When managing multiple images across tasks within a Flyte workflow, use the `--image` flag during registration to specify which image to use. This avoids hardcoding the image within the task definition, promoting reusability and flexibility in workflows. + + ## What's next? In this guide, you learned about the Flyte demo cluster, Flyte configuration, and diff --git a/flyteadmin/.golangci.yml b/flyteadmin/.golangci.yml index 4dbb031812..cd180b89d1 100644 --- a/flyteadmin/.golangci.yml +++ b/flyteadmin/.golangci.yml @@ -39,3 +39,5 @@ issues: exclude-rules: - path: pkg/workflowengine/impl/prepare_execution.go text: "copies lock" + - path: pkg/runtime/interfaces/application_configuration.go + text: "G402: TLS InsecureSkipVerify may be true." 
diff --git a/flyteadmin/Makefile b/flyteadmin/Makefile index fad51a2edf..4a715a02ce 100644 --- a/flyteadmin/Makefile +++ b/flyteadmin/Makefile @@ -74,7 +74,7 @@ server: .PHONY: scheduler scheduler: - go run scheduler/main.go run --server.kube-config ~/.kube/config --config flyteadmin_config.yaml + go run cmd/scheduler/main.go run --server.kube-config ~/.kube/config --config flyteadmin_config.yaml .PHONY: migrate migrate: diff --git a/flyteadmin/auth/authzserver/provider.go b/flyteadmin/auth/authzserver/provider.go index b2948331fb..7df738a2d8 100644 --- a/flyteadmin/auth/authzserver/provider.go +++ b/flyteadmin/auth/authzserver/provider.go @@ -16,6 +16,7 @@ import ( fositeOAuth2 "github.com/ory/fosite/handler/oauth2" "github.com/ory/fosite/storage" "github.com/ory/fosite/token/jwt" + "github.com/prometheus/client_golang/prometheus" "k8s.io/apimachinery/pkg/util/sets" "github.com/flyteorg/flyte/flyteadmin/auth" @@ -24,6 +25,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core" "github.com/flyteorg/flyte/flytestdlib/logger" + "github.com/flyteorg/flyte/flytestdlib/promutils" ) const ( @@ -33,12 +35,18 @@ const ( KeyIDClaim = "key_id" ) +type providerMetrics struct { + InvalidTokens prometheus.Counter + ExpiredTokens prometheus.Counter +} + // Provider implements OAuth2 Authorization Server. type Provider struct { fosite.OAuth2Provider cfg config.AuthorizationServer publicKey []rsa.PublicKey keySet jwk.Set + metrics providerMetrics } func (p Provider) PublicKeys() []rsa.PublicKey { @@ -111,15 +119,28 @@ func (p Provider) ValidateAccessToken(ctx context.Context, expectedAudience, tok }) if err != nil { + logger.Infof(ctx, "failed to parse token for audience '%s'. 
Error: %v", expectedAudience, err) return nil, err } if !parsedToken.Valid { + if ve, ok := err.(*jwtgo.ValidationError); ok && ve.Is(jwtgo.ErrTokenExpired) { + logger.Infof(ctx, "parsed token for audience '%s' is expired", expectedAudience) + p.metrics.ExpiredTokens.Inc() + } else { + logger.Infof(ctx, "parsed token for audience '%s' is invalid: %+v", expectedAudience, err) + p.metrics.InvalidTokens.Inc() + } return nil, fmt.Errorf("parsed token is invalid") } claimsRaw := parsedToken.Claims.(jwtgo.MapClaims) - return verifyClaims(sets.NewString(expectedAudience), claimsRaw) + identityCtx, err := verifyClaims(sets.NewString(expectedAudience), claimsRaw) + if err != nil { + logger.Infof(ctx, "failed to verify claims for audience: '%s'. Error: %v", expectedAudience, err) + return nil, err + } + return identityCtx, nil } // NewProvider creates a new OAuth2 Provider that is able to do OAuth 2-legged and 3-legged flows. It'll lookup @@ -127,7 +148,7 @@ func (p Provider) ValidateAccessToken(ctx context.Context, expectedAudience, tok // sign and generate hashes for tokens. The RSA Private key is expected to be in PEM format with the public key embedded. // Use auth.GetInitSecretsCommand() to generate new valid secrets that will be accepted by this provider. // The config.SecretNameClaimSymmetricKey must be a 32-bytes long key in Base64Encoding. -func NewProvider(ctx context.Context, cfg config.AuthorizationServer, sm core.SecretManager) (Provider, error) { +func NewProvider(ctx context.Context, cfg config.AuthorizationServer, sm core.SecretManager, scope promutils.Scope) (Provider, error) { // fosite requires four parameters for the server to get up and running: // 1. config - for any enforcement you may desire, you can do this using `compose.Config`. You like PKCE, enforce it! // 2. store - no auth service is generally useful unless it can remember clients and users. 
@@ -230,5 +251,9 @@ func NewProvider(ctx context.Context, cfg config.AuthorizationServer, sm core.Se OAuth2Provider: oauth2Provider, publicKey: publicKeys, keySet: keysSet, + metrics: providerMetrics{ + ExpiredTokens: scope.MustNewCounter("expired_token", "The number of expired tokens"), + InvalidTokens: scope.MustNewCounter("invalid_tokens", "The number of invalid tokens"), + }, }, nil } diff --git a/flyteadmin/auth/authzserver/provider_test.go b/flyteadmin/auth/authzserver/provider_test.go index 45f0778b51..50fcaa5de1 100644 --- a/flyteadmin/auth/authzserver/provider_test.go +++ b/flyteadmin/auth/authzserver/provider_test.go @@ -18,6 +18,7 @@ import ( "github.com/flyteorg/flyte/flyteadmin/auth/config" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/flytestdlib/promutils" ) func newMockProvider(t testing.TB) (Provider, auth.SecretsSet) { @@ -36,7 +37,7 @@ func newMockProvider(t testing.TB) (Provider, auth.SecretsSet) { sm.OnGet(ctx, config.SecretNameTokenSigningRSAKey).Return(buf.String(), nil) sm.OnGet(ctx, config.SecretNameOldTokenSigningRSAKey).Return(buf.String(), nil) - p, err := NewProvider(ctx, config.DefaultConfig.AppAuth.SelfAuthServer, sm) + p, err := NewProvider(ctx, config.DefaultConfig.AppAuth.SelfAuthServer, sm, promutils.NewTestScope()) assert.NoError(t, err) return p, secrets } @@ -58,7 +59,7 @@ func newInvalidMockProvider(ctx context.Context, t *testing.T, secrets auth.Secr sm.OnGet(ctx, config.SecretNameOldTokenSigningRSAKey).Return(buf.String(), nil) invalidFunc() - p, err := NewProvider(ctx, config.DefaultConfig.AppAuth.SelfAuthServer, sm) + p, err := NewProvider(ctx, config.DefaultConfig.AppAuth.SelfAuthServer, sm, promutils.NewTestScope()) assert.Error(t, err) assert.ErrorContains(t, err, errorContains) assert.Equal(t, Provider{}, p) @@ -294,7 +295,7 @@ func TestProvider_ValidateAccessToken(t *testing.T) { sm.OnGet(ctx, 
config.SecretNameTokenSigningRSAKey).Return(buf.String(), nil) sm.OnGet(ctx, config.SecretNameOldTokenSigningRSAKey).Return(buf.String(), nil) - p, err := NewProvider(ctx, config.DefaultConfig.AppAuth.SelfAuthServer, sm) + p, err := NewProvider(ctx, config.DefaultConfig.AppAuth.SelfAuthServer, sm, promutils.NewTestScope()) assert.NoError(t, err) // create a signer for rsa 256 diff --git a/flyteadmin/auth/handlers.go b/flyteadmin/auth/handlers.go index b839cf26d0..d8bc626652 100644 --- a/flyteadmin/auth/handlers.go +++ b/flyteadmin/auth/handlers.go @@ -301,23 +301,25 @@ func GetAuthenticationInterceptor(authCtx interfaces.AuthenticationContext) func fromHTTP := metautils.ExtractIncoming(ctx).Get(FromHTTPKey) isFromHTTP := fromHTTP == FromHTTPVal - identityContext, err := GRPCGetIdentityFromAccessToken(ctx, authCtx) - if err == nil { + identityContext, accessTokenErr := GRPCGetIdentityFromAccessToken(ctx, authCtx) + if accessTokenErr == nil { return SetContextForIdentity(ctx, identityContext), nil } - logger.Infof(ctx, "Failed to parse Access Token from context. Will attempt to find IDToken. Error: %v", err) + logger.Infof(ctx, "Failed to parse Access Token from context. Will attempt to find IDToken. Error: %v", accessTokenErr) - identityContext, err = GRPCGetIdentityFromIDToken(ctx, authCtx.Options().UserAuth.OpenID.ClientID, + identityContext, idTokenErr := GRPCGetIdentityFromIDToken(ctx, authCtx.Options().UserAuth.OpenID.ClientID, authCtx.OidcProvider()) - if err == nil { + if idTokenErr == nil { return SetContextForIdentity(ctx, identityContext), nil } + logger.Debugf(ctx, "Failed to parse ID Token from context. Error: %v", idTokenErr) // Only enforcement logic is present. The default case is to let things through. 
if (isFromHTTP && !authCtx.Options().DisableForHTTP) || (!isFromHTTP && !authCtx.Options().DisableForGrpc) { + err := fmt.Errorf("id token err: %w, access token err: %w", fmt.Errorf("access token err: %w", accessTokenErr), idTokenErr) return ctx, status.Errorf(codes.Unauthenticated, "token parse error %s", err) } diff --git a/flyteadmin/cmd/entrypoints/migrate.go b/flyteadmin/cmd/entrypoints/migrate.go index c7ad6058c5..7ee28150f0 100644 --- a/flyteadmin/cmd/entrypoints/migrate.go +++ b/flyteadmin/cmd/entrypoints/migrate.go @@ -6,6 +6,7 @@ import ( "github.com/spf13/cobra" _ "gorm.io/driver/postgres" // Required to import database driver. + "github.com/flyteorg/flyte/flyteadmin/pkg/repositories/config" "github.com/flyteorg/flyte/flyteadmin/pkg/server" ) @@ -40,7 +41,8 @@ var seedProjectsCmd = &cobra.Command{ Short: "Seed projects in the database.", RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() - return server.SeedProjects(ctx, args) + seedProjects := config.UniqueProjectsFromNames(args) + return server.SeedProjects(ctx, seedProjects) }, } diff --git a/flyteadmin/cmd/entrypoints/serve.go b/flyteadmin/cmd/entrypoints/serve.go index 794b0d6671..a5a9b98bb3 100644 --- a/flyteadmin/cmd/entrypoints/serve.go +++ b/flyteadmin/cmd/entrypoints/serve.go @@ -12,6 +12,7 @@ import ( "github.com/flyteorg/flyte/flytestdlib/logger" "github.com/flyteorg/flyte/flytestdlib/otelutils" "github.com/flyteorg/flyte/flytestdlib/profutils" + _ "github.com/flyteorg/flyte/flytestdlib/promutils" ) var pluginRegistryStore = plugins.NewAtomicRegistry(plugins.NewRegistry()) diff --git a/flyteadmin/dataproxy/service.go b/flyteadmin/dataproxy/service.go index d61998835f..c02fa3699f 100644 --- a/flyteadmin/dataproxy/service.go +++ b/flyteadmin/dataproxy/service.go @@ -12,6 +12,7 @@ import ( "strings" "time" + "github.com/samber/lo" "google.golang.org/grpc/codes" "google.golang.org/protobuf/types/known/durationpb" "google.golang.org/protobuf/types/known/timestamppb" 
@@ -63,7 +64,7 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp // If we fall in here, that means that the full path is deterministic and we should check for existence. if len(req.Filename) > 0 && len(req.FilenameRoot) > 0 { knownLocation, err := createStorageLocation(ctx, s.dataStore, s.cfg.Upload, - req.Project, req.Domain, req.FilenameRoot, req.Filename) + req.Org, req.Project, req.Domain, req.FilenameRoot, req.Filename) if err != nil { logger.Errorf(ctx, "failed to create storage location. Error %v", err) return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to create storage location, Error: %v", err) @@ -125,7 +126,7 @@ func (s Service) CreateUploadLocation(ctx context.Context, req *service.CreateUp prefix = base32.StdEncoding.EncodeToString(req.ContentMd5) } storagePath, err := createStorageLocation(ctx, s.dataStore, s.cfg.Upload, - req.Project, req.Domain, prefix, req.Filename) + req.Org, req.Project, req.Domain, prefix, req.Filename) if err != nil { logger.Errorf(ctx, "failed to create shardedStorageLocation. Error %v", err) return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to create shardedStorageLocation, Error: %v", err) @@ -181,7 +182,17 @@ func (s Service) CreateDownloadLink(ctx context.Context, req *service.CreateDown return nil, errors.NewFlyteAdminErrorf(codes.Internal, "no deckUrl found for request [%+v]", req) } - signedURLResp, err := s.dataStore.CreateSignedURL(ctx, storage.DataReference(nativeURL), storage.SignedURLProperties{ + ref := storage.DataReference(nativeURL) + meta, err := s.dataStore.Head(ctx, ref) + if err != nil { + return nil, errors.NewFlyteAdminErrorf(codes.Internal, "failed to head object before signing url. 
Error: %v", err) + } + + if !meta.Exists() { + return nil, errors.NewFlyteAdminErrorf(codes.NotFound, "object not found") + } + + signedURLResp, err := s.dataStore.CreateSignedURL(ctx, ref, storage.SignedURLProperties{ Scope: stow.ClientMethodGet, ExpiresIn: req.ExpiresIn.AsDuration(), }) @@ -287,6 +298,9 @@ func (s Service) validateCreateDownloadLinkRequest(req *service.CreateDownloadLi func createStorageLocation(ctx context.Context, store *storage.DataStore, cfg config.DataProxyUploadConfig, keyParts ...string) (storage.DataReference, error) { + keyParts = lo.Filter(keyParts, func(key string, _ int) bool { + return key != "" + }) storagePath, err := store.ConstructReference(ctx, store.GetBaseContainerFQN(ctx), append([]string{cfg.StoragePrefix}, keyParts...)...) if err != nil { diff --git a/flyteadmin/dataproxy/service_test.go b/flyteadmin/dataproxy/service_test.go index 81193e106b..4c3f3ea720 100644 --- a/flyteadmin/dataproxy/service_test.go +++ b/flyteadmin/dataproxy/service_test.go @@ -4,12 +4,15 @@ import ( "bytes" "context" "crypto/md5" // #nosec + "fmt" "net/url" "testing" "time" "github.com/golang/protobuf/proto" "github.com/stretchr/testify/assert" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" "google.golang.org/protobuf/types/known/durationpb" commonMocks "github.com/flyteorg/flyte/flyteadmin/pkg/common/mocks" @@ -157,6 +160,15 @@ func TestCreateUploadLocationMore(t *testing.T) { }) } +type testMetadata struct { + storage.Metadata + exists bool +} + +func (t testMetadata) Exists() bool { + return t.exists +} + func TestCreateDownloadLink(t *testing.T) { dataStore := commonMocks.GetMockStorageClient() nodeExecutionManager := &mocks.MockNodeExecutionManager{} @@ -179,7 +191,30 @@ func TestCreateDownloadLink(t *testing.T) { assert.Error(t, err) }) + t.Run("item not found", func(t *testing.T) { + dataStore.ComposedProtobufStore.(*commonMocks.TestDataStore).HeadCb = func(ctx context.Context, ref storage.DataReference) (storage.Metadata, 
error) { + return testMetadata{exists: false}, nil + } + + _, err = s.CreateDownloadLink(context.Background(), &service.CreateDownloadLinkRequest{ + ArtifactType: service.ArtifactType_ARTIFACT_TYPE_DECK, + Source: &service.CreateDownloadLinkRequest_NodeExecutionId{ + NodeExecutionId: &core.NodeExecutionIdentifier{}, + }, + ExpiresIn: durationpb.New(time.Hour), + }) + + st, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, codes.NotFound, st.Code()) + assert.Equal(t, "object not found", st.Message()) + }) + t.Run("valid config", func(t *testing.T) { + dataStore.ComposedProtobufStore.(*commonMocks.TestDataStore).HeadCb = func(ctx context.Context, ref storage.DataReference) (storage.Metadata, error) { + return testMetadata{exists: true}, nil + } + _, err = s.CreateDownloadLink(context.Background(), &service.CreateDownloadLinkRequest{ ArtifactType: service.ArtifactType_ARTIFACT_TYPE_DECK, Source: &service.CreateDownloadLinkRequest_NodeExecutionId{ @@ -187,10 +222,34 @@ func TestCreateDownloadLink(t *testing.T) { }, ExpiresIn: durationpb.New(time.Hour), }) + assert.NoError(t, err) }) + t.Run("head failed", func(t *testing.T) { + dataStore.ComposedProtobufStore.(*commonMocks.TestDataStore).HeadCb = func(ctx context.Context, ref storage.DataReference) (storage.Metadata, error) { + return testMetadata{}, fmt.Errorf("head fail") + } + + _, err = s.CreateDownloadLink(context.Background(), &service.CreateDownloadLinkRequest{ + ArtifactType: service.ArtifactType_ARTIFACT_TYPE_DECK, + Source: &service.CreateDownloadLinkRequest_NodeExecutionId{ + NodeExecutionId: &core.NodeExecutionIdentifier{}, + }, + ExpiresIn: durationpb.New(time.Hour), + }) + + st, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, codes.Internal, st.Code()) + assert.Equal(t, "failed to head object before signing url. 
Error: head fail", st.Message()) + }) + t.Run("use default ExpiresIn", func(t *testing.T) { + dataStore.ComposedProtobufStore.(*commonMocks.TestDataStore).HeadCb = func(ctx context.Context, ref storage.DataReference) (storage.Metadata, error) { + return testMetadata{exists: true}, nil + } + _, err = s.CreateDownloadLink(context.Background(), &service.CreateDownloadLinkRequest{ ArtifactType: service.ArtifactType_ARTIFACT_TYPE_DECK, Source: &service.CreateDownloadLinkRequest_NodeExecutionId{ @@ -422,3 +481,23 @@ func TestService_Error(t *testing.T) { assert.Error(t, err, "no task executions") }) } + +func TestCreateStorageLocation(t *testing.T) { + ctx := context.TODO() + dataStore := commonMocks.GetMockStorageClient() + expectedStoragePath := storage.DataReference("s3://bucket/prefix/foo/bar/baz") + t.Run("no empty parts", func(t *testing.T) { + storagePath, err := createStorageLocation(ctx, dataStore, config.DataProxyUploadConfig{ + StoragePrefix: "prefix", + }, "foo", "bar", "baz") + assert.NoError(t, err) + assert.Equal(t, expectedStoragePath, storagePath) + }) + t.Run("with empty parts", func(t *testing.T) { + storagePath, err := createStorageLocation(ctx, dataStore, config.DataProxyUploadConfig{ + StoragePrefix: "prefix", + }, "foo", "bar", "", "baz") + assert.NoError(t, err) + assert.Equal(t, expectedStoragePath, storagePath) + }) +} diff --git a/flyteadmin/go.mod b/flyteadmin/go.mod index 852082add9..5c008a46eb 100644 --- a/flyteadmin/go.mod +++ b/flyteadmin/go.mod @@ -40,9 +40,10 @@ require ( github.com/ory/fosite v0.42.2 github.com/ory/x v0.0.214 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.16.0 - github.com/prometheus/client_model v0.4.0 + github.com/prometheus/client_golang v1.19.1 + github.com/prometheus/client_model v0.6.1 github.com/robfig/cron/v3 v3.0.0 + github.com/samber/lo v1.47.0 github.com/sendgrid/sendgrid-go v3.10.0+incompatible github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 @@ -51,12 +52,13 @@ require ( 
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.47.0 go.opentelemetry.io/otel v1.24.0 golang.org/x/net v0.27.0 - golang.org/x/oauth2 v0.16.0 + golang.org/x/oauth2 v0.18.0 + golang.org/x/sync v0.7.0 golang.org/x/time v0.5.0 google.golang.org/api v0.155.0 google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 google.golang.org/grpc v1.62.1 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.34.1 gorm.io/driver/mysql v1.4.4 gorm.io/driver/postgres v1.5.3 gorm.io/driver/sqlite v1.5.4 @@ -87,7 +89,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/coocood/freecache v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect @@ -136,7 +138,7 @@ require ( github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect - github.com/klauspost/compress v1.9.8 // indirect + github.com/klauspost/compress v1.17.8 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/lestrrat-go/backoff/v2 v2.0.8 // indirect github.com/lestrrat-go/blackmagic v1.0.2 // indirect @@ -145,10 +147,9 @@ require ( github.com/lestrrat-go/option v1.0.1 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect github.com/mattn/goveralls v0.0.6 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect 
github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect @@ -163,9 +164,11 @@ require ( github.com/pierrec/lz4 v2.5.2+incompatible // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect - github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect + github.com/prometheus/common v0.53.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/sendgrid/rest v2.6.9+incompatible // indirect + github.com/shamaton/msgpack/v2 v2.2.2 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cast v1.4.1 // indirect @@ -188,9 +191,10 @@ require ( go.opentelemetry.io/otel/sdk v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect go.opentelemetry.io/proto/otlp v1.1.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect golang.org/x/crypto v0.25.0 // indirect golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect - golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.22.0 // indirect golang.org/x/term v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect @@ -217,9 +221,6 @@ require ( github.com/bradfitz/gomemcache v0.0.0-20180710155616-bc664df96737 // indirect github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2 v2.8.0 github.com/imdario/mergo v0.3.13 // indirect - github.com/prometheus/common v0.44.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - go.uber.org/zap v1.27.0 // indirect k8s.io/klog/v2 v2.100.1 // indirect k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect diff --git a/flyteadmin/go.sum b/flyteadmin/go.sum index bab1e3f1f3..ec5e0cdc1c 100644 --- a/flyteadmin/go.sum +++ b/flyteadmin/go.sum @@ 
-135,8 +135,8 @@ github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -875,8 +875,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/knadh/koanf v0.14.1-0.20201201075439-e0853799f9ec/go.mod h1:H5mEFsTeWizwFXHKtsITL5ipsLTuAMQoGuQpp+1JL9U= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -968,8 +969,9 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= @@ -979,8 +981,6 @@ github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpe github.com/mattn/goveralls v0.0.6 h1:cr8Y0VMo/MnEZBjxNN/vh6G90SZ7IMb6lms1dzMoO+Y= github.com/mattn/goveralls v0.0.6/go.mod h1:h8b4ow6FxSPMQHF6o2ve3qsclnffZjYTNEKmLesRwqw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/mitchellh/go-homedir v1.0.0/go.mod 
h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -1110,32 +1110,33 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod 
h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= 
+github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rhnvrm/simples3 v0.5.0/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -1156,6 +1157,8 @@ github.com/rubenv/sql-migrate v0.0.0-20190212093014-1007f53448d7/go.mod h1:WS0rl github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc= +github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/santhosh-tekuri/jsonschema/v2 v2.1.0/go.mod h1:yzJzKUGV4RbWqWIBBP4wSOBqavX5saE02yirLS0OTyg= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= @@ -1543,8 +1546,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod 
h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1643,6 +1646,7 @@ golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1917,8 +1921,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf 
v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/DataDog/dd-trace-go.v1 v1.22.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/DataDog/dd-trace-go.v1 v1.27.0/go.mod h1:Sp1lku8WJMvNV0kjDI4Ni/T7J/U3BO5ct5kEaoVU8+I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= diff --git a/flyteadmin/pkg/async/cloudevent/factory.go b/flyteadmin/pkg/async/cloudevent/factory.go index 65cd48de93..51c38ffea4 100644 --- a/flyteadmin/pkg/async/cloudevent/factory.go +++ b/flyteadmin/pkg/async/cloudevent/factory.go @@ -73,12 +73,7 @@ func NewCloudEventsPublisher(ctx context.Context, db repositoryInterfaces.Reposi case cloudEventImplementations.Kafka: saramaConfig := sarama.NewConfig() - var err error - saramaConfig.Version, err = sarama.ParseKafkaVersion(cloudEventsConfig.KafkaConfig.Version) - if err != nil { - logger.Fatalf(ctx, "failed to parse kafka version, %v", err) - panic(err) - } + cloudEventsConfig.KafkaConfig.UpdateSaramaConfig(ctx, saramaConfig) kafkaSender, err := kafka_sarama.NewSender(cloudEventsConfig.KafkaConfig.Brokers, saramaConfig, cloudEventsConfig.EventsPublisherConfig.TopicName) if err != nil { panic(err) diff --git a/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go b/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go index 228db852d0..7aaab0bb60 100644 --- a/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go +++ b/flyteadmin/pkg/async/cloudevent/implementations/cloudevent_publisher.go @@ -207,7 +207,7 @@ func getNodeExecutionContext(ctx context.Context, identifier *core.NodeExecution func (c *CloudEventWrappedPublisher) getLatestTaskExecutions(ctx context.Context, nodeExecutionID *core.NodeExecutionIdentifier) (*admin.TaskExecution, error) { ctx = getNodeExecutionContext(ctx, nodeExecutionID) - identifierFilters, err := util.GetNodeExecutionIdentifierFilters(ctx, nodeExecutionID) + identifierFilters, err := 
util.GetNodeExecutionIdentifierFilters(ctx, nodeExecutionID, common.TaskExecution) if err != nil { return nil, err } diff --git a/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go b/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go index 5ad49a7257..d48efeeee9 100644 --- a/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go +++ b/flyteadmin/pkg/async/notifications/implementations/gcp_processor_test.go @@ -48,7 +48,7 @@ func TestGcpProcessor_StartProcessing(t *testing.T) { m := &dto.Metric{} err := testGcpProcessor.(*GcpProcessor).systemMetrics.MessageSuccess.Write(m) assert.Nil(t, err) - assert.Equal(t, "counter:{value:1}", m.String()) + assert.Equal(t, float64(1), m.GetCounter().GetValue()) } func TestGcpProcessor_StartProcessingNoMessages(t *testing.T) { @@ -63,7 +63,7 @@ func TestGcpProcessor_StartProcessingNoMessages(t *testing.T) { m := &dto.Metric{} err := testGcpProcessor.(*GcpProcessor).systemMetrics.MessageSuccess.Write(m) assert.Nil(t, err) - assert.Equal(t, "counter:{value:0}", m.String()) + assert.Equal(t, float64(0), m.GetCounter().GetValue()) } func TestGcpProcessor_StartProcessingError(t *testing.T) { @@ -96,7 +96,7 @@ func TestGcpProcessor_StartProcessingEmailError(t *testing.T) { m := &dto.Metric{} err := testGcpProcessor.(*GcpProcessor).systemMetrics.MessageProcessorError.Write(m) assert.Nil(t, err) - assert.Equal(t, "counter:{value:1}", m.String()) + assert.Equal(t, float64(1), m.GetCounter().GetValue()) } func TestGcpProcessor_StopProcessing(t *testing.T) { diff --git a/flyteadmin/pkg/common/filters.go b/flyteadmin/pkg/common/filters.go index cf7987bdf5..57756e7820 100644 --- a/flyteadmin/pkg/common/filters.go +++ b/flyteadmin/pkg/common/filters.go @@ -96,6 +96,13 @@ var executionIdentifierFields = map[string]bool{ "name": true, } +// Entities that have special case handling for execution identifier fields. 
+var executionIdentifierEntities = map[Entity]bool{ + Execution: true, + NodeExecution: true, + TaskExecution: true, +} + var entityMetadataFields = map[string]bool{ "description": true, "state": true, @@ -253,7 +260,7 @@ func (f *inlineFilterImpl) GetGormJoinTableQueryExpr(tableName string) (GormQuer func customizeField(field string, entity Entity) string { // Execution identifier fields have to be customized because we differ from convention in those column names. - if entity == Execution && executionIdentifierFields[field] { + if executionIdentifierEntities[entity] && executionIdentifierFields[field] { return fmt.Sprintf("execution_%s", field) } // admin_tag table has been migrated to an execution_tag table, so we need to customize the field name. diff --git a/flyteadmin/pkg/manager/impl/execution_manager.go b/flyteadmin/pkg/manager/impl/execution_manager.go index 6ae9a61a52..27acf152ec 100644 --- a/flyteadmin/pkg/manager/impl/execution_manager.go +++ b/flyteadmin/pkg/manager/impl/execution_manager.go @@ -11,6 +11,7 @@ import ( "github.com/golang/protobuf/ptypes" "github.com/golang/protobuf/ptypes/timestamp" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sync/errgroup" "google.golang.org/grpc/codes" "github.com/flyteorg/flyte/flyteadmin/auth" @@ -402,15 +403,34 @@ func (m *ExecutionManager) getExecutionConfig(ctx context.Context, request *admi return workflowExecConfig, nil } -func (m *ExecutionManager) getClusterAssignment(ctx context.Context, request *admin.ExecutionCreateRequest) ( - *admin.ClusterAssignment, error) { - if request.Spec.ClusterAssignment != nil { - return request.Spec.ClusterAssignment, nil +func (m *ExecutionManager) getClusterAssignment(ctx context.Context, req *admin.ExecutionCreateRequest) (*admin.ClusterAssignment, error) { + storedAssignment, err := m.fetchClusterAssignment(ctx, req.Project, req.Domain) + if err != nil { + return nil, err + } + + reqAssignment := req.GetSpec().GetClusterAssignment() + reqPool := 
reqAssignment.GetClusterPoolName() + storedPool := storedAssignment.GetClusterPoolName() + if reqPool == "" { + return storedAssignment, nil + } + + if storedPool == "" { + return reqAssignment, nil } + if reqPool != storedPool { + return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "execution with project %q and domain %q cannot run on cluster pool %q, because its configured to run on pool %q", req.Project, req.Domain, reqPool, storedPool) + } + + return storedAssignment, nil +} + +func (m *ExecutionManager) fetchClusterAssignment(ctx context.Context, project, domain string) (*admin.ClusterAssignment, error) { resource, err := m.resourceManager.GetResource(ctx, interfaces.ResourceRequest{ - Project: request.Project, - Domain: request.Domain, + Project: project, + Domain: domain, ResourceType: admin.MatchableResource_CLUSTER_ASSIGNMENT, }) if err != nil && !errors.IsDoesNotExistError(err) { @@ -420,11 +440,13 @@ func (m *ExecutionManager) getClusterAssignment(ctx context.Context, request *ad if resource != nil && resource.Attributes.GetClusterAssignment() != nil { return resource.Attributes.GetClusterAssignment(), nil } - clusterPoolAssignment := m.config.ClusterPoolAssignmentConfiguration().GetClusterPoolAssignments()[request.GetDomain()] - return &admin.ClusterAssignment{ - ClusterPoolName: clusterPoolAssignment.Pool, - }, nil + var clusterAssignment *admin.ClusterAssignment + domainAssignment := m.config.ClusterPoolAssignmentConfiguration().GetClusterPoolAssignments()[domain] + if domainAssignment.Pool != "" { + clusterAssignment = &admin.ClusterAssignment{ClusterPoolName: domainAssignment.Pool} + } + return clusterAssignment, nil } func (m *ExecutionManager) launchSingleTaskExecution( @@ -445,7 +467,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( return nil, nil, err } - // Prepare a skeleton workflow + // Prepare a skeleton workflow and launch plan taskIdentifier := request.Spec.LaunchPlan workflowModel, err := 
util.CreateOrGetWorkflowModel(ctx, request, m.db, m.workflowManager, m.namedEntityManager, taskIdentifier, &task) @@ -457,13 +479,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( if err != nil { return nil, nil, err } - closure, err := util.FetchAndGetWorkflowClosure(ctx, m.storageClient, workflowModel.RemoteClosureIdentifier) - if err != nil { - return nil, nil, err - } - closure.CreatedAt = workflow.Closure.CreatedAt - workflow.Closure = closure - // Also prepare a skeleton launch plan. + launchPlan, err := util.CreateOrGetLaunchPlan(ctx, m.db, m.config, taskIdentifier, workflow.Closure.CompiledWorkflow.Primary.Template.Interface, workflowModel.ID, request.Spec) if err != nil { @@ -488,6 +504,40 @@ func (m *ExecutionManager) launchSingleTaskExecution( Domain: request.Domain, Name: name, } + + // Overlap the blob store reads and writes + getClosureGroup, getClosureGroupCtx := errgroup.WithContext(ctx) + var closure *admin.WorkflowClosure + getClosureGroup.Go(func() error { + var err error + closure, err = util.FetchAndGetWorkflowClosure(getClosureGroupCtx, m.storageClient, workflowModel.RemoteClosureIdentifier) + return err + }) + + offloadInputsGroup, offloadInputsGroupCtx := errgroup.WithContext(ctx) + var inputsURI storage.DataReference + offloadInputsGroup.Go(func() error { + var err error + inputsURI, err = common.OffloadLiteralMap(offloadInputsGroupCtx, m.storageClient, executionInputs, // or request.Inputs? 
+ workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.Inputs) + return err + }) + + var userInputsURI storage.DataReference + offloadInputsGroup.Go(func() error { + var err error + userInputsURI, err = common.OffloadLiteralMap(offloadInputsGroupCtx, m.storageClient, request.Inputs, + workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.UserInputs) + return err + }) + + err = getClosureGroup.Wait() + if err != nil { + return nil, nil, err + } + closure.CreatedAt = workflow.Closure.CreatedAt + workflow.Closure = closure + ctx = getExecutionContext(ctx, workflowExecutionID) namespace := common.GetNamespaceName( m.config.NamespaceMappingConfiguration().GetNamespaceTemplate(), workflowExecutionID.Project, workflowExecutionID.Domain) @@ -515,14 +565,6 @@ func (m *ExecutionManager) launchSingleTaskExecution( // Dynamically assign execution queues. m.populateExecutionQueue(ctx, workflow.Id, workflow.Closure.CompiledWorkflow) - inputsURI, err := common.OffloadLiteralMap(ctx, m.storageClient, request.Inputs, workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.Inputs) - if err != nil { - return nil, nil, err - } - userInputsURI, err := common.OffloadLiteralMap(ctx, m.storageClient, request.Inputs, workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.UserInputs) - if err != nil { - return nil, nil, err - } executionConfig, err := m.getExecutionConfig(ctx, request, nil) if err != nil { return nil, nil, err @@ -583,6 +625,11 @@ func (m *ExecutionManager) launchSingleTaskExecution( executionParameters.RecoveryExecution = request.Spec.Metadata.ReferenceExecution } + err = offloadInputsGroup.Wait() + if err != nil { + return nil, nil, err + } + workflowExecutor := plugins.Get[workflowengineInterfaces.WorkflowExecutor](m.pluginRegistry, plugins.PluginIDWorkflowExecutor) execInfo, err := workflowExecutor.Execute(ctx, 
workflowengineInterfaces.ExecutionData{ Namespace: namespace, @@ -833,13 +880,18 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( logger.Debugf(ctx, "Failed to validate ExecutionCreateRequest %+v with err %v", request, err) return nil, nil, nil, err } + if request.Spec.LaunchPlan.ResourceType == core.ResourceType_TASK { logger.Debugf(ctx, "Launching single task execution with [%+v]", request.Spec.LaunchPlan) // When tasks can have defaults this will need to handle Artifacts as well. ctx, model, err := m.launchSingleTaskExecution(ctx, request, requestedAt) return ctx, model, nil, err } + return m.launchExecution(ctx, request, requestedAt) +} +func (m *ExecutionManager) launchExecution( + ctx context.Context, request *admin.ExecutionCreateRequest, requestedAt time.Time) (context.Context, *models.Execution, []*models.ExecutionTag, error) { launchPlanModel, err := util.GetLaunchPlanModel(ctx, m.db, request.Spec.LaunchPlan) if err != nil { logger.Debugf(ctx, "Failed to get launch plan model for ExecutionCreateRequest %+v with err %v", request, err) @@ -880,13 +932,6 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.Spec.WorkflowId, err) return nil, nil, nil, err } - closure, err := util.FetchAndGetWorkflowClosure(ctx, m.storageClient, workflowModel.RemoteClosureIdentifier) - if err != nil { - logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.Spec.WorkflowId, err) - return nil, nil, nil, err - } - closure.CreatedAt = workflow.Closure.CreatedAt - workflow.Closure = closure name := util.GetExecutionName(request) workflowExecutionID := &core.WorkflowExecutionIdentifier{ @@ -894,6 +939,42 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( Domain: request.Domain, Name: name, } + + // Overlap the blob store reads and writes + group, groupCtx := errgroup.WithContext(ctx) + var closure *admin.WorkflowClosure + group.Go(func() error { + 
var err error + closure, err = util.FetchAndGetWorkflowClosure(groupCtx, m.storageClient, workflowModel.RemoteClosureIdentifier) + if err != nil { + logger.Debugf(ctx, "Failed to get workflow with id %+v with err %v", launchPlan.Spec.WorkflowId, err) + } + return err + }) + + var inputsURI storage.DataReference + group.Go(func() error { + var err error + inputsURI, err = common.OffloadLiteralMap(groupCtx, m.storageClient, executionInputs, + workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.Inputs) + return err + }) + + var userInputsURI storage.DataReference + group.Go(func() error { + var err error + userInputsURI, err = common.OffloadLiteralMap(groupCtx, m.storageClient, request.Inputs, + workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.UserInputs) + return err + }) + + err = group.Wait() + if err != nil { + return nil, nil, nil, err + } + closure.CreatedAt = workflow.Closure.CreatedAt + workflow.Closure = closure + ctx = getExecutionContext(ctx, workflowExecutionID) var requestSpec = request.Spec if requestSpec.Metadata == nil { @@ -919,15 +1000,6 @@ func (m *ExecutionManager) launchExecutionAndPrepareModel( // Dynamically assign execution queues. 
m.populateExecutionQueue(ctx, workflow.Id, workflow.Closure.CompiledWorkflow) - inputsURI, err := common.OffloadLiteralMap(ctx, m.storageClient, executionInputs, workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.Inputs) - if err != nil { - return nil, nil, nil, err - } - userInputsURI, err := common.OffloadLiteralMap(ctx, m.storageClient, request.Inputs, workflowExecutionID.Project, workflowExecutionID.Domain, workflowExecutionID.Name, shared.UserInputs) - if err != nil { - return nil, nil, nil, err - } - executionConfig, err := m.getExecutionConfig(ctx, request, launchPlan) if err != nil { return nil, nil, nil, err @@ -1538,16 +1610,31 @@ func (m *ExecutionManager) GetExecutionData( return nil, err } } - inputs, inputURLBlob, err := util.GetInputs(ctx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), - m.storageClient, executionModel.InputsURI.String()) - if err != nil { - return nil, err - } - outputs, outputURLBlob, err := util.GetOutputs(ctx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), - m.storageClient, util.ToExecutionClosureInterface(execution.Closure)) + + var inputs *core.LiteralMap + var inputURLBlob *admin.UrlBlob + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + var err error + inputs, inputURLBlob, err = util.GetInputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), + m.storageClient, executionModel.InputsURI.String()) + return err + }) + + var outputs *core.LiteralMap + var outputURLBlob *admin.UrlBlob + group.Go(func() error { + var err error + outputs, outputURLBlob, err = util.GetOutputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), + m.storageClient, util.ToExecutionClosureInterface(execution.Closure)) + return err + }) + + err = group.Wait() if err != nil { return nil, err } + response := &admin.WorkflowExecutionGetDataResponse{ Inputs: inputURLBlob, Outputs: 
outputURLBlob, diff --git a/flyteadmin/pkg/manager/impl/execution_manager_test.go b/flyteadmin/pkg/manager/impl/execution_manager_test.go index 1cf2713083..5e874a4589 100644 --- a/flyteadmin/pkg/manager/impl/execution_manager_test.go +++ b/flyteadmin/pkg/manager/impl/execution_manager_test.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "strings" + "sync" "testing" "time" @@ -238,8 +239,11 @@ func setDefaultTaskCallbackForExecTest(repository interfaces.Repository) { func getMockStorageForExecTest(ctx context.Context) *storage.DataStore { mockStorage := commonMocks.GetMockStorageClient() + var mtx sync.RWMutex mockStorage.ComposedProtobufStore.(*commonMocks.TestDataStore).ReadProtobufCb = func( ctx context.Context, reference storage.DataReference, msg proto.Message) error { + mtx.RLock() + defer mtx.RUnlock() if val, ok := mockStorage.ComposedProtobufStore.(*commonMocks.TestDataStore).Store[reference]; ok { _ = proto.Unmarshal(val, msg) return nil @@ -252,6 +256,8 @@ func getMockStorageForExecTest(ctx context.Context) *storage.DataStore { if err != nil { return err } + mtx.Lock() + defer mtx.Unlock() mockStorage.ComposedProtobufStore.(*commonMocks.TestDataStore).Store[reference] = bytes return nil } @@ -298,8 +304,7 @@ func TestCreateExecution(t *testing.T) { }} repository.ProjectRepo().(*repositoryMocks.MockProjectRepo).GetFunction = func( ctx context.Context, projectID string) (models.Project, error) { - return transformers.CreateProjectModel(&admin.Project{ - Labels: &labels}), nil + return transformers.CreateProjectModel(&admin.Project{Labels: &labels}), nil } clusterAssignment := admin.ClusterAssignment{ClusterPoolName: "gpu"} @@ -376,8 +381,6 @@ func TestCreateExecution(t *testing.T) { mockConfig := getMockExecutionsConfigProvider() mockConfig.(*runtimeMocks.MockConfigurationProvider).AddQualityOfServiceConfiguration(qosProvider) - - execManager := NewExecutionManager(repository, r, mockConfig, getMockStorageForExecTest(context.Background()), 
mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, &mockPublisher, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) request := testutils.GetExecutionRequest() request.Spec.Metadata = &admin.ExecutionMetadata{ Principal: "unused - populated from authenticated context", @@ -386,16 +389,18 @@ func TestCreateExecution(t *testing.T) { request.Spec.ClusterAssignment = &clusterAssignment request.Spec.ExecutionClusterLabel = &admin.ExecutionClusterLabel{Value: executionClusterLabel} + execManager := NewExecutionManager(repository, r, mockConfig, getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, &mockPublisher, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) + identity, err := auth.NewIdentityContext("", principal, "", time.Now(), sets.NewString(), nil, nil) assert.NoError(t, err) ctx := identity.WithContext(context.Background()) response, err := execManager.CreateExecution(ctx, request, requestedAt) - assert.Nil(t, err) + assert.NoError(t, err) expectedResponse := &admin.ExecutionCreateResponse{ Id: &executionIdentifier, } - assert.Nil(t, err) + assert.NoError(t, err) assert.True(t, proto.Equal(expectedResponse.Id, response.Id)) // TODO: Check for offloaded inputs @@ -626,7 +631,6 @@ func TestCreateExecutionInCompatibleInputs(t *testing.T) { } func TestCreateExecutionPropellerFailure(t *testing.T) { - clusterAssignment := admin.ClusterAssignment{ClusterPoolName: "gpu"} repository := getMockRepositoryForExecTest() setDefaultLpCallbackForExecTest(repository) expectedErr := flyteAdminErrors.NewFlyteAdminErrorf(codes.Internal, "ABC") @@ -660,7 +664,6 @@ func TestCreateExecutionPropellerFailure(t *testing.T) { Principal: "unused - populated from authenticated context", } request.Spec.RawOutputDataConfig = &admin.RawOutputDataConfig{OutputLocationPrefix: rawOutput} - request.Spec.ClusterAssignment = 
&clusterAssignment identity, err := auth.NewIdentityContext("", principal, "", time.Now(), sets.NewString(), nil, nil) assert.NoError(t, err) @@ -2002,6 +2005,10 @@ func TestRecoverExecution_GetExistingInputsFailure(t *testing.T) { ctx context.Context, reference storage.DataReference, msg proto.Message) error { return expectedErr } + mockStorage.ComposedProtobufStore.(*commonMocks.TestDataStore).WriteProtobufCb = func( + ctx context.Context, reference storage.DataReference, opts storage.Options, msg proto.Message) error { + return nil + } r := plugins.NewRegistry() r.RegisterDefault(plugins.PluginIDWorkflowExecutor, &defaultTestExecutor) execManager := NewExecutionManager(repository, r, getMockExecutionsConfigProvider(), mockStorage, mockScope.NewTestScope(), mockScope.NewTestScope(), &mockPublisher, mockExecutionRemoteURL, nil, nil, nil, nil, &eventWriterMocks.WorkflowExecutionEventWriter{}) @@ -5457,8 +5464,32 @@ func TestGetClusterAssignment(t *testing.T) { assert.NoError(t, err) assert.True(t, proto.Equal(ca, &clusterAssignment)) }) - t.Run("value from request", func(t *testing.T) { - reqClusterAssignment := admin.ClusterAssignment{ClusterPoolName: "swimming-pool"} + t.Run("value from config", func(t *testing.T) { + customCP := "my_cp" + clusterPoolAsstProvider := &runtimeIFaceMocks.ClusterPoolAssignmentConfiguration{} + clusterPoolAsstProvider.OnGetClusterPoolAssignments().Return(runtimeInterfaces.ClusterPoolAssignments{ + workflowIdentifier.GetDomain(): runtimeInterfaces.ClusterPoolAssignment{ + Pool: customCP, + }, + }) + mockConfig := getMockExecutionsConfigProvider() + mockConfig.(*runtimeMocks.MockConfigurationProvider).AddClusterPoolAssignmentConfiguration(clusterPoolAsstProvider) + + executionManager := ExecutionManager{ + resourceManager: &managerMocks.MockResourceManager{}, + config: mockConfig, + } + + ca, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{ + Project: workflowIdentifier.Project, + Domain: 
workflowIdentifier.Domain, + Spec: &admin.ExecutionSpec{}, + }) + assert.NoError(t, err) + assert.Equal(t, customCP, ca.GetClusterPoolName()) + }) + t.Run("value from request matches value from config", func(t *testing.T) { + reqClusterAssignment := admin.ClusterAssignment{ClusterPoolName: "gpu"} ca, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{ Project: workflowIdentifier.Project, Domain: workflowIdentifier.Domain, @@ -5469,12 +5500,30 @@ func TestGetClusterAssignment(t *testing.T) { assert.NoError(t, err) assert.True(t, proto.Equal(ca, &reqClusterAssignment)) }) - t.Run("value from config", func(t *testing.T) { - customCP := "my_cp" + t.Run("no value in DB nor in config, takes value from request", func(t *testing.T) { + mockConfig := getMockExecutionsConfigProvider() + + executionManager := ExecutionManager{ + resourceManager: &managerMocks.MockResourceManager{}, + config: mockConfig, + } + + reqClusterAssignment := admin.ClusterAssignment{ClusterPoolName: "gpu"} + ca, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{ + Project: workflowIdentifier.Project, + Domain: workflowIdentifier.Domain, + Spec: &admin.ExecutionSpec{ + ClusterAssignment: &reqClusterAssignment, + }, + }) + assert.NoError(t, err) + assert.True(t, proto.Equal(ca, &reqClusterAssignment)) + }) + t.Run("empty value in DB, takes value from request", func(t *testing.T) { clusterPoolAsstProvider := &runtimeIFaceMocks.ClusterPoolAssignmentConfiguration{} clusterPoolAsstProvider.OnGetClusterPoolAssignments().Return(runtimeInterfaces.ClusterPoolAssignments{ workflowIdentifier.GetDomain(): runtimeInterfaces.ClusterPoolAssignment{ - Pool: customCP, + Pool: "", }, }) mockConfig := getMockExecutionsConfigProvider() @@ -5485,13 +5534,56 @@ func TestGetClusterAssignment(t *testing.T) { config: mockConfig, } + reqClusterAssignment := admin.ClusterAssignment{ClusterPoolName: "gpu"} ca, err := 
executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{ Project: workflowIdentifier.Project, Domain: workflowIdentifier.Domain, - Spec: &admin.ExecutionSpec{}, + Spec: &admin.ExecutionSpec{ + ClusterAssignment: &reqClusterAssignment, + }, }) assert.NoError(t, err) - assert.Equal(t, customCP, ca.GetClusterPoolName()) + assert.True(t, proto.Equal(ca, &reqClusterAssignment)) + }) + t.Run("value from request doesn't match value from config", func(t *testing.T) { + reqClusterAssignment := admin.ClusterAssignment{ClusterPoolName: "swimming-pool"} + _, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{ + Project: workflowIdentifier.Project, + Domain: workflowIdentifier.Domain, + Spec: &admin.ExecutionSpec{ + ClusterAssignment: &reqClusterAssignment, + }, + }) + st, ok := status.FromError(err) + assert.True(t, ok) + assert.Equal(t, codes.InvalidArgument, st.Code()) + assert.Equal(t, `execution with project "project" and domain "domain" cannot run on cluster pool "swimming-pool", because its configured to run on pool "gpu"`, st.Message()) + }) + t.Run("db error", func(t *testing.T) { + expected := errors.New("fail db") + resourceManager.GetResourceFunc = func(ctx context.Context, + request managerInterfaces.ResourceRequest) (*managerInterfaces.ResourceResponse, error) { + assert.EqualValues(t, request, managerInterfaces.ResourceRequest{ + Project: workflowIdentifier.Project, + Domain: workflowIdentifier.Domain, + ResourceType: admin.MatchableResource_CLUSTER_ASSIGNMENT, + }) + return &managerInterfaces.ResourceResponse{ + Attributes: &admin.MatchingAttributes{ + Target: &admin.MatchingAttributes_ClusterAssignment{ + ClusterAssignment: &clusterAssignment, + }, + }, + }, expected + } + + _, err := executionManager.getClusterAssignment(context.TODO(), &admin.ExecutionCreateRequest{ + Project: workflowIdentifier.Project, + Domain: workflowIdentifier.Domain, + Spec: &admin.ExecutionSpec{}, + }) + + 
assert.Equal(t, expected, err) }) } diff --git a/flyteadmin/pkg/manager/impl/node_execution_manager.go b/flyteadmin/pkg/manager/impl/node_execution_manager.go index be498d52ae..2f0f60977c 100644 --- a/flyteadmin/pkg/manager/impl/node_execution_manager.go +++ b/flyteadmin/pkg/manager/impl/node_execution_manager.go @@ -7,6 +7,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sync/errgroup" "google.golang.org/grpc/codes" cloudeventInterfaces "github.com/flyteorg/flyte/flyteadmin/pkg/async/cloudevent/interfaces" @@ -406,11 +407,16 @@ func (m *NodeExecutionManager) listNodeExecutions( return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid pagination token %s for ListNodeExecutions", requestToken) } + joinTableEntities := make(map[common.Entity]bool) + for _, filter := range filters { + joinTableEntities[filter.GetEntity()] = true + } listInput := repoInterfaces.ListResourceInput{ - Limit: int(limit), - Offset: offset, - InlineFilters: filters, - SortParameter: sortParameter, + Limit: int(limit), + Offset: offset, + InlineFilters: filters, + SortParameter: sortParameter, + JoinTableEntities: joinTableEntities, } listInput.MapFilters = mapFilters @@ -444,7 +450,7 @@ func (m *NodeExecutionManager) ListNodeExecutions( } ctx = getExecutionContext(ctx, request.WorkflowExecutionId) - identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, request.WorkflowExecutionId) + identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, request.WorkflowExecutionId, common.NodeExecution) if err != nil { return nil, err } @@ -482,7 +488,7 @@ func (m *NodeExecutionManager) ListNodeExecutionsForTask( } ctx = getTaskExecutionContext(ctx, request.TaskExecutionId) identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters( - ctx, request.TaskExecutionId.NodeExecutionId.ExecutionId) + ctx, request.TaskExecutionId.NodeExecutionId.ExecutionId, common.NodeExecution) if err != 
nil { return nil, err } @@ -520,14 +526,26 @@ func (m *NodeExecutionManager) GetNodeExecutionData( return nil, err } - inputs, inputURLBlob, err := util.GetInputs(ctx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), - m.storageClient, nodeExecution.InputUri) - if err != nil { - return nil, err - } + var inputs *core.LiteralMap + var inputURLBlob *admin.UrlBlob + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + var err error + inputs, inputURLBlob, err = util.GetInputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), + m.storageClient, nodeExecution.InputUri) + return err + }) + + var outputs *core.LiteralMap + var outputURLBlob *admin.UrlBlob + group.Go(func() error { + var err error + outputs, outputURLBlob, err = util.GetOutputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), + m.storageClient, nodeExecution.Closure) + return err + }) - outputs, outputURLBlob, err := util.GetOutputs(ctx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), - m.storageClient, nodeExecution.Closure) + err = group.Wait() if err != nil { return nil, err } diff --git a/flyteadmin/pkg/manager/impl/node_execution_manager_test.go b/flyteadmin/pkg/manager/impl/node_execution_manager_test.go index cfc3db2bff..b43c785b33 100644 --- a/flyteadmin/pkg/manager/impl/node_execution_manager_test.go +++ b/flyteadmin/pkg/manager/impl/node_execution_manager_test.go @@ -784,17 +784,17 @@ func TestListNodeExecutionsLevelZero(t *testing.T) { assert.Equal(t, 1, input.Limit) assert.Equal(t, 2, input.Offset) assert.Len(t, input.InlineFilters, 3) - assert.Equal(t, common.Execution, input.InlineFilters[0].GetEntity()) + assert.Equal(t, common.NodeExecution, input.InlineFilters[0].GetEntity()) queryExpr, _ := input.InlineFilters[0].GetGormQueryExpr() assert.Equal(t, "project", queryExpr.Args) assert.Equal(t, "execution_project = ?", queryExpr.Query) - assert.Equal(t, common.Execution, 
input.InlineFilters[1].GetEntity()) + assert.Equal(t, common.NodeExecution, input.InlineFilters[1].GetEntity()) queryExpr, _ = input.InlineFilters[1].GetGormQueryExpr() assert.Equal(t, "domain", queryExpr.Args) assert.Equal(t, "execution_domain = ?", queryExpr.Query) - assert.Equal(t, common.Execution, input.InlineFilters[2].GetEntity()) + assert.Equal(t, common.NodeExecution, input.InlineFilters[2].GetEntity()) queryExpr, _ = input.InlineFilters[2].GetGormQueryExpr() assert.Equal(t, "name", queryExpr.Args) assert.Equal(t, "execution_name = ?", queryExpr.Query) @@ -806,6 +806,10 @@ func TestListNodeExecutionsLevelZero(t *testing.T) { "parent_task_execution_id": nil, }, filter) + assert.EqualValues(t, input.JoinTableEntities, map[common.Entity]bool{ + common.NodeExecution: true, + }) + assert.Equal(t, "execution_domain asc", input.SortParameter.GetGormOrderExpr()) return interfaces.NodeExecutionCollectionOutput{ NodeExecutions: []models.NodeExecution{ @@ -904,17 +908,17 @@ func TestListNodeExecutionsWithParent(t *testing.T) { assert.Equal(t, 1, input.Limit) assert.Equal(t, 2, input.Offset) assert.Len(t, input.InlineFilters, 4) - assert.Equal(t, common.Execution, input.InlineFilters[0].GetEntity()) + assert.Equal(t, common.NodeExecution, input.InlineFilters[0].GetEntity()) queryExpr, _ := input.InlineFilters[0].GetGormQueryExpr() assert.Equal(t, "project", queryExpr.Args) assert.Equal(t, "execution_project = ?", queryExpr.Query) - assert.Equal(t, common.Execution, input.InlineFilters[1].GetEntity()) + assert.Equal(t, common.NodeExecution, input.InlineFilters[1].GetEntity()) queryExpr, _ = input.InlineFilters[1].GetGormQueryExpr() assert.Equal(t, "domain", queryExpr.Args) assert.Equal(t, "execution_domain = ?", queryExpr.Query) - assert.Equal(t, common.Execution, input.InlineFilters[2].GetEntity()) + assert.Equal(t, common.NodeExecution, input.InlineFilters[2].GetEntity()) queryExpr, _ = input.InlineFilters[2].GetGormQueryExpr() assert.Equal(t, "name", queryExpr.Args) 
assert.Equal(t, "execution_name = ?", queryExpr.Query) @@ -979,6 +983,129 @@ func TestListNodeExecutionsWithParent(t *testing.T) { assert.Equal(t, "3", nodeExecutions.Token) } +func TestListNodeExecutions_WithJoinTableFilter(t *testing.T) { + repository := repositoryMocks.NewMockRepository() + expectedClosure := admin.NodeExecutionClosure{ + Phase: core.NodeExecution_SUCCEEDED, + } + expectedMetadata := admin.NodeExecutionMetaData{ + SpecNodeId: "spec_node_id", + RetryGroup: "retry_group", + } + metadataBytes, _ := proto.Marshal(&expectedMetadata) + closureBytes, _ := proto.Marshal(&expectedClosure) + + repository.NodeExecutionRepo().(*repositoryMocks.MockNodeExecutionRepo).SetListCallback( + func(ctx context.Context, input interfaces.ListResourceInput) ( + interfaces.NodeExecutionCollectionOutput, error) { + assert.Equal(t, 1, input.Limit) + assert.Equal(t, 2, input.Offset) + assert.Len(t, input.InlineFilters, 4) + assert.Equal(t, common.NodeExecution, input.InlineFilters[0].GetEntity()) + queryExpr, _ := input.InlineFilters[0].GetGormQueryExpr() + assert.Equal(t, "project", queryExpr.Args) + assert.Equal(t, "execution_project = ?", queryExpr.Query) + + assert.Equal(t, common.NodeExecution, input.InlineFilters[1].GetEntity()) + queryExpr, _ = input.InlineFilters[1].GetGormQueryExpr() + assert.Equal(t, "domain", queryExpr.Args) + assert.Equal(t, "execution_domain = ?", queryExpr.Query) + + assert.Equal(t, common.NodeExecution, input.InlineFilters[2].GetEntity()) + queryExpr, _ = input.InlineFilters[2].GetGormQueryExpr() + assert.Equal(t, "name", queryExpr.Args) + assert.Equal(t, "execution_name = ?", queryExpr.Query) + + assert.Equal(t, common.Execution, input.InlineFilters[3].GetEntity()) + queryExpr, _ = input.InlineFilters[3].GetGormQueryExpr() + assert.Equal(t, "SUCCEEDED", queryExpr.Args) + assert.Equal(t, "phase = ?", queryExpr.Query) + + assert.Len(t, input.MapFilters, 1) + filter := input.MapFilters[0].GetFilter() + assert.Equal(t, map[string]interface{}{ + 
"parent_id": nil, + "parent_task_execution_id": nil, + }, filter) + + assert.EqualValues(t, input.JoinTableEntities, map[common.Entity]bool{ + common.NodeExecution: true, + common.Execution: true, + }) + + assert.Equal(t, "execution_domain asc", input.SortParameter.GetGormOrderExpr()) + return interfaces.NodeExecutionCollectionOutput{ + NodeExecutions: []models.NodeExecution{ + { + NodeExecutionKey: models.NodeExecutionKey{ + NodeID: "node id", + ExecutionKey: models.ExecutionKey{ + Project: "project", + Domain: "domain", + Name: "name", + }, + }, + Phase: core.NodeExecution_SUCCEEDED.String(), + InputURI: "input uri", + StartedAt: &occurredAt, + Closure: closureBytes, + NodeExecutionMetadata: metadataBytes, + }, + }, + }, nil + }) + repository.NodeExecutionRepo().(*repositoryMocks.MockNodeExecutionRepo).SetGetWithChildrenCallback( + func( + ctx context.Context, input interfaces.NodeExecutionResource) (models.NodeExecution, error) { + return models.NodeExecution{ + NodeExecutionKey: models.NodeExecutionKey{ + NodeID: "node id", + ExecutionKey: models.ExecutionKey{ + Project: "project", + Domain: "domain", + Name: "name", + }, + }, + Phase: core.NodeExecution_SUCCEEDED.String(), + InputURI: "input uri", + StartedAt: &occurredAt, + Closure: closureBytes, + NodeExecutionMetadata: metadataBytes, + }, nil + }) + nodeExecManager := NewNodeExecutionManager(repository, getMockExecutionsConfigProvider(), make([]string, 0), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockNodeExecutionRemoteURL, nil, nil, &eventWriterMocks.NodeExecutionEventWriter{}) + nodeExecutions, err := nodeExecManager.ListNodeExecutions(context.Background(), &admin.NodeExecutionListRequest{ + WorkflowExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + Limit: 1, + Token: "2", + SortBy: &admin.Sort{ + Direction: admin.Sort_ASCENDING, + Key: "execution_domain", + }, + Filters: "eq(execution.phase, SUCCEEDED)", + }) 
+ assert.NoError(t, err) + assert.Len(t, nodeExecutions.NodeExecutions, 1) + assert.True(t, proto.Equal(&admin.NodeExecution{ + Id: &core.NodeExecutionIdentifier{ + NodeId: "node id", + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + }, + InputUri: "input uri", + Closure: &expectedClosure, + Metadata: &expectedMetadata, + }, nodeExecutions.NodeExecutions[0])) + assert.Equal(t, "3", nodeExecutions.Token) +} + func TestListNodeExecutions_InvalidParams(t *testing.T) { nodeExecManager := NewNodeExecutionManager(nil, getMockExecutionsConfigProvider(), make([]string, 0), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockNodeExecutionRemoteURL, nil, nil, &eventWriterMocks.NodeExecutionEventWriter{}) _, err := nodeExecManager.ListNodeExecutions(context.Background(), &admin.NodeExecutionListRequest{ @@ -1120,17 +1247,17 @@ func TestListNodeExecutionsForTask(t *testing.T) { assert.Equal(t, 1, input.Limit) assert.Equal(t, 2, input.Offset) assert.Len(t, input.InlineFilters, 4) - assert.Equal(t, common.Execution, input.InlineFilters[0].GetEntity()) + assert.Equal(t, common.NodeExecution, input.InlineFilters[0].GetEntity()) queryExpr, _ := input.InlineFilters[0].GetGormQueryExpr() assert.Equal(t, "project", queryExpr.Args) assert.Equal(t, "execution_project = ?", queryExpr.Query) - assert.Equal(t, common.Execution, input.InlineFilters[1].GetEntity()) + assert.Equal(t, common.NodeExecution, input.InlineFilters[1].GetEntity()) queryExpr, _ = input.InlineFilters[1].GetGormQueryExpr() assert.Equal(t, "domain", queryExpr.Args) assert.Equal(t, "execution_domain = ?", queryExpr.Query) - assert.Equal(t, common.Execution, input.InlineFilters[2].GetEntity()) + assert.Equal(t, common.NodeExecution, input.InlineFilters[2].GetEntity()) queryExpr, _ = input.InlineFilters[2].GetGormQueryExpr() assert.Equal(t, "name", queryExpr.Args) assert.Equal(t, "execution_name = ?", queryExpr.Query) diff --git 
a/flyteadmin/pkg/manager/impl/signal_manager.go b/flyteadmin/pkg/manager/impl/signal_manager.go index 49bfc8ac45..f98edae674 100644 --- a/flyteadmin/pkg/manager/impl/signal_manager.go +++ b/flyteadmin/pkg/manager/impl/signal_manager.go @@ -72,7 +72,7 @@ func (s *SignalManager) ListSignals(ctx context.Context, request *admin.SignalLi } ctx = getExecutionContext(ctx, request.WorkflowExecutionId) - identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, request.WorkflowExecutionId) + identifierFilters, err := util.GetWorkflowExecutionIdentifierFilters(ctx, request.WorkflowExecutionId, common.Signal) if err != nil { return nil, err } diff --git a/flyteadmin/pkg/manager/impl/task_execution_manager.go b/flyteadmin/pkg/manager/impl/task_execution_manager.go index ad5d7423b6..f8b8e12e21 100644 --- a/flyteadmin/pkg/manager/impl/task_execution_manager.go +++ b/flyteadmin/pkg/manager/impl/task_execution_manager.go @@ -7,6 +7,7 @@ import ( "github.com/golang/protobuf/proto" "github.com/prometheus/client_golang/prometheus" + "golang.org/x/sync/errgroup" "google.golang.org/grpc/codes" cloudeventInterfaces "github.com/flyteorg/flyte/flyteadmin/pkg/async/cloudevent/interfaces" @@ -246,7 +247,7 @@ func (m *TaskExecutionManager) ListTaskExecutions( } ctx = getNodeExecutionContext(ctx, request.NodeExecutionId) - identifierFilters, err := util.GetNodeExecutionIdentifierFilters(ctx, request.NodeExecutionId) + identifierFilters, err := util.GetNodeExecutionIdentifierFilters(ctx, request.NodeExecutionId, common.TaskExecution) if err != nil { return nil, err } @@ -266,12 +267,17 @@ func (m *TaskExecutionManager) ListTaskExecutions( return nil, errors.NewFlyteAdminErrorf(codes.InvalidArgument, "invalid pagination token %s for ListTaskExecutions", request.Token) } + joinTableEntities := make(map[common.Entity]bool) + for _, filter := range filters { + joinTableEntities[filter.GetEntity()] = true + } output, err := m.db.TaskExecutionRepo().List(ctx, 
repoInterfaces.ListResourceInput{ - InlineFilters: filters, - Offset: offset, - Limit: int(request.Limit), - SortParameter: sortParameter, + InlineFilters: filters, + Offset: offset, + Limit: int(request.Limit), + SortParameter: sortParameter, + JoinTableEntities: joinTableEntities, }) if err != nil { logger.Debugf(ctx, "Failed to list task executions with request [%+v] with err %v", @@ -310,13 +316,26 @@ func (m *TaskExecutionManager) GetTaskExecutionData( return nil, err } - inputs, inputURLBlob, err := util.GetInputs(ctx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), - m.storageClient, taskExecution.InputUri) - if err != nil { - return nil, err - } - outputs, outputURLBlob, err := util.GetOutputs(ctx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), - m.storageClient, taskExecution.Closure) + var inputs *core.LiteralMap + var inputURLBlob *admin.UrlBlob + group, groupCtx := errgroup.WithContext(ctx) + group.Go(func() error { + var err error + inputs, inputURLBlob, err = util.GetInputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), + m.storageClient, taskExecution.InputUri) + return err + }) + + var outputs *core.LiteralMap + var outputURLBlob *admin.UrlBlob + group.Go(func() error { + var err error + outputs, outputURLBlob, err = util.GetOutputs(groupCtx, m.urlData, m.config.ApplicationConfiguration().GetRemoteDataConfig(), + m.storageClient, taskExecution.Closure) + return err + }) + + err = group.Wait() if err != nil { return nil, err } diff --git a/flyteadmin/pkg/manager/impl/task_execution_manager_test.go b/flyteadmin/pkg/manager/impl/task_execution_manager_test.go index b59b1c1b31..7e2a14131e 100644 --- a/flyteadmin/pkg/manager/impl/task_execution_manager_test.go +++ b/flyteadmin/pkg/manager/impl/task_execution_manager_test.go @@ -644,22 +644,22 @@ func TestListTaskExecutions(t *testing.T) { assert.Equal(t, 1, input.Offset) assert.Len(t, input.InlineFilters, 4) - assert.Equal(t, 
common.Execution, input.InlineFilters[0].GetEntity()) + assert.Equal(t, common.TaskExecution, input.InlineFilters[0].GetEntity()) queryExpr, _ := input.InlineFilters[0].GetGormQueryExpr() assert.Equal(t, "exec project b", queryExpr.Args) assert.Equal(t, "execution_project = ?", queryExpr.Query) - assert.Equal(t, common.Execution, input.InlineFilters[1].GetEntity()) + assert.Equal(t, common.TaskExecution, input.InlineFilters[1].GetEntity()) queryExpr, _ = input.InlineFilters[1].GetGormQueryExpr() assert.Equal(t, "exec domain b", queryExpr.Args) assert.Equal(t, "execution_domain = ?", queryExpr.Query) - assert.Equal(t, common.Execution, input.InlineFilters[2].GetEntity()) + assert.Equal(t, common.TaskExecution, input.InlineFilters[2].GetEntity()) queryExpr, _ = input.InlineFilters[2].GetGormQueryExpr() assert.Equal(t, "exec name b", queryExpr.Args) assert.Equal(t, "execution_name = ?", queryExpr.Query) - assert.Equal(t, common.NodeExecution, input.InlineFilters[3].GetEntity()) + assert.Equal(t, common.TaskExecution, input.InlineFilters[3].GetEntity()) queryExpr, _ = input.InlineFilters[3].GetGormQueryExpr() assert.Equal(t, "nodey b", queryExpr.Args) assert.Equal(t, "node_id = ?", queryExpr.Query) @@ -777,6 +777,179 @@ func TestListTaskExecutions(t *testing.T) { }, taskExecutions.TaskExecutions[1])) } +func TestListTaskExecutions_Filters(t *testing.T) { + repository := repositoryMocks.NewMockRepository() + + expectedLogs := []*core.TaskLog{{Uri: "test-log1.txt"}} + extraLongErrMsg := string(make([]byte, 2*100)) + expectedOutputResult := &admin.TaskExecutionClosure_Error{ + Error: &core.ExecutionError{ + Message: extraLongErrMsg, + }, + } + expectedClosure := &admin.TaskExecutionClosure{ + StartedAt: sampleTaskEventOccurredAt, + Phase: core.TaskExecution_SUCCEEDED, + Duration: ptypes.DurationProto(time.Minute), + OutputResult: expectedOutputResult, + Logs: expectedLogs, + } + + closureBytes, _ := proto.Marshal(expectedClosure) + + firstRetryAttempt := uint32(1) + 
secondRetryAttempt := uint32(2) + listTaskExecutionsCalled := false + repository.TaskExecutionRepo().(*repositoryMocks.MockTaskExecutionRepo).SetListCallback( + func(ctx context.Context, input interfaces.ListResourceInput) (interfaces.TaskExecutionCollectionOutput, error) { + listTaskExecutionsCalled = true + assert.Equal(t, 99, input.Limit) + assert.Equal(t, 1, input.Offset) + + assert.Len(t, input.InlineFilters, 5) + assert.Equal(t, common.TaskExecution, input.InlineFilters[0].GetEntity()) + queryExpr, _ := input.InlineFilters[0].GetGormQueryExpr() + assert.Equal(t, "exec project b", queryExpr.Args) + assert.Equal(t, "execution_project = ?", queryExpr.Query) + + assert.Equal(t, common.TaskExecution, input.InlineFilters[1].GetEntity()) + queryExpr, _ = input.InlineFilters[1].GetGormQueryExpr() + assert.Equal(t, "exec domain b", queryExpr.Args) + assert.Equal(t, "execution_domain = ?", queryExpr.Query) + + assert.Equal(t, common.TaskExecution, input.InlineFilters[2].GetEntity()) + queryExpr, _ = input.InlineFilters[2].GetGormQueryExpr() + assert.Equal(t, "exec name b", queryExpr.Args) + assert.Equal(t, "execution_name = ?", queryExpr.Query) + + assert.Equal(t, common.TaskExecution, input.InlineFilters[3].GetEntity()) + queryExpr, _ = input.InlineFilters[3].GetGormQueryExpr() + assert.Equal(t, "nodey b", queryExpr.Args) + assert.Equal(t, "node_id = ?", queryExpr.Query) + + assert.Equal(t, common.Execution, input.InlineFilters[4].GetEntity()) + queryExpr, _ = input.InlineFilters[4].GetGormQueryExpr() + assert.Equal(t, "SUCCEEDED", queryExpr.Args) + assert.Equal(t, "phase = ?", queryExpr.Query) + assert.EqualValues(t, input.JoinTableEntities, map[common.Entity]bool{ + common.TaskExecution: true, + common.Execution: true, + }) + + return interfaces.TaskExecutionCollectionOutput{ + TaskExecutions: []models.TaskExecution{ + { + TaskExecutionKey: models.TaskExecutionKey{ + TaskKey: models.TaskKey{ + Project: "task project a", + Domain: "task domain a", + Name: "task name 
a", + Version: "task version a", + }, + NodeExecutionKey: models.NodeExecutionKey{ + NodeID: "nodey a", + ExecutionKey: models.ExecutionKey{ + Project: "exec project a", + Domain: "exec domain a", + Name: "exec name a", + }, + }, + RetryAttempt: &firstRetryAttempt, + }, + Phase: core.TaskExecution_SUCCEEDED.String(), + InputURI: "input-uri.pb", + StartedAt: &taskStartedAt, + Closure: closureBytes, + }, + { + TaskExecutionKey: models.TaskExecutionKey{ + TaskKey: models.TaskKey{ + Project: "task project b", + Domain: "task domain b", + Name: "task name b", + Version: "task version b", + }, + NodeExecutionKey: models.NodeExecutionKey{ + NodeID: "nodey b", + ExecutionKey: models.ExecutionKey{ + Project: "exec project b", + Domain: "exec domain b", + Name: "exec name b", + }, + }, + RetryAttempt: &secondRetryAttempt, + }, + Phase: core.TaskExecution_SUCCEEDED.String(), + InputURI: "input-uri2.pb", + StartedAt: &taskStartedAt, + Closure: closureBytes, + }, + }, + }, nil + }) + taskExecManager := NewTaskExecutionManager(repository, getMockExecutionsConfigProvider(), getMockStorageForExecTest(context.Background()), mockScope.NewTestScope(), mockTaskExecutionRemoteURL, nil, nil) + taskExecutions, err := taskExecManager.ListTaskExecutions(context.Background(), &admin.TaskExecutionListRequest{ + NodeExecutionId: &core.NodeExecutionIdentifier{ + NodeId: "nodey b", + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "exec project b", + Domain: "exec domain b", + Name: "exec name b", + }, + }, + Token: "1", + Limit: 99, + Filters: "eq(execution.phase, SUCCEEDED)", + }) + assert.Nil(t, err) + assert.True(t, listTaskExecutionsCalled) + + assert.True(t, proto.Equal(&admin.TaskExecution{ + Id: &core.TaskExecutionIdentifier{ + RetryAttempt: firstRetryAttempt, + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "exec project a", + Domain: "exec domain a", + Name: "exec name a", + }, + NodeId: "nodey a", + }, + TaskId: 
&core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: "task project a", + Domain: "task domain a", + Name: "task name a", + Version: "task version a", + }, + }, + InputUri: "input-uri.pb", + Closure: expectedClosure, + }, taskExecutions.TaskExecutions[0])) + assert.True(t, proto.Equal(&admin.TaskExecution{ + Id: &core.TaskExecutionIdentifier{ + RetryAttempt: secondRetryAttempt, + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "exec project b", + Domain: "exec domain b", + Name: "exec name b", + }, + NodeId: "nodey b", + }, + TaskId: &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: "task project b", + Domain: "task domain b", + Name: "task name b", + Version: "task version b", + }, + }, + InputUri: "input-uri2.pb", + Closure: expectedClosure, + }, taskExecutions.TaskExecutions[1])) +} + func TestListTaskExecutions_NoFilters(t *testing.T) { repository := repositoryMocks.NewMockRepository() diff --git a/flyteadmin/pkg/manager/impl/util/filters.go b/flyteadmin/pkg/manager/impl/util/filters.go index 81cb55a994..377dcdab51 100644 --- a/flyteadmin/pkg/manager/impl/util/filters.go +++ b/flyteadmin/pkg/manager/impl/util/filters.go @@ -271,10 +271,10 @@ func GetDbFilters(spec FilterSpec, primaryEntity common.Entity) ([]common.Inline } func GetWorkflowExecutionIdentifierFilters( - ctx context.Context, workflowExecutionIdentifier *core.WorkflowExecutionIdentifier) ([]common.InlineFilter, error) { + ctx context.Context, workflowExecutionIdentifier *core.WorkflowExecutionIdentifier, entity common.Entity) ([]common.InlineFilter, error) { identifierFilters := make([]common.InlineFilter, 3) identifierProjectFilter, err := GetSingleValueEqualityFilter( - common.Execution, shared.Project, workflowExecutionIdentifier.Project) + entity, shared.Project, workflowExecutionIdentifier.Project) if err != nil { logger.Warningf(ctx, "Failed to create execution identifier filter for project: %s with 
identifier [%+v]", workflowExecutionIdentifier.Project, workflowExecutionIdentifier) @@ -283,7 +283,7 @@ func GetWorkflowExecutionIdentifierFilters( identifierFilters[0] = identifierProjectFilter identifierDomainFilter, err := GetSingleValueEqualityFilter( - common.Execution, shared.Domain, workflowExecutionIdentifier.Domain) + entity, shared.Domain, workflowExecutionIdentifier.Domain) if err != nil { logger.Warningf(ctx, "Failed to create execution identifier filter for domain: %s with identifier [%+v]", workflowExecutionIdentifier.Domain, workflowExecutionIdentifier) @@ -292,7 +292,7 @@ func GetWorkflowExecutionIdentifierFilters( identifierFilters[1] = identifierDomainFilter identifierNameFilter, err := GetSingleValueEqualityFilter( - common.Execution, shared.Name, workflowExecutionIdentifier.Name) + entity, shared.Name, workflowExecutionIdentifier.Name) if err != nil { logger.Warningf(ctx, "Failed to create execution identifier filter for domain: %s with identifier [%+v]", workflowExecutionIdentifier.Name, workflowExecutionIdentifier) @@ -304,14 +304,14 @@ func GetWorkflowExecutionIdentifierFilters( // All inputs to this function must be validated. 
func GetNodeExecutionIdentifierFilters( - ctx context.Context, nodeExecutionIdentifier *core.NodeExecutionIdentifier) ([]common.InlineFilter, error) { + ctx context.Context, nodeExecutionIdentifier *core.NodeExecutionIdentifier, entity common.Entity) ([]common.InlineFilter, error) { workflowExecutionIdentifierFilters, err := - GetWorkflowExecutionIdentifierFilters(ctx, nodeExecutionIdentifier.ExecutionId) + GetWorkflowExecutionIdentifierFilters(ctx, nodeExecutionIdentifier.ExecutionId, entity) if err != nil { return nil, err } nodeIDFilter, err := GetSingleValueEqualityFilter( - common.NodeExecution, shared.NodeID, nodeExecutionIdentifier.NodeId) + entity, shared.NodeID, nodeExecutionIdentifier.NodeId) if err != nil { logger.Warningf(ctx, "Failed to create node execution identifier filter for node id: %s with identifier [%+v]", nodeExecutionIdentifier.NodeId, nodeExecutionIdentifier) diff --git a/flyteadmin/pkg/manager/impl/util/filters_test.go b/flyteadmin/pkg/manager/impl/util/filters_test.go index 29c1116a8e..72a8c9971b 100644 --- a/flyteadmin/pkg/manager/impl/util/filters_test.go +++ b/flyteadmin/pkg/manager/impl/util/filters_test.go @@ -176,7 +176,7 @@ func TestGetWorkflowExecutionIdentifierFilters(t *testing.T) { Project: "ex project", Domain: "ex domain", Name: "ex name", - }) + }, common.Execution) assert.Nil(t, err) assert.Len(t, identifierFilters, 3) @@ -205,26 +205,26 @@ func TestGetNodeExecutionIdentifierFilters(t *testing.T) { Name: "ex name", }, NodeId: "nodey", - }) + }, common.TaskExecution) assert.Nil(t, err) assert.Len(t, identifierFilters, 4) - assert.Equal(t, common.Execution, identifierFilters[0].GetEntity()) + assert.Equal(t, common.TaskExecution, identifierFilters[0].GetEntity()) queryExpr, _ := identifierFilters[0].GetGormQueryExpr() assert.Equal(t, "ex project", queryExpr.Args) assert.Equal(t, "execution_project = ?", queryExpr.Query) - assert.Equal(t, common.Execution, identifierFilters[1].GetEntity()) + assert.Equal(t, 
common.TaskExecution, identifierFilters[1].GetEntity()) queryExpr, _ = identifierFilters[1].GetGormQueryExpr() assert.Equal(t, "ex domain", queryExpr.Args) assert.Equal(t, "execution_domain = ?", queryExpr.Query) - assert.Equal(t, common.Execution, identifierFilters[2].GetEntity()) + assert.Equal(t, common.TaskExecution, identifierFilters[2].GetEntity()) queryExpr, _ = identifierFilters[2].GetGormQueryExpr() assert.Equal(t, "ex name", queryExpr.Args) assert.Equal(t, "execution_name = ?", queryExpr.Query) - assert.Equal(t, common.NodeExecution, identifierFilters[3].GetEntity()) + assert.Equal(t, common.TaskExecution, identifierFilters[3].GetEntity()) queryExpr, _ = identifierFilters[3].GetGormQueryExpr() assert.Equal(t, "nodey", queryExpr.Args) assert.Equal(t, "node_id = ?", queryExpr.Query) diff --git a/flyteadmin/pkg/manager/interfaces/resource.go b/flyteadmin/pkg/manager/interfaces/resource.go index 928a910d6c..3d586a59c9 100644 --- a/flyteadmin/pkg/manager/interfaces/resource.go +++ b/flyteadmin/pkg/manager/interfaces/resource.go @@ -6,6 +6,8 @@ import ( "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" ) +//go:generate mockery -name ResourceInterface -output=../mocks -case=underscore + // ResourceInterface manages project, domain and workflow -specific attributes. type ResourceInterface interface { ListAll(ctx context.Context, request *admin.ListMatchableAttributesRequest) ( diff --git a/flyteadmin/pkg/manager/mocks/resource_interface.go b/flyteadmin/pkg/manager/mocks/resource_interface.go new file mode 100644 index 0000000000..c1b416eb9d --- /dev/null +++ b/flyteadmin/pkg/manager/mocks/resource_interface.go @@ -0,0 +1,469 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + admin "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" + + interfaces "github.com/flyteorg/flyte/flyteadmin/pkg/manager/interfaces" + + mock "github.com/stretchr/testify/mock" +) + +// ResourceInterface is an autogenerated mock type for the ResourceInterface type +type ResourceInterface struct { + mock.Mock +} + +type ResourceInterface_DeleteProjectAttributes struct { + *mock.Call +} + +func (_m ResourceInterface_DeleteProjectAttributes) Return(_a0 *admin.ProjectAttributesDeleteResponse, _a1 error) *ResourceInterface_DeleteProjectAttributes { + return &ResourceInterface_DeleteProjectAttributes{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ResourceInterface) OnDeleteProjectAttributes(ctx context.Context, request *admin.ProjectAttributesDeleteRequest) *ResourceInterface_DeleteProjectAttributes { + c_call := _m.On("DeleteProjectAttributes", ctx, request) + return &ResourceInterface_DeleteProjectAttributes{Call: c_call} +} + +func (_m *ResourceInterface) OnDeleteProjectAttributesMatch(matchers ...interface{}) *ResourceInterface_DeleteProjectAttributes { + c_call := _m.On("DeleteProjectAttributes", matchers...) 
+ return &ResourceInterface_DeleteProjectAttributes{Call: c_call} +} + +// DeleteProjectAttributes provides a mock function with given fields: ctx, request +func (_m *ResourceInterface) DeleteProjectAttributes(ctx context.Context, request *admin.ProjectAttributesDeleteRequest) (*admin.ProjectAttributesDeleteResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.ProjectAttributesDeleteResponse + if rf, ok := ret.Get(0).(func(context.Context, *admin.ProjectAttributesDeleteRequest) *admin.ProjectAttributesDeleteResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.ProjectAttributesDeleteResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *admin.ProjectAttributesDeleteRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type ResourceInterface_DeleteProjectDomainAttributes struct { + *mock.Call +} + +func (_m ResourceInterface_DeleteProjectDomainAttributes) Return(_a0 *admin.ProjectDomainAttributesDeleteResponse, _a1 error) *ResourceInterface_DeleteProjectDomainAttributes { + return &ResourceInterface_DeleteProjectDomainAttributes{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ResourceInterface) OnDeleteProjectDomainAttributes(ctx context.Context, request *admin.ProjectDomainAttributesDeleteRequest) *ResourceInterface_DeleteProjectDomainAttributes { + c_call := _m.On("DeleteProjectDomainAttributes", ctx, request) + return &ResourceInterface_DeleteProjectDomainAttributes{Call: c_call} +} + +func (_m *ResourceInterface) OnDeleteProjectDomainAttributesMatch(matchers ...interface{}) *ResourceInterface_DeleteProjectDomainAttributes { + c_call := _m.On("DeleteProjectDomainAttributes", matchers...) 
+ return &ResourceInterface_DeleteProjectDomainAttributes{Call: c_call} +} + +// DeleteProjectDomainAttributes provides a mock function with given fields: ctx, request +func (_m *ResourceInterface) DeleteProjectDomainAttributes(ctx context.Context, request *admin.ProjectDomainAttributesDeleteRequest) (*admin.ProjectDomainAttributesDeleteResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.ProjectDomainAttributesDeleteResponse + if rf, ok := ret.Get(0).(func(context.Context, *admin.ProjectDomainAttributesDeleteRequest) *admin.ProjectDomainAttributesDeleteResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.ProjectDomainAttributesDeleteResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *admin.ProjectDomainAttributesDeleteRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type ResourceInterface_DeleteWorkflowAttributes struct { + *mock.Call +} + +func (_m ResourceInterface_DeleteWorkflowAttributes) Return(_a0 *admin.WorkflowAttributesDeleteResponse, _a1 error) *ResourceInterface_DeleteWorkflowAttributes { + return &ResourceInterface_DeleteWorkflowAttributes{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ResourceInterface) OnDeleteWorkflowAttributes(ctx context.Context, request *admin.WorkflowAttributesDeleteRequest) *ResourceInterface_DeleteWorkflowAttributes { + c_call := _m.On("DeleteWorkflowAttributes", ctx, request) + return &ResourceInterface_DeleteWorkflowAttributes{Call: c_call} +} + +func (_m *ResourceInterface) OnDeleteWorkflowAttributesMatch(matchers ...interface{}) *ResourceInterface_DeleteWorkflowAttributes { + c_call := _m.On("DeleteWorkflowAttributes", matchers...) 
+ return &ResourceInterface_DeleteWorkflowAttributes{Call: c_call} +} + +// DeleteWorkflowAttributes provides a mock function with given fields: ctx, request +func (_m *ResourceInterface) DeleteWorkflowAttributes(ctx context.Context, request *admin.WorkflowAttributesDeleteRequest) (*admin.WorkflowAttributesDeleteResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.WorkflowAttributesDeleteResponse + if rf, ok := ret.Get(0).(func(context.Context, *admin.WorkflowAttributesDeleteRequest) *admin.WorkflowAttributesDeleteResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.WorkflowAttributesDeleteResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *admin.WorkflowAttributesDeleteRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type ResourceInterface_GetProjectAttributes struct { + *mock.Call +} + +func (_m ResourceInterface_GetProjectAttributes) Return(_a0 *admin.ProjectAttributesGetResponse, _a1 error) *ResourceInterface_GetProjectAttributes { + return &ResourceInterface_GetProjectAttributes{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ResourceInterface) OnGetProjectAttributes(ctx context.Context, request *admin.ProjectAttributesGetRequest) *ResourceInterface_GetProjectAttributes { + c_call := _m.On("GetProjectAttributes", ctx, request) + return &ResourceInterface_GetProjectAttributes{Call: c_call} +} + +func (_m *ResourceInterface) OnGetProjectAttributesMatch(matchers ...interface{}) *ResourceInterface_GetProjectAttributes { + c_call := _m.On("GetProjectAttributes", matchers...) 
+ return &ResourceInterface_GetProjectAttributes{Call: c_call} +} + +// GetProjectAttributes provides a mock function with given fields: ctx, request +func (_m *ResourceInterface) GetProjectAttributes(ctx context.Context, request *admin.ProjectAttributesGetRequest) (*admin.ProjectAttributesGetResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.ProjectAttributesGetResponse + if rf, ok := ret.Get(0).(func(context.Context, *admin.ProjectAttributesGetRequest) *admin.ProjectAttributesGetResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.ProjectAttributesGetResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *admin.ProjectAttributesGetRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type ResourceInterface_GetProjectDomainAttributes struct { + *mock.Call +} + +func (_m ResourceInterface_GetProjectDomainAttributes) Return(_a0 *admin.ProjectDomainAttributesGetResponse, _a1 error) *ResourceInterface_GetProjectDomainAttributes { + return &ResourceInterface_GetProjectDomainAttributes{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ResourceInterface) OnGetProjectDomainAttributes(ctx context.Context, request *admin.ProjectDomainAttributesGetRequest) *ResourceInterface_GetProjectDomainAttributes { + c_call := _m.On("GetProjectDomainAttributes", ctx, request) + return &ResourceInterface_GetProjectDomainAttributes{Call: c_call} +} + +func (_m *ResourceInterface) OnGetProjectDomainAttributesMatch(matchers ...interface{}) *ResourceInterface_GetProjectDomainAttributes { + c_call := _m.On("GetProjectDomainAttributes", matchers...) 
+ return &ResourceInterface_GetProjectDomainAttributes{Call: c_call} +} + +// GetProjectDomainAttributes provides a mock function with given fields: ctx, request +func (_m *ResourceInterface) GetProjectDomainAttributes(ctx context.Context, request *admin.ProjectDomainAttributesGetRequest) (*admin.ProjectDomainAttributesGetResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.ProjectDomainAttributesGetResponse + if rf, ok := ret.Get(0).(func(context.Context, *admin.ProjectDomainAttributesGetRequest) *admin.ProjectDomainAttributesGetResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.ProjectDomainAttributesGetResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *admin.ProjectDomainAttributesGetRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type ResourceInterface_GetResource struct { + *mock.Call +} + +func (_m ResourceInterface_GetResource) Return(_a0 *interfaces.ResourceResponse, _a1 error) *ResourceInterface_GetResource { + return &ResourceInterface_GetResource{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ResourceInterface) OnGetResource(ctx context.Context, request interfaces.ResourceRequest) *ResourceInterface_GetResource { + c_call := _m.On("GetResource", ctx, request) + return &ResourceInterface_GetResource{Call: c_call} +} + +func (_m *ResourceInterface) OnGetResourceMatch(matchers ...interface{}) *ResourceInterface_GetResource { + c_call := _m.On("GetResource", matchers...) 
+ return &ResourceInterface_GetResource{Call: c_call} +} + +// GetResource provides a mock function with given fields: ctx, request +func (_m *ResourceInterface) GetResource(ctx context.Context, request interfaces.ResourceRequest) (*interfaces.ResourceResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *interfaces.ResourceResponse + if rf, ok := ret.Get(0).(func(context.Context, interfaces.ResourceRequest) *interfaces.ResourceResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*interfaces.ResourceResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, interfaces.ResourceRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type ResourceInterface_GetWorkflowAttributes struct { + *mock.Call +} + +func (_m ResourceInterface_GetWorkflowAttributes) Return(_a0 *admin.WorkflowAttributesGetResponse, _a1 error) *ResourceInterface_GetWorkflowAttributes { + return &ResourceInterface_GetWorkflowAttributes{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ResourceInterface) OnGetWorkflowAttributes(ctx context.Context, request *admin.WorkflowAttributesGetRequest) *ResourceInterface_GetWorkflowAttributes { + c_call := _m.On("GetWorkflowAttributes", ctx, request) + return &ResourceInterface_GetWorkflowAttributes{Call: c_call} +} + +func (_m *ResourceInterface) OnGetWorkflowAttributesMatch(matchers ...interface{}) *ResourceInterface_GetWorkflowAttributes { + c_call := _m.On("GetWorkflowAttributes", matchers...) 
+ return &ResourceInterface_GetWorkflowAttributes{Call: c_call} +} + +// GetWorkflowAttributes provides a mock function with given fields: ctx, request +func (_m *ResourceInterface) GetWorkflowAttributes(ctx context.Context, request *admin.WorkflowAttributesGetRequest) (*admin.WorkflowAttributesGetResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.WorkflowAttributesGetResponse + if rf, ok := ret.Get(0).(func(context.Context, *admin.WorkflowAttributesGetRequest) *admin.WorkflowAttributesGetResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.WorkflowAttributesGetResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *admin.WorkflowAttributesGetRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type ResourceInterface_ListAll struct { + *mock.Call +} + +func (_m ResourceInterface_ListAll) Return(_a0 *admin.ListMatchableAttributesResponse, _a1 error) *ResourceInterface_ListAll { + return &ResourceInterface_ListAll{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ResourceInterface) OnListAll(ctx context.Context, request *admin.ListMatchableAttributesRequest) *ResourceInterface_ListAll { + c_call := _m.On("ListAll", ctx, request) + return &ResourceInterface_ListAll{Call: c_call} +} + +func (_m *ResourceInterface) OnListAllMatch(matchers ...interface{}) *ResourceInterface_ListAll { + c_call := _m.On("ListAll", matchers...) 
+ return &ResourceInterface_ListAll{Call: c_call} +} + +// ListAll provides a mock function with given fields: ctx, request +func (_m *ResourceInterface) ListAll(ctx context.Context, request *admin.ListMatchableAttributesRequest) (*admin.ListMatchableAttributesResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.ListMatchableAttributesResponse + if rf, ok := ret.Get(0).(func(context.Context, *admin.ListMatchableAttributesRequest) *admin.ListMatchableAttributesResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.ListMatchableAttributesResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *admin.ListMatchableAttributesRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type ResourceInterface_UpdateProjectAttributes struct { + *mock.Call +} + +func (_m ResourceInterface_UpdateProjectAttributes) Return(_a0 *admin.ProjectAttributesUpdateResponse, _a1 error) *ResourceInterface_UpdateProjectAttributes { + return &ResourceInterface_UpdateProjectAttributes{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ResourceInterface) OnUpdateProjectAttributes(ctx context.Context, request *admin.ProjectAttributesUpdateRequest) *ResourceInterface_UpdateProjectAttributes { + c_call := _m.On("UpdateProjectAttributes", ctx, request) + return &ResourceInterface_UpdateProjectAttributes{Call: c_call} +} + +func (_m *ResourceInterface) OnUpdateProjectAttributesMatch(matchers ...interface{}) *ResourceInterface_UpdateProjectAttributes { + c_call := _m.On("UpdateProjectAttributes", matchers...) 
+ return &ResourceInterface_UpdateProjectAttributes{Call: c_call} +} + +// UpdateProjectAttributes provides a mock function with given fields: ctx, request +func (_m *ResourceInterface) UpdateProjectAttributes(ctx context.Context, request *admin.ProjectAttributesUpdateRequest) (*admin.ProjectAttributesUpdateResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.ProjectAttributesUpdateResponse + if rf, ok := ret.Get(0).(func(context.Context, *admin.ProjectAttributesUpdateRequest) *admin.ProjectAttributesUpdateResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.ProjectAttributesUpdateResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *admin.ProjectAttributesUpdateRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type ResourceInterface_UpdateProjectDomainAttributes struct { + *mock.Call +} + +func (_m ResourceInterface_UpdateProjectDomainAttributes) Return(_a0 *admin.ProjectDomainAttributesUpdateResponse, _a1 error) *ResourceInterface_UpdateProjectDomainAttributes { + return &ResourceInterface_UpdateProjectDomainAttributes{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ResourceInterface) OnUpdateProjectDomainAttributes(ctx context.Context, request *admin.ProjectDomainAttributesUpdateRequest) *ResourceInterface_UpdateProjectDomainAttributes { + c_call := _m.On("UpdateProjectDomainAttributes", ctx, request) + return &ResourceInterface_UpdateProjectDomainAttributes{Call: c_call} +} + +func (_m *ResourceInterface) OnUpdateProjectDomainAttributesMatch(matchers ...interface{}) *ResourceInterface_UpdateProjectDomainAttributes { + c_call := _m.On("UpdateProjectDomainAttributes", matchers...) 
+ return &ResourceInterface_UpdateProjectDomainAttributes{Call: c_call} +} + +// UpdateProjectDomainAttributes provides a mock function with given fields: ctx, request +func (_m *ResourceInterface) UpdateProjectDomainAttributes(ctx context.Context, request *admin.ProjectDomainAttributesUpdateRequest) (*admin.ProjectDomainAttributesUpdateResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.ProjectDomainAttributesUpdateResponse + if rf, ok := ret.Get(0).(func(context.Context, *admin.ProjectDomainAttributesUpdateRequest) *admin.ProjectDomainAttributesUpdateResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.ProjectDomainAttributesUpdateResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *admin.ProjectDomainAttributesUpdateRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type ResourceInterface_UpdateWorkflowAttributes struct { + *mock.Call +} + +func (_m ResourceInterface_UpdateWorkflowAttributes) Return(_a0 *admin.WorkflowAttributesUpdateResponse, _a1 error) *ResourceInterface_UpdateWorkflowAttributes { + return &ResourceInterface_UpdateWorkflowAttributes{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ResourceInterface) OnUpdateWorkflowAttributes(ctx context.Context, request *admin.WorkflowAttributesUpdateRequest) *ResourceInterface_UpdateWorkflowAttributes { + c_call := _m.On("UpdateWorkflowAttributes", ctx, request) + return &ResourceInterface_UpdateWorkflowAttributes{Call: c_call} +} + +func (_m *ResourceInterface) OnUpdateWorkflowAttributesMatch(matchers ...interface{}) *ResourceInterface_UpdateWorkflowAttributes { + c_call := _m.On("UpdateWorkflowAttributes", matchers...) 
+ return &ResourceInterface_UpdateWorkflowAttributes{Call: c_call} +} + +// UpdateWorkflowAttributes provides a mock function with given fields: ctx, request +func (_m *ResourceInterface) UpdateWorkflowAttributes(ctx context.Context, request *admin.WorkflowAttributesUpdateRequest) (*admin.WorkflowAttributesUpdateResponse, error) { + ret := _m.Called(ctx, request) + + var r0 *admin.WorkflowAttributesUpdateResponse + if rf, ok := ret.Get(0).(func(context.Context, *admin.WorkflowAttributesUpdateRequest) *admin.WorkflowAttributesUpdateResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*admin.WorkflowAttributesUpdateResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, *admin.WorkflowAttributesUpdateRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteadmin/pkg/repositories/config/migrations.go b/flyteadmin/pkg/repositories/config/migrations.go index 27da97f29c..d48356fe01 100644 --- a/flyteadmin/pkg/repositories/config/migrations.go +++ b/flyteadmin/pkg/repositories/config/migrations.go @@ -1263,20 +1263,28 @@ var ContinuedMigrations = []*gormigrate.Migration{ return tx.Migrator().DropTable("execution_tags") }, }, - { ID: "2024-06-06-drop-execution_admin-tags", Migrate: func(tx *gorm.DB) error { return tx.Migrator().DropTable("execution_admin_tags") }, }, - { ID: "2024-06-06-drop-admin-tags", Migrate: func(tx *gorm.DB) error { return tx.Migrator().DropTable("admin_tags") }, }, + { + ID: "2024-08-08-remove-input-uri-for-start-nodes", + Migrate: func(db *gorm.DB) error { + return db.Exec("UPDATE node_executions SET input_uri = '' WHERE node_id = 'start-node'").Error + }, + Rollback: func(db *gorm.DB) error { + // can't rollback missing data + return nil + }, + }, } var m = append(LegacyMigrations, NoopMigrations...) 
diff --git a/flyteadmin/pkg/repositories/config/seed_data.go b/flyteadmin/pkg/repositories/config/seed_data.go index b4433548de..bf09be0ff5 100644 --- a/flyteadmin/pkg/repositories/config/seed_data.go +++ b/flyteadmin/pkg/repositories/config/seed_data.go @@ -10,16 +10,61 @@ import ( "github.com/flyteorg/flyte/flytestdlib/logger" ) +type SeedProject struct { + Name string `json:"name" pflag:",Name of flyte project to create"` + Description string `json:"description" pflag:",Description of flyte project to create"` +} + +func UniqueProjectsFromNames(names []string) []SeedProject { + return uniqueProjects(names, nil) +} + +// MergeSeedProjectsWithUniqueNames merges seed projects from names and details while maintaining uniqueness +func MergeSeedProjectsWithUniqueNames(seedProjects []string, seedProjectsWithDetails []SeedProject) []SeedProject { + return uniqueProjects(seedProjects, seedProjectsWithDetails) +} + +func uniqueProjects(seedProjects []string, seedProjectsWithDetails []SeedProject) []SeedProject { + // Track unique project names + seen := make(map[string]struct{}) + + // Create the final result slice + var combinedProjects []SeedProject + + // First, add all projects from SeedProjectsWithDetails to the map + for _, project := range seedProjectsWithDetails { + // Handle the duplication + if _, exists := seen[project.Name]; !exists { + seen[project.Name] = struct{}{} + combinedProjects = append(combinedProjects, project) + } + } + + // Process SeedProjects + for _, projectName := range seedProjects { + // Check if project not exists in SeedProjectsWithDetails + if _, exists := seen[projectName]; !exists { + seen[projectName] = struct{}{} + combinedProjects = append(combinedProjects, SeedProject{ + Name: projectName, + Description: fmt.Sprintf("%s description", projectName), + }) + } + } + + return combinedProjects +} + // Returns a function to seed the database with default values. 
-func SeedProjects(db *gorm.DB, projects []string) error { +func SeedProjects(db *gorm.DB, projects []SeedProject) error { tx := db.Begin() for _, project := range projects { projectModel := models.Project{ - Identifier: project, - Name: project, - Description: fmt.Sprintf("%s description", project), + Identifier: project.Name, + Name: project.Name, + Description: project.Description, } - if err := tx.Where(models.Project{Identifier: project}).Omit("id").FirstOrCreate(&projectModel).Error; err != nil { + if err := tx.Where(models.Project{Identifier: project.Name}).Omit("id").FirstOrCreate(&projectModel).Error; err != nil { - logger.Warningf(context.Background(), "failed to save project [%s]", project) + logger.Warningf(context.Background(), "failed to save project [%s]", project.Name) tx.Rollback() return err diff --git a/flyteadmin/pkg/repositories/config/seed_data_test.go b/flyteadmin/pkg/repositories/config/seed_data_test.go new file mode 100644 index 0000000000..b937b30592 --- /dev/null +++ b/flyteadmin/pkg/repositories/config/seed_data_test.go @@ -0,0 +1,290 @@ +package config + +import ( + "testing" + + mocket "github.com/Selvatico/go-mocket" + "github.com/stretchr/testify/assert" + "gorm.io/gorm" +) + +func TestMergeSeedProjectsWithUniqueNames(t *testing.T) { + tests := []struct { + name string + seedProjects []string + seedProjectsWithDetails []SeedProject + want []SeedProject + }{ + { + name: "Empty inputs", + seedProjects: []string{}, + seedProjectsWithDetails: []SeedProject{}, + want: []SeedProject{}, + }, + { + name: "Nil seedProjectsWithDetails", + seedProjects: []string{}, + seedProjectsWithDetails: nil, + want: []SeedProject{}, + }, + { + name: "Only seedProjects", + seedProjects: []string{"project1", "project2"}, + seedProjectsWithDetails: nil, + want: []SeedProject{ + {Name: "project1", Description: "project1 description"}, + {Name: "project2", Description: "project2 description"}, + }, + }, + { + name: "Only seedProjectsWithDetails", + seedProjects: []string{}, + seedProjectsWithDetails: []SeedProject{ + {Name: "project1", Description: "custom 
description 1"}, + {Name: "project2", Description: "custom description 2"}, + }, + want: []SeedProject{ + {Name: "project1", Description: "custom description 1"}, + {Name: "project2", Description: "custom description 2"}, + }, + }, + { + name: "Mixed with no overlaps", + seedProjects: []string{"project1", "project2"}, + seedProjectsWithDetails: []SeedProject{ + {Name: "project3", Description: "custom description 3"}, + {Name: "project4", Description: "custom description 4"}, + }, + want: []SeedProject{ + {Name: "project3", Description: "custom description 3"}, + {Name: "project4", Description: "custom description 4"}, + {Name: "project1", Description: "project1 description"}, + {Name: "project2", Description: "project2 description"}, + }, + }, + { + name: "Mixed with overlaps", + seedProjects: []string{"project1", "project2", "project3"}, + seedProjectsWithDetails: []SeedProject{ + {Name: "project2", Description: "custom description 2"}, + {Name: "project3", Description: "custom description 3"}, + }, + want: []SeedProject{ + {Name: "project2", Description: "custom description 2"}, + {Name: "project3", Description: "custom description 3"}, + {Name: "project1", Description: "project1 description"}, + }, + }, + { + name: "Duplicates in seedProjects", + seedProjects: []string{"project1", "project1", "project2"}, + seedProjectsWithDetails: []SeedProject{ + {Name: "project3", Description: "custom description 3"}, + }, + want: []SeedProject{ + {Name: "project3", Description: "custom description 3"}, + {Name: "project1", Description: "project1 description"}, + {Name: "project2", Description: "project2 description"}, + }, + }, + { + name: "Duplicates in seedProjectsWithDetails", + seedProjects: []string{"project1"}, + seedProjectsWithDetails: []SeedProject{ + {Name: "project2", Description: "custom description 2"}, + {Name: "project2", Description: "duplicate description 2"}, + }, + want: []SeedProject{ + {Name: "project2", Description: "custom description 2"}, + {Name: 
"project1", Description: "project1 description"}, + }, + }, + { + name: "All duplicates", + seedProjects: []string{"project1", "project1", "project2"}, + seedProjectsWithDetails: []SeedProject{ + {Name: "project1", Description: "custom description 1"}, + {Name: "project2", Description: "custom description 2"}, + {Name: "project2", Description: "duplicate description 2"}, + }, + want: []SeedProject{ + {Name: "project1", Description: "custom description 1"}, + {Name: "project2", Description: "custom description 2"}, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := MergeSeedProjectsWithUniqueNames(tt.seedProjects, tt.seedProjectsWithDetails) + + // Check length + if len(got) != len(tt.want) { + t.Errorf("length mismatch: got %d projects, want %d projects", len(got), len(tt.want)) + return + } + + gotMap := make(map[string]string) + for _, project := range got { + gotMap[project.Name] = project.Description + } + wantMap := make(map[string]string) + for _, project := range tt.want { + wantMap[project.Name] = project.Description + } + + for name, wantDesc := range wantMap { + if gotDesc, exists := gotMap[name]; !exists { + t.Errorf("missing project %q in result", name) + } else if gotDesc != wantDesc { + t.Errorf("project %q description mismatch: got %q, want %q", name, gotDesc, wantDesc) + } + } + + for name := range gotMap { + if _, exists := wantMap[name]; !exists { + t.Errorf("unexpected project %q in result", name) + } + } + }) + } +} + +func TestUniqueProjectsFromNames(t *testing.T) { + tests := []struct { + name string + names []string + want []SeedProject + }{ + { + name: "Empty input", + names: []string{}, + want: []SeedProject{}, + }, + { + name: "Single name", + names: []string{"project1"}, + want: []SeedProject{ + { + Name: "project1", + Description: "project1 description", + }, + }, + }, + { + name: "Multiple unique names", + names: []string{"project1", "project2", "project3"}, + want: []SeedProject{ + { + Name: 
"project1", + Description: "project1 description", + }, + { + Name: "project2", + Description: "project2 description", + }, + { + Name: "project3", + Description: "project3 description", + }, + }, + }, + { + name: "Duplicate names", + names: []string{"project1", "project1", "project2", "project2", "project3"}, + want: []SeedProject{ + { + Name: "project1", + Description: "project1 description", + }, + { + Name: "project2", + Description: "project2 description", + }, + { + Name: "project3", + Description: "project3 description", + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := UniqueProjectsFromNames(tt.names) + + if len(got) != len(tt.want) { + t.Errorf("length mismatch: got %d projects, want %d projects", len(got), len(tt.want)) + return + } + + gotMap := make(map[string]string) + for _, project := range got { + gotMap[project.Name] = project.Description + } + wantMap := make(map[string]string) + for _, project := range tt.want { + wantMap[project.Name] = project.Description + } + + // Compare contents + for name, wantDesc := range wantMap { + if gotDesc, exists := gotMap[name]; !exists { + t.Errorf("missing project %q in result", name) + } else if gotDesc != wantDesc { + t.Errorf("project %q description mismatch: got %q, want %q", name, gotDesc, wantDesc) + } + } + + for name := range gotMap { + if _, exists := wantMap[name]; !exists { + t.Errorf("unexpected project %q in result", name) + } + } + }) + } +} + +func TestSeedProjects(t *testing.T) { + gormDb := GetDbForTest(t) + defer mocket.Catcher.Reset() + + mocket.Catcher.Reset() + mocket.Catcher.NewMock().WithQuery(`SELECT * FROM "projects"`).WithReply([]map[string]interface{}{}) + + projects := []SeedProject{ + { + Name: "Project 1", + Description: "New Description", + }, + } + + // Execute + err := SeedProjects(gormDb, projects) + + // Assert + assert.NoError(t, err) +} + +func TestSeedProjectsWithDuplicateKey(t *testing.T) { + gormDb := GetDbForTest(t) + defer 
mocket.Catcher.Reset() + + // Mock the INSERT query to return a duplicate key error + mocket.Catcher.Reset() + mocket.Catcher.NewMock().WithQuery(`INSERT INTO "projects"`).WithError(gorm.ErrDuplicatedKey) + + projects := []SeedProject{ + { + Name: "Project 1", + Description: "New Description", + }, + } + + // Execute + err := SeedProjects(gormDb, projects) + + // Assert + assert.Error(t, err) + +} diff --git a/flyteadmin/pkg/repositories/gormimpl/common.go b/flyteadmin/pkg/repositories/gormimpl/common.go index 40a54f8878..330555be8f 100644 --- a/flyteadmin/pkg/repositories/gormimpl/common.go +++ b/flyteadmin/pkg/repositories/gormimpl/common.go @@ -52,25 +52,25 @@ var entityToTableName = map[common.Entity]string{ } var innerJoinExecToNodeExec = fmt.Sprintf( - "INNER JOIN %s ON %s.execution_project = %s.execution_project AND "+ - "%s.execution_domain = %s.execution_domain AND %s.execution_name = %s.execution_name", - executionTableName, nodeExecutionTableName, executionTableName, nodeExecutionTableName, executionTableName, - nodeExecutionTableName, executionTableName) + "INNER JOIN %[1]s ON %[2]s.execution_project = %[1]s.execution_project AND "+ + "%[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name", + executionTableName, nodeExecutionTableName) +var innerJoinExecToTaskExec = fmt.Sprintf( + "INNER JOIN %[1]s ON %[2]s.execution_project = %[1]s.execution_project AND "+ + "%[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name", + executionTableName, taskExecutionTableName) var innerJoinNodeExecToTaskExec = fmt.Sprintf( - "INNER JOIN %s ON %s.node_id = %s.node_id AND %s.execution_project = %s.execution_project AND "+ - "%s.execution_domain = %s.execution_domain AND %s.execution_name = %s.execution_name", - nodeExecutionTableName, taskExecutionTableName, nodeExecutionTableName, taskExecutionTableName, - nodeExecutionTableName, taskExecutionTableName, nodeExecutionTableName, taskExecutionTableName, 
- nodeExecutionTableName) + "INNER JOIN %[1]s ON %s.node_id = %[1]s.node_id AND %[2]s.execution_project = %[1]s.execution_project AND "+ + "%[2]s.execution_domain = %[1]s.execution_domain AND %[2]s.execution_name = %[1]s.execution_name", + nodeExecutionTableName, taskExecutionTableName) // Because dynamic tasks do NOT necessarily register static task definitions, we use a left join to not exclude // dynamic tasks from list queries. var leftJoinTaskToTaskExec = fmt.Sprintf( - "LEFT JOIN %s ON %s.project = %s.project AND %s.domain = %s.domain AND %s.name = %s.name AND "+ - "%s.version = %s.version", - taskTableName, taskExecutionTableName, taskTableName, taskExecutionTableName, taskTableName, - taskExecutionTableName, taskTableName, taskExecutionTableName, taskTableName) + "LEFT JOIN %[1]s ON %[2]s.project = %[1]s.project AND %[2]s.domain = %[1]s.domain AND %[2]s.name = %[1]s.name AND "+ + " %[2]s.version = %[1]s.version", + taskTableName, taskExecutionTableName) // Validates there are no missing but required parameters in ListResourceInput func ValidateListInput(input interfaces.ListResourceInput) adminErrors.FlyteAdminError { diff --git a/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go b/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go index 0fe97d2f8c..70833d4d77 100644 --- a/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go +++ b/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go @@ -3,10 +3,10 @@ package gormimpl import ( "context" "errors" - "fmt" "gorm.io/gorm" + "github.com/flyteorg/flyte/flyteadmin/pkg/common" adminErrors "github.com/flyteorg/flyte/flyteadmin/pkg/repositories/errors" "github.com/flyteorg/flyte/flyteadmin/pkg/repositories/interfaces" "github.com/flyteorg/flyte/flyteadmin/pkg/repositories/models" @@ -113,12 +113,10 @@ func (r *NodeExecutionRepo) List(ctx context.Context, input interfaces.ListResou } var nodeExecutions []models.NodeExecution tx := 
r.db.WithContext(ctx).Limit(input.Limit).Offset(input.Offset).Preload("ChildNodeExecutions") - // And add join condition (joining multiple tables is fine even we only filter on a subset of table attributes). - // (this query isn't called for deletes). - tx = tx.Joins(fmt.Sprintf("INNER JOIN %s ON %s.execution_project = %s.execution_project AND "+ - "%s.execution_domain = %s.execution_domain AND %s.execution_name = %s.execution_name", - executionTableName, nodeExecutionTableName, executionTableName, - nodeExecutionTableName, executionTableName, nodeExecutionTableName, executionTableName)) + // And add join condition, if any + if input.JoinTableEntities[common.Execution] { + tx = tx.Joins(innerJoinExecToNodeExec) + } // Apply filters tx, err := applyScopedFilters(tx, input.InlineFilters, input.MapFilters) @@ -165,12 +163,10 @@ func (r *NodeExecutionRepo) Count(ctx context.Context, input interfaces.CountRes var err error tx := r.db.WithContext(ctx).Model(&models.NodeExecution{}).Preload("ChildNodeExecutions") - // Add join condition (joining multiple tables is fine even we only filter on a subset of table attributes). - // (this query isn't called for deletes). 
- tx = tx.Joins(fmt.Sprintf("INNER JOIN %s ON %s.execution_project = %s.execution_project AND "+ - "%s.execution_domain = %s.execution_domain AND %s.execution_name = %s.execution_name", - executionTableName, nodeExecutionTableName, executionTableName, - nodeExecutionTableName, executionTableName, nodeExecutionTableName, executionTableName)) + // And add join condition, if any + if input.JoinTableEntities[common.Execution] { + tx = tx.Joins(innerJoinExecToNodeExec) + } // Apply filters tx, err = applyScopedFilters(tx, input.InlineFilters, input.MapFilters) diff --git a/flyteadmin/pkg/repositories/gormimpl/node_execution_repo_test.go b/flyteadmin/pkg/repositories/gormimpl/node_execution_repo_test.go index e90e342a7c..d35f8ac4f4 100644 --- a/flyteadmin/pkg/repositories/gormimpl/node_execution_repo_test.go +++ b/flyteadmin/pkg/repositories/gormimpl/node_execution_repo_test.go @@ -215,7 +215,7 @@ func TestListNodeExecutions(t *testing.T) { } GlobalMock := mocket.Catcher.Reset() - GlobalMock.NewMock().WithQuery(`SELECT "node_executions"."id","node_executions"."created_at","node_executions"."updated_at","node_executions"."deleted_at","node_executions"."execution_project","node_executions"."execution_domain","node_executions"."execution_name","node_executions"."node_id","node_executions"."phase","node_executions"."input_uri","node_executions"."closure","node_executions"."started_at","node_executions"."node_execution_created_at","node_executions"."node_execution_updated_at","node_executions"."duration","node_executions"."node_execution_metadata","node_executions"."parent_id","node_executions"."parent_task_execution_id","node_executions"."error_kind","node_executions"."error_code","node_executions"."cache_status","node_executions"."dynamic_workflow_remote_closure_reference","node_executions"."internal_data" FROM "node_executions" INNER JOIN executions ON node_executions.execution_project = executions.execution_project AND node_executions.execution_domain = 
executions.execution_domain AND node_executions.execution_name = executions.execution_name WHERE node_executions.phase = $1 LIMIT 20%`). + GlobalMock.NewMock().WithQuery(`SELECT * FROM "node_executions" WHERE node_executions.phase = $1 LIMIT 20`). WithReply(nodeExecutions) collection, err := nodeExecutionRepo.List(context.Background(), interfaces.ListResourceInput{ @@ -240,6 +240,59 @@ func TestListNodeExecutions(t *testing.T) { } } +func TestListNodeExecutions_WithJoins(t *testing.T) { + nodeExecutionRepo := NewNodeExecutionRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope()) + nodeExecutions := make([]map[string]interface{}, 0) + executionIDs := []string{"100", "200"} + for _, executionID := range executionIDs { + nodeExecution := getMockNodeExecutionResponseFromDb(models.NodeExecution{ + NodeExecutionKey: models.NodeExecutionKey{ + ExecutionKey: models.ExecutionKey{ + Project: "project", + Domain: "domain", + Name: executionID, + }, + }, + Phase: nodePhase, + Closure: []byte("closure"), + InputURI: "input uri", + StartedAt: &nodeStartedAt, + Duration: time.Hour, + NodeExecutionCreatedAt: &nodeCreatedAt, + NodeExecutionUpdatedAt: &nodePlanUpdatedAt, + }) + nodeExecutions = append(nodeExecutions, nodeExecution) + } + GlobalMock := mocket.Catcher.Reset() + GlobalMock.Logging = true + GlobalMock.NewMock().WithQuery(`SELECT 
"node_executions"."id","node_executions"."created_at","node_executions"."updated_at","node_executions"."deleted_at","node_executions"."execution_project","node_executions"."execution_domain","node_executions"."execution_name","node_executions"."node_id","node_executions"."phase","node_executions"."input_uri","node_executions"."closure","node_executions"."started_at","node_executions"."node_execution_created_at","node_executions"."node_execution_updated_at","node_executions"."duration","node_executions"."node_execution_metadata","node_executions"."parent_id","node_executions"."parent_task_execution_id","node_executions"."error_kind","node_executions"."error_code","node_executions"."cache_status","node_executions"."dynamic_workflow_remote_closure_reference","node_executions"."internal_data" FROM "node_executions" INNER JOIN executions ON node_executions.execution_project = executions.execution_project AND node_executions.execution_domain = executions.execution_domain AND node_executions.execution_name = executions.execution_name WHERE node_executions.phase = $1 LIMIT 20`). 
+ WithReply(nodeExecutions) + + collection, err := nodeExecutionRepo.List(context.Background(), interfaces.ListResourceInput{ + InlineFilters: []common.InlineFilter{ + getEqualityFilter(common.NodeExecution, "phase", nodePhase), + }, + JoinTableEntities: map[common.Entity]bool{ + common.Execution: true, + }, + Limit: 20, + }) + assert.NoError(t, err) + assert.NotEmpty(t, collection) + assert.NotEmpty(t, collection.NodeExecutions) + assert.Len(t, collection.NodeExecutions, 2) + for _, nodeExecution := range collection.NodeExecutions { + assert.Equal(t, "project", nodeExecution.ExecutionKey.Project) + assert.Equal(t, "domain", nodeExecution.ExecutionKey.Domain) + assert.Contains(t, executionIDs, nodeExecution.ExecutionKey.Name) + assert.Equal(t, nodePhase, nodeExecution.Phase) + assert.Equal(t, []byte("closure"), nodeExecution.Closure) + assert.Equal(t, "input uri", nodeExecution.InputURI) + assert.Equal(t, nodeStartedAt, *nodeExecution.StartedAt) + assert.Equal(t, time.Hour, nodeExecution.Duration) + } +} + func TestListNodeExecutions_Order(t *testing.T) { nodeExecutionRepo := NewNodeExecutionRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope()) nodeExecutions := make([]map[string]interface{}, 0) @@ -305,7 +358,7 @@ func TestListNodeExecutionsForExecution(t *testing.T) { nodeExecutions = append(nodeExecutions, nodeExecution) GlobalMock := mocket.Catcher.Reset() - query := `SELECT 
"node_executions"."id","node_executions"."created_at","node_executions"."updated_at","node_executions"."deleted_at","node_executions"."execution_project","node_executions"."execution_domain","node_executions"."execution_name","node_executions"."node_id","node_executions"."phase","node_executions"."input_uri","node_executions"."closure","node_executions"."started_at","node_executions"."node_execution_created_at","node_executions"."node_execution_updated_at","node_executions"."duration","node_executions"."node_execution_metadata","node_executions"."parent_id","node_executions"."parent_task_execution_id","node_executions"."error_kind","node_executions"."error_code","node_executions"."cache_status","node_executions"."dynamic_workflow_remote_closure_reference","node_executions"."internal_data" FROM "node_executions" INNER JOIN executions ON node_executions.execution_project = executions.execution_project AND node_executions.execution_domain = executions.execution_domain AND node_executions.execution_name = executions.execution_name WHERE node_executions.phase = $1 AND executions.execution_name = $2 LIMIT 20%` + query := `SELECT * FROM "node_executions" WHERE node_executions.phase = $1 AND executions.execution_name = $2 LIMIT 20` GlobalMock.NewMock().WithQuery(query).WithReply(nodeExecutions) collection, err := nodeExecutionRepo.List(context.Background(), interfaces.ListResourceInput{ @@ -392,7 +445,7 @@ func TestCountNodeExecutions_Filters(t *testing.T) { GlobalMock := mocket.Catcher.Reset() GlobalMock.NewMock().WithQuery( - `SELECT count(*) FROM "node_executions" INNER JOIN executions ON node_executions.execution_project = executions.execution_project AND node_executions.execution_domain = executions.execution_domain AND node_executions.execution_name = executions.execution_name WHERE node_executions.phase = $1 AND "node_executions"."error_code" IS NULL`).WithReply([]map[string]interface{}{{"rows": 3}}) + `SELECT count(*) FROM "node_executions" WHERE 
node_executions.phase = $1 AND "node_executions"."error_code" IS NULL`).WithReply([]map[string]interface{}{{"rows": 3}}) count, err := nodeExecutionRepo.Count(context.Background(), interfaces.CountResourceInput{ InlineFilters: []common.InlineFilter{ diff --git a/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go b/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go index f2ac2adf52..c42d36b1bc 100644 --- a/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go +++ b/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go @@ -6,6 +6,7 @@ import ( "gorm.io/gorm" + "github.com/flyteorg/flyte/flyteadmin/pkg/common" flyteAdminDbErrors "github.com/flyteorg/flyte/flyteadmin/pkg/repositories/errors" "github.com/flyteorg/flyte/flyteadmin/pkg/repositories/interfaces" "github.com/flyteorg/flyte/flyteadmin/pkg/repositories/models" @@ -97,13 +98,20 @@ func (r *TaskExecutionRepo) List(ctx context.Context, input interfaces.ListResou var taskExecutions []models.TaskExecution tx := r.db.WithContext(ctx).Limit(input.Limit).Offset(input.Offset).Preload("ChildNodeExecution") - // And add three join conditions (joining multiple tables is fine even we only filter on a subset of table attributes). - // We are joining on task -> taskExec -> NodeExec -> Exec. - // NOTE: the order in which the joins are called below are important because postgres will only know about certain - // tables as they are joined. So we should do it in the order specified above. 
- tx = tx.Joins(leftJoinTaskToTaskExec) - tx = tx.Joins(innerJoinNodeExecToTaskExec) - tx = tx.Joins(innerJoinExecToNodeExec) + // And add three join conditions + // We enable joining on + // - task x task exec + // - node exec x task exec + // - exec x task exec + if input.JoinTableEntities[common.Task] { + tx = tx.Joins(leftJoinTaskToTaskExec) + } + if input.JoinTableEntities[common.NodeExecution] { + tx = tx.Joins(innerJoinNodeExecToTaskExec) + } + if input.JoinTableEntities[common.Execution] { + tx = tx.Joins(innerJoinExecToTaskExec) + } // Apply filters tx, err := applyScopedFilters(tx, input.InlineFilters, input.MapFilters) @@ -132,13 +140,20 @@ func (r *TaskExecutionRepo) Count(ctx context.Context, input interfaces.CountRes var err error tx := r.db.WithContext(ctx).Model(&models.TaskExecution{}) - // Add three join conditions (joining multiple tables is fine even we only filter on a subset of table attributes). - // We are joining on task -> taskExec -> NodeExec -> Exec. - // NOTE: the order in which the joins are called below are important because postgres will only know about certain - // tables as they are joined. So we should do it in the order specified above. 
- tx = tx.Joins(leftJoinTaskToTaskExec) - tx = tx.Joins(innerJoinNodeExecToTaskExec) - tx = tx.Joins(innerJoinExecToNodeExec) + // And add three join conditions + // We enable joining on + // - task x task exec + // - node exec x task exec + // - exec x task exec + if input.JoinTableEntities[common.Task] { + tx = tx.Joins(leftJoinTaskToTaskExec) + } + if input.JoinTableEntities[common.NodeExecution] { + tx = tx.Joins(innerJoinNodeExecToTaskExec) + } + if input.JoinTableEntities[common.Execution] { + tx = tx.Joins(innerJoinExecToTaskExec) + } // Apply filters tx, err = applyScopedFilters(tx, input.InlineFilters, input.MapFilters) diff --git a/flyteadmin/pkg/repositories/gormimpl/task_execution_repo_test.go b/flyteadmin/pkg/repositories/gormimpl/task_execution_repo_test.go index 5947edf175..8ccee763c2 100644 --- a/flyteadmin/pkg/repositories/gormimpl/task_execution_repo_test.go +++ b/flyteadmin/pkg/repositories/gormimpl/task_execution_repo_test.go @@ -134,7 +134,7 @@ func TestListTaskExecutionForExecution(t *testing.T) { taskExecutions = append(taskExecutions, taskExecution) GlobalMock := mocket.Catcher.Reset() GlobalMock.Logging = true - GlobalMock.NewMock().WithQuery(`SELECT "task_executions"."id","task_executions"."created_at","task_executions"."updated_at","task_executions"."deleted_at","task_executions"."project","task_executions"."domain","task_executions"."name","task_executions"."version","task_executions"."execution_project","task_executions"."execution_domain","task_executions"."execution_name","task_executions"."node_id","task_executions"."retry_attempt","task_executions"."phase","task_executions"."phase_version","task_executions"."input_uri","task_executions"."closure","task_executions"."started_at","task_executions"."task_execution_created_at","task_executions"."task_execution_updated_at","task_executions"."duration" FROM "task_executions" LEFT JOIN tasks ON task_executions.project = tasks.project AND task_executions.domain = tasks.domain AND 
task_executions.name = tasks.name AND task_executions.version = tasks.version INNER JOIN node_executions ON task_executions.node_id = node_executions.node_id AND task_executions.execution_project = node_executions.execution_project AND task_executions.execution_domain = node_executions.execution_domain AND task_executions.execution_name = node_executions.execution_name INNER JOIN executions ON node_executions.execution_project = executions.execution_project AND node_executions.execution_domain = executions.execution_domain AND node_executions.execution_name = executions.execution_name WHERE executions.execution_project = $1 AND executions.execution_domain = $2 AND executions.execution_name = $3 LIMIT 20`).WithReply(taskExecutions) + GlobalMock.NewMock().WithQuery(`SELECT * FROM "task_executions" WHERE executions.execution_project = $1 AND executions.execution_domain = $2 AND executions.execution_name = $3 LIMIT 20`).WithReply(taskExecutions) collection, err := taskExecutionRepo.List(context.Background(), interfaces.ListResourceInput{ InlineFilters: []common.InlineFilter{ @@ -160,7 +160,7 @@ func TestListTaskExecutionForExecution(t *testing.T) { } } -func TestListTaskExecutionsForTaskExecution(t *testing.T) { +func TestListTaskExecutionsForNodeExecution(t *testing.T) { taskExecutionRepo := NewTaskExecutionRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope()) taskExecutions := make([]map[string]interface{}, 0) @@ -168,9 +168,50 @@ func TestListTaskExecutionsForTaskExecution(t *testing.T) { taskExecutions = append(taskExecutions, taskExecution) GlobalMock := mocket.Catcher.Reset() - GlobalMock.Logging = true + GlobalMock.NewMock().WithQuery(`SELECT 
"task_executions"."id","task_executions"."created_at","task_executions"."updated_at","task_executions"."deleted_at","task_executions"."project","task_executions"."domain","task_executions"."name","task_executions"."version","task_executions"."execution_project","task_executions"."execution_domain","task_executions"."execution_name","task_executions"."node_id","task_executions"."retry_attempt","task_executions"."phase","task_executions"."phase_version","task_executions"."input_uri","task_executions"."closure","task_executions"."started_at","task_executions"."task_execution_created_at","task_executions"."task_execution_updated_at","task_executions"."duration" FROM "task_executions" INNER JOIN node_executions ON task_executions.node_id = node_executions.node_id AND task_executions.execution_project = node_executions.execution_project AND task_executions.execution_domain = node_executions.execution_domain AND task_executions.execution_name = node_executions.execution_name WHERE tasks.project = $1 AND tasks.domain = $2 AND tasks.name = $3 AND tasks.version = $4 AND node_executions.phase = $5 AND executions.execution_project = $6 AND executions.execution_domain = $7 AND executions.execution_name = $8 LIMIT 20`).WithReply(taskExecutions) + + collection, err := taskExecutionRepo.List(context.Background(), interfaces.ListResourceInput{ + InlineFilters: []common.InlineFilter{ + getEqualityFilter(common.Task, "project", "project_tn"), + getEqualityFilter(common.Task, "domain", "domain_t"), + getEqualityFilter(common.Task, "name", "domain_t"), + getEqualityFilter(common.Task, "version", "version_t"), + + getEqualityFilter(common.NodeExecution, "phase", nodePhase), + getEqualityFilter(common.Execution, "project", "project_name"), + getEqualityFilter(common.Execution, "domain", "domain_name"), + getEqualityFilter(common.Execution, "name", "execution_name"), + }, + JoinTableEntities: map[common.Entity]bool{ + common.NodeExecution: true, + }, + Limit: 20, + }) + assert.NoError(t, 
err) + assert.NotEmpty(t, collection) + assert.NotEmpty(t, collection.TaskExecutions) + assert.Len(t, collection.TaskExecutions, 1) + + for _, taskExecution := range collection.TaskExecutions { + assert.Equal(t, testTaskExecution.TaskExecutionKey, taskExecution.TaskExecutionKey) + assert.Equal(t, &retryAttemptValue, taskExecution.RetryAttempt) + assert.Equal(t, taskPhase, taskExecution.Phase) + assert.Equal(t, []byte("Test"), taskExecution.Closure) + assert.Equal(t, "testInput.pb", taskExecution.InputURI) + assert.Equal(t, taskStartedAt, *taskExecution.StartedAt) + assert.Equal(t, time.Hour, taskExecution.Duration) + } +} + +func TestListTaskExecutionsForExecution(t *testing.T) { + taskExecutionRepo := NewTaskExecutionRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope()) + + taskExecutions := make([]map[string]interface{}, 0) + taskExecution := getMockTaskExecutionResponseFromDb(testTaskExecution) + taskExecutions = append(taskExecutions, taskExecution) - GlobalMock.NewMock().WithQuery(`SELECT "task_executions"."id","task_executions"."created_at","task_executions"."updated_at","task_executions"."deleted_at","task_executions"."project","task_executions"."domain","task_executions"."name","task_executions"."version","task_executions"."execution_project","task_executions"."execution_domain","task_executions"."execution_name","task_executions"."node_id","task_executions"."retry_attempt","task_executions"."phase","task_executions"."phase_version","task_executions"."input_uri","task_executions"."closure","task_executions"."started_at","task_executions"."task_execution_created_at","task_executions"."task_execution_updated_at","task_executions"."duration" FROM "task_executions" LEFT JOIN tasks ON task_executions.project = tasks.project AND task_executions.domain = tasks.domain AND task_executions.name = tasks.name AND task_executions.version = tasks.version INNER JOIN node_executions ON task_executions.node_id = node_executions.node_id AND 
task_executions.execution_project = node_executions.execution_project AND task_executions.execution_domain = node_executions.execution_domain AND task_executions.execution_name = node_executions.execution_name INNER JOIN executions ON node_executions.execution_project = executions.execution_project AND node_executions.execution_domain = executions.execution_domain AND node_executions.execution_name = executions.execution_name WHERE tasks.project = $1 AND tasks.domain = $2 AND tasks.name = $3 AND tasks.version = $4 AND node_executions.phase = $5 AND executions.execution_project = $6 AND executions.execution_domain = $7 AND executions.execution_name = $8 LIMIT 20`).WithReply(taskExecutions) + GlobalMock := mocket.Catcher.Reset() + GlobalMock.NewMock().WithQuery(`SELECT "task_executions"."id","task_executions"."created_at","task_executions"."updated_at","task_executions"."deleted_at","task_executions"."project","task_executions"."domain","task_executions"."name","task_executions"."version","task_executions"."execution_project","task_executions"."execution_domain","task_executions"."execution_name","task_executions"."node_id","task_executions"."retry_attempt","task_executions"."phase","task_executions"."phase_version","task_executions"."input_uri","task_executions"."closure","task_executions"."started_at","task_executions"."task_execution_created_at","task_executions"."task_execution_updated_at","task_executions"."duration" FROM "task_executions" INNER JOIN executions ON task_executions.execution_project = executions.execution_project AND task_executions.execution_domain = executions.execution_domain AND task_executions.execution_name = executions.execution_name WHERE tasks.project = $1 AND tasks.domain = $2 AND tasks.name = $3 AND tasks.version = $4 AND tasks.org = $5 AND executions.execution_project = $6 AND executions.execution_domain = $7 AND executions.execution_name = $8 AND executions.org = $9 LIMIT 20`).WithReply(taskExecutions) collection, err := 
taskExecutionRepo.List(context.Background(), interfaces.ListResourceInput{ InlineFilters: []common.InlineFilter{ @@ -178,11 +219,62 @@ func TestListTaskExecutionsForTaskExecution(t *testing.T) { getEqualityFilter(common.Task, "domain", "domain_t"), getEqualityFilter(common.Task, "name", "domain_t"), getEqualityFilter(common.Task, "version", "version_t"), + getEqualityFilter(common.Task, "org", "org_t"), + + getEqualityFilter(common.Execution, "project", "project_name"), + getEqualityFilter(common.Execution, "domain", "domain_name"), + getEqualityFilter(common.Execution, "name", "execution_name"), + getEqualityFilter(common.Execution, "org", "execution_org"), + }, + JoinTableEntities: map[common.Entity]bool{ + common.Execution: true, + }, + Limit: 20, + }) + assert.NoError(t, err) + assert.NotEmpty(t, collection) + assert.NotEmpty(t, collection.TaskExecutions) + assert.Len(t, collection.TaskExecutions, 1) + + for _, taskExecution := range collection.TaskExecutions { + assert.Equal(t, testTaskExecution.TaskExecutionKey, taskExecution.TaskExecutionKey) + assert.Equal(t, &retryAttemptValue, taskExecution.RetryAttempt) + assert.Equal(t, taskPhase, taskExecution.Phase) + assert.Equal(t, []byte("Test"), taskExecution.Closure) + assert.Equal(t, "testInput.pb", taskExecution.InputURI) + assert.Equal(t, taskStartedAt, *taskExecution.StartedAt) + assert.Equal(t, time.Hour, taskExecution.Duration) + } +} + +func TestListTaskExecutionsForNodeAndExecution(t *testing.T) { + taskExecutionRepo := NewTaskExecutionRepo(GetDbForTest(t), errors.NewTestErrorTransformer(), mockScope.NewTestScope()) + + taskExecutions := make([]map[string]interface{}, 0) + taskExecution := getMockTaskExecutionResponseFromDb(testTaskExecution) + taskExecutions = append(taskExecutions, taskExecution) + + GlobalMock := mocket.Catcher.Reset() + + GlobalMock.NewMock().WithQuery(`SELECT 
"task_executions"."id","task_executions"."created_at","task_executions"."updated_at","task_executions"."deleted_at","task_executions"."project","task_executions"."domain","task_executions"."name","task_executions"."version","task_executions"."execution_project","task_executions"."execution_domain","task_executions"."execution_name","task_executions"."node_id","task_executions"."retry_attempt","task_executions"."phase","task_executions"."phase_version","task_executions"."input_uri","task_executions"."closure","task_executions"."started_at","task_executions"."task_execution_created_at","task_executions"."task_execution_updated_at","task_executions"."duration" FROM "task_executions" INNER JOIN node_executions ON task_executions.node_id = node_executions.node_id AND task_executions.execution_project = node_executions.execution_project AND task_executions.execution_domain = node_executions.execution_domain AND task_executions.execution_name = node_executions.execution_name INNER JOIN executions ON task_executions.execution_project = executions.execution_project AND task_executions.execution_domain = executions.execution_domain AND task_executions.execution_name = executions.execution_name WHERE tasks.project = $1 AND tasks.domain = $2 AND tasks.name = $3 AND tasks.version = $4 AND tasks.org = $5 AND node_executions.phase = $6 AND executions.execution_project = $7 AND executions.execution_domain = $8 AND executions.execution_name = $9 AND executions.org = $10 LIMIT 20`).WithReply(taskExecutions) + + collection, err := taskExecutionRepo.List(context.Background(), interfaces.ListResourceInput{ + InlineFilters: []common.InlineFilter{ + getEqualityFilter(common.Task, "project", "project_tn"), + getEqualityFilter(common.Task, "domain", "domain_t"), + getEqualityFilter(common.Task, "name", "domain_t"), + getEqualityFilter(common.Task, "version", "version_t"), + getEqualityFilter(common.Task, "org", "org_t"), getEqualityFilter(common.NodeExecution, "phase", nodePhase), 
getEqualityFilter(common.Execution, "project", "project_name"), getEqualityFilter(common.Execution, "domain", "domain_name"), getEqualityFilter(common.Execution, "name", "execution_name"), + getEqualityFilter(common.Execution, "org", "execution_org"), + }, + JoinTableEntities: map[common.Entity]bool{ + common.NodeExecution: true, + common.Execution: true, }, Limit: 20, }) @@ -219,7 +311,7 @@ func TestCountTaskExecutions_Filters(t *testing.T) { GlobalMock := mocket.Catcher.Reset() GlobalMock.NewMock().WithQuery( - `SELECT count(*) FROM "task_executions" LEFT JOIN tasks ON task_executions.project = tasks.project AND task_executions.domain = tasks.domain AND task_executions.name = tasks.name AND task_executions.version = tasks.version INNER JOIN node_executions ON task_executions.node_id = node_executions.node_id AND task_executions.execution_project = node_executions.execution_project AND task_executions.execution_domain = node_executions.execution_domain AND task_executions.execution_name = node_executions.execution_name INNER JOIN executions ON node_executions.execution_project = executions.execution_project AND node_executions.execution_domain = executions.execution_domain AND node_executions.execution_name = executions.execution_name WHERE task_executions.phase = $1 AND "task_execution_updated_at" IS NULL`).WithReply([]map[string]interface{}{{"rows": 3}}) + `SELECT count(*) FROM "task_executions" WHERE task_executions.phase = $1 AND "task_execution_updated_at" IS NULL`).WithReply([]map[string]interface{}{{"rows": 3}}) count, err := taskExecutionRepo.Count(context.Background(), interfaces.CountResourceInput{ InlineFilters: []common.InlineFilter{ diff --git a/flyteadmin/pkg/runtime/interfaces/application_configuration.go b/flyteadmin/pkg/runtime/interfaces/application_configuration.go index 15ed271412..55791a1538 100644 --- a/flyteadmin/pkg/runtime/interfaces/application_configuration.go +++ b/flyteadmin/pkg/runtime/interfaces/application_configuration.go @@ -1,6 
+1,13 @@ package interfaces import ( + "context" + "crypto/tls" + "fmt" + "os" + "strings" + + "github.com/Shopify/sarama" "github.com/golang/protobuf/ptypes/wrappers" "golang.org/x/time/rate" @@ -231,11 +238,89 @@ type GCPConfig struct { ProjectID string `json:"projectId"` } +// This section holds SASL config for Kafka +type SASLConfig struct { + // Whether to use SASL + Enabled bool `json:"enabled"` + // The username + User string `json:"user"` + // The password + Password string `json:"password"` + PasswordPath string `json:"passwordPath"` + Handshake bool `json:"handshake"` + // Which SASL Mechanism to use. Defaults to PLAIN + Mechanism sarama.SASLMechanism `json:"mechanism"` +} + +// This section holds TLS config for Kafka clients +type TLSConfig struct { + // Whether to use TLS + Enabled bool `json:"enabled"` + // Whether to skip certificate verification + InsecureSkipVerify bool `json:"insecureSkipVerify"` + // The location of the client certificate + CertPath string `json:"certPath"` + // The location of the client private key + KeyPath string `json:"keyPath"` +} + +// This section holds configs for Kafka clients type KafkaConfig struct { // The version of Kafka, e.g. 
2.1.0, 0.8.2.0 Version string `json:"version"` // kafka broker addresses Brokers []string `json:"brokers"` + // sasl config + SASLConfig SASLConfig `json:"saslConfig"` + // tls config + TLSConfig TLSConfig `json:"tlsConfig"` +} + +func (k KafkaConfig) UpdateSaramaConfig(ctx context.Context, s *sarama.Config) { + var err error + s.Version, err = sarama.ParseKafkaVersion(k.Version) + if err != nil { + panic(err) + } + + if k.SASLConfig.Enabled { + s.Net.SASL.Enable = true + s.Net.SASL.User = k.SASLConfig.User + + if len(k.SASLConfig.PasswordPath) > 0 { + if _, err := os.Stat(k.SASLConfig.PasswordPath); os.IsNotExist(err) { + panic(fmt.Sprintf("missing kafka password at the specified path [%s]", k.SASLConfig.PasswordPath)) + } + passwordVal, err := os.ReadFile(k.SASLConfig.PasswordPath) + if err != nil { + panic(fmt.Sprintf("failed to read kafka password from path [%s] with err: %v", k.SASLConfig.PasswordPath, err)) + } + + s.Net.SASL.Password = strings.TrimSpace(string(passwordVal)) + } else { + s.Net.SASL.Password = k.SASLConfig.Password + } + s.Net.SASL.Handshake = k.SASLConfig.Handshake + + if k.SASLConfig.Mechanism == "" { + k.SASLConfig.Mechanism = sarama.SASLTypePlaintext + } + s.Net.SASL.Mechanism = k.SASLConfig.Mechanism + } + + if k.TLSConfig.Enabled { + s.Net.TLS.Enable = true + s.Net.TLS.Config = &tls.Config{ + InsecureSkipVerify: k.TLSConfig.InsecureSkipVerify, + } + if k.TLSConfig.KeyPath != "" && k.TLSConfig.CertPath != "" { + cert, err := tls.LoadX509KeyPair(k.TLSConfig.CertPath, k.TLSConfig.KeyPath) + if err != nil { + panic(err) + } + s.Net.TLS.Config.Certificates = []tls.Certificate{cert} + } + } } // This section holds configuration for the event scheduler used to schedule workflow executions. 
diff --git a/flyteadmin/pkg/server/initialize.go b/flyteadmin/pkg/server/initialize.go index 2d2d7cacc1..42e5271961 100644 --- a/flyteadmin/pkg/server/initialize.go +++ b/flyteadmin/pkg/server/initialize.go @@ -67,7 +67,7 @@ func Rollback(ctx context.Context) error { } // SeedProjects creates a set of given projects in the DB -func SeedProjects(ctx context.Context, projects []string) error { +func SeedProjects(ctx context.Context, projects []config.SeedProject) error { return withDB(ctx, func(db *gorm.DB) error { if err := config.SeedProjects(db, projects); err != nil { return fmt.Errorf("could not add projects to database with err: %v", err) diff --git a/flyteadmin/pkg/server/service.go b/flyteadmin/pkg/server/service.go index 840d0d9f17..77e8c5803b 100644 --- a/flyteadmin/pkg/server/service.go +++ b/flyteadmin/pkg/server/service.go @@ -12,7 +12,6 @@ import ( "github.com/gorilla/handlers" grpcmiddleware "github.com/grpc-ecosystem/go-grpc-middleware" grpcauth "github.com/grpc-ecosystem/go-grpc-middleware/auth" - grpcrecovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" grpcprometheus "github.com/grpc-ecosystem/go-grpc-prometheus" "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" "github.com/pkg/errors" @@ -111,7 +110,6 @@ func newGRPCServer(ctx context.Context, pluginRegistry *plugins.Registry, cfg *c chainedUnaryInterceptors = grpcmiddleware.ChainUnaryServer( // recovery interceptor should always be first in order to handle any panics in the middleware or server recoveryInterceptor.UnaryServerInterceptor(), - grpcrecovery.UnaryServerInterceptor(), grpcprometheus.UnaryServerInterceptor, otelUnaryServerInterceptor, auth.GetAuthenticationCustomMetadataInterceptor(authCtx), @@ -136,7 +134,6 @@ func newGRPCServer(ctx context.Context, pluginRegistry *plugins.Registry, cfg *c ) serverOpts := []grpc.ServerOption{ - // recovery interceptor should always be first in order to handle any panics in the middleware or server 
grpc.StreamInterceptor(chainedStreamInterceptors), grpc.UnaryInterceptor(chainedUnaryInterceptors), } @@ -352,7 +349,7 @@ func serveGatewayInsecure(ctx context.Context, pluginRegistry *plugins.Registry, var oauth2Provider interfaces.OAuth2Provider var oauth2ResourceServer interfaces.OAuth2ResourceServer if authCfg.AppAuth.AuthServerType == authConfig.AuthorizationServerTypeSelf { - oauth2Provider, err = authzserver.NewProvider(ctx, authCfg.AppAuth.SelfAuthServer, sm) + oauth2Provider, err = authzserver.NewProvider(ctx, authCfg.AppAuth.SelfAuthServer, sm, scope.NewSubScope("auth_provider")) if err != nil { logger.Errorf(ctx, "Error creating authorization server %s", err) return err @@ -463,7 +460,7 @@ func serveGatewaySecure(ctx context.Context, pluginRegistry *plugins.Registry, c var oauth2Provider interfaces.OAuth2Provider var oauth2ResourceServer interfaces.OAuth2ResourceServer if authCfg.AppAuth.AuthServerType == authConfig.AuthorizationServerTypeSelf { - oauth2Provider, err = authzserver.NewProvider(ctx, authCfg.AppAuth.SelfAuthServer, sm) + oauth2Provider, err = authzserver.NewProvider(ctx, authCfg.AppAuth.SelfAuthServer, sm, scope.NewSubScope("auth_provider")) if err != nil { logger.Errorf(ctx, "Error creating authorization server %s", err) return err @@ -516,9 +513,19 @@ func serveGatewaySecure(ctx context.Context, pluginRegistry *plugins.Registry, c panic(err) } + handler := grpcHandlerFunc(grpcServer, httpServer) + if cfg.Security.AllowCors { + handler = handlers.CORS( + handlers.AllowCredentials(), + handlers.AllowedOrigins(cfg.Security.AllowedOrigins), + handlers.AllowedHeaders(append(defaultCorsHeaders, cfg.Security.AllowedHeaders...)), + handlers.AllowedMethods([]string{"GET", "POST", "DELETE", "HEAD", "PUT", "PATCH"}), + )(handler) + } + srv := &http.Server{ Addr: cfg.GetHostAddress(), - Handler: grpcHandlerFunc(grpcServer, httpServer), + Handler: handler, // #nosec G402 TLSConfig: &tls.Config{ Certificates: []tls.Certificate{*cert}, diff --git 
a/flytecopilot/go.mod b/flytecopilot/go.mod index ba5f8fa6d5..d943bb5153 100644 --- a/flytecopilot/go.mod +++ b/flytecopilot/go.mod @@ -34,7 +34,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/coocood/freecache v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect @@ -69,7 +69,6 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -79,10 +78,10 @@ require ( github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.53.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cast v1.4.1 // indirect @@ -103,9 +102,9 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect go.opentelemetry.io/proto/otlp v1.1.0 // indirect golang.org/x/crypto v0.25.0 // indirect - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect + golang.org/x/exp 
v0.0.0-20240325151524-a685a6edb6d8 // indirect golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.22.0 // indirect golang.org/x/term v0.22.0 // indirect @@ -117,7 +116,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/grpc v1.62.1 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/flytecopilot/go.sum b/flytecopilot/go.sum index 9fc4e35767..b1f65b79e1 100644 --- a/flytecopilot/go.sum +++ b/flytecopilot/go.sum @@ -71,8 +71,8 @@ github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -267,8 +267,6 @@ github.com/mattn/go-colorable v0.1.12/go.mod 
h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/go-ps v1.0.0 h1:i6ampVEEF4wQFF+bkYfwYgY+F/uYJDktmvLPf7qIgjc= github.com/mitchellh/go-ps v1.0.0/go.mod h1:J4lOc8z8yJs6vUwklHw2XEIiT4z4C40KtWVN3nvg8Pg= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -298,15 +296,15 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod 
h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -407,8 +405,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= 
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -477,8 +475,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -717,8 +715,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod 
h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/flytectl/cmd/compile/compile_test.go b/flytectl/cmd/compile/compile_test.go index 2d91260aff..3c90e6e54e 100644 --- a/flytectl/cmd/compile/compile_test.go +++ b/flytectl/cmd/compile/compile_test.go @@ -6,7 +6,7 @@ import ( config "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/compile" cmdCore "github.com/flyteorg/flyte/flytectl/cmd/core" - u "github.com/flyteorg/flyte/flytectl/cmd/testutils" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" ) @@ -29,9 +29,7 @@ func TestCompileCommand(t *testing.T) { // compiling via cobra command compileCfg := config.DefaultCompileConfig compileCfg.File = "testdata/valid-package.tgz" - var setup = u.Setup - s := setup() - defer s.TearDown() + s := testutils.Setup(t) compileCmd := CreateCompileCommand()["compile"] err := compileCmd.CmdFunc(context.Background(), []string{}, s.CmdCtx) assert.Nil(t, err, "compiling via cmd returns err") diff --git a/flytectl/cmd/create/create_test.go b/flytectl/cmd/create/create_test.go index b7b5a2c32c..b122d64e51 100644 --- a/flytectl/cmd/create/create_test.go +++ b/flytectl/cmd/create/create_test.go @@ -4,14 +4,11 @@ import ( "sort" "testing" - "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/stretchr/testify/assert" ) const testDataFolder = "../testdata/" -var setup = testutils.Setup - func TestCreateCommand(t *testing.T) { createCommand := RemoteCreateCommand() assert.Equal(t, createCommand.Use, "create") diff --git a/flytectl/cmd/create/execution_test.go b/flytectl/cmd/create/execution_test.go index 4886e2e7b8..d01b683e02 100644 --- 
a/flytectl/cmd/create/execution_test.go +++ b/flytectl/cmd/create/execution_test.go @@ -19,10 +19,11 @@ type createSuite struct { suite.Suite testutils.TestStruct originalExecConfig ExecutionConfig + t *testing.T } func (s *createSuite) SetupTest() { - s.TestStruct = setup() + s.TestStruct = testutils.Setup(s.t) // TODO: migrate to new command context from testutils s.CmdCtx = cmdCore.NewCommandContext(s.MockClient, s.MockOutStream) @@ -30,7 +31,6 @@ func (s *createSuite) SetupTest() { } func (s *createSuite) TearDownTest() { - defer s.TearDown() orig := s.originalExecConfig executionConfig = &orig s.MockAdminClient.AssertExpectations(s.T()) @@ -331,5 +331,5 @@ func (s *createSuite) Test_CreateTaskExecution_DryRun() { } func TestCreateSuite(t *testing.T) { - suite.Run(t, &createSuite{originalExecConfig: *executionConfig}) + suite.Run(t, &createSuite{originalExecConfig: *executionConfig, t: t}) } diff --git a/flytectl/cmd/create/execution_util_test.go b/flytectl/cmd/create/execution_util_test.go index 000e3621d3..e27ba4a96b 100644 --- a/flytectl/cmd/create/execution_util_test.go +++ b/flytectl/cmd/create/execution_util_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/flyteorg/flyte/flytectl/cmd/config" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/stretchr/testify/assert" @@ -45,8 +46,7 @@ func createExecutionUtilSetup() { } func TestCreateExecutionForRelaunch(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() s.MockAdminClient.OnRelaunchExecutionMatch(s.Ctx, relaunchRequest).Return(executionCreateResponse, nil) @@ -55,8 +55,7 @@ func TestCreateExecutionForRelaunch(t *testing.T) { } func TestCreateExecutionForRelaunchNotFound(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() 
s.MockAdminClient.OnRelaunchExecutionMatch(s.Ctx, relaunchRequest).Return(nil, errors.New("unknown execution")) @@ -67,8 +66,7 @@ func TestCreateExecutionForRelaunchNotFound(t *testing.T) { } func TestCreateExecutionForRecovery(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() s.MockAdminClient.OnRecoverExecutionMatch(s.Ctx, recoverRequest).Return(executionCreateResponse, nil) @@ -77,8 +75,7 @@ func TestCreateExecutionForRecovery(t *testing.T) { } func TestCreateExecutionForRecoveryNotFound(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() s.MockAdminClient.OnRecoverExecutionMatch(s.Ctx, recoverRequest).Return(nil, errors.New("unknown execution")) @@ -89,8 +86,7 @@ func TestCreateExecutionForRecoveryNotFound(t *testing.T) { func TestCreateExecutionRequestForWorkflow(t *testing.T) { t.Run("successful", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() launchPlan := &admin.LaunchPlan{} @@ -100,8 +96,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { assert.NotNil(t, execCreateRequest) }) t.Run("successful with envs", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() launchPlan := &admin.LaunchPlan{} @@ -114,8 +109,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { assert.NotNil(t, execCreateRequest) }) t.Run("successful with empty envs", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() launchPlan := &admin.LaunchPlan{} @@ -128,8 +122,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { assert.NotNil(t, execCreateRequest) }) t.Run("successful with execution Cluster label and envs", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() launchPlan := &admin.LaunchPlan{} @@ -144,8 +137,7 @@ 
func TestCreateExecutionRequestForWorkflow(t *testing.T) { assert.Equal(t, "cluster", execCreateRequest.Spec.ExecutionClusterLabel.Value) }) t.Run("failed literal conversion", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() launchPlan := &admin.LaunchPlan{ @@ -162,8 +154,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { assert.Equal(t, fmt.Errorf("parameter [nilparam] has nil Variable"), err) }) t.Run("failed fetch", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() s.FetcherExt.OnFetchLPVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) @@ -173,8 +164,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { assert.Equal(t, err, errors.New("failed")) }) t.Run("with security context", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() executionConfig.KubeServiceAcct = "default" @@ -190,8 +180,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { func TestCreateExecutionRequestForTask(t *testing.T) { t.Run("successful", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() task := &admin.Task{ @@ -205,8 +194,7 @@ func TestCreateExecutionRequestForTask(t *testing.T) { assert.NotNil(t, execCreateRequest) }) t.Run("successful with envs", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() task := &admin.Task{ @@ -223,8 +211,7 @@ func TestCreateExecutionRequestForTask(t *testing.T) { assert.NotNil(t, execCreateRequest) }) t.Run("successful with empty envs", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() task := &admin.Task{ @@ -241,8 +228,7 @@ func TestCreateExecutionRequestForTask(t *testing.T) { assert.NotNil(t, 
execCreateRequest) }) t.Run("failed literal conversion", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() task := &admin.Task{ @@ -267,8 +253,7 @@ func TestCreateExecutionRequestForTask(t *testing.T) { assert.Equal(t, fmt.Errorf("variable [nilvar] has nil type"), err) }) t.Run("failed fetch", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() s.FetcherExt.OnFetchTaskVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) @@ -278,8 +263,7 @@ func TestCreateExecutionRequestForTask(t *testing.T) { assert.Equal(t, err, errors.New("failed")) }) t.Run("with security context", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() executionConfig.KubeServiceAcct = "default" @@ -316,8 +300,7 @@ func Test_resolveOverrides(t *testing.T) { } func TestCreateExecutionForRelaunchOverwritingCache(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() executionConfig.OverwriteCache = true diff --git a/flytectl/cmd/create/project_test.go b/flytectl/cmd/create/project_test.go index 1d63c0fceb..1dc33356f1 100644 --- a/flytectl/cmd/create/project_test.go +++ b/flytectl/cmd/create/project_test.go @@ -8,6 +8,7 @@ import ( "github.com/flyteorg/flyte/flytectl/clierrors" "github.com/flyteorg/flyte/flytectl/cmd/config" "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/project" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -36,13 +37,12 @@ func createProjectSetup() { project.DefaultProjectConfig.Description = "" config.GetConfig().Project = "" } + func TestCreateProjectFunc(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) 
createProjectSetup() defer s.TearDownAndVerify(t, "project created successfully.") - defer s.TearDown() project.DefaultProjectConfig.ID = projectValue project.DefaultProjectConfig.Name = projectValue project.DefaultProjectConfig.Labels = map[string]string{} @@ -54,12 +54,10 @@ func TestCreateProjectFunc(t *testing.T) { } func TestEmptyProjectID(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createProjectSetup() defer s.TearDownAndVerify(t, "") - defer s.TearDown() project.DefaultProjectConfig = &project.ConfigProject{} s.MockAdminClient.OnRegisterProjectMatch(s.Ctx, projectRegisterRequest).Return(nil, nil) err := createProjectsCommand(s.Ctx, []string{}, s.CmdCtx) @@ -68,12 +66,10 @@ func TestEmptyProjectID(t *testing.T) { } func TestEmptyProjectName(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createProjectSetup() defer s.TearDownAndVerify(t, "") - defer s.TearDown() project.DefaultProjectConfig.ID = projectValue project.DefaultProjectConfig.Labels = map[string]string{} project.DefaultProjectConfig.Description = "" diff --git a/flytectl/cmd/delete/delete_test.go b/flytectl/cmd/delete/delete_test.go index 0184450305..36b0cb8ad1 100644 --- a/flytectl/cmd/delete/delete_test.go +++ b/flytectl/cmd/delete/delete_test.go @@ -4,7 +4,6 @@ import ( "sort" "testing" - "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/stretchr/testify/assert" ) @@ -13,8 +12,6 @@ const ( testDataInvalidAttrFile = "testdata/invalid_attribute.yaml" ) -var setup = testutils.Setup - func TestDeleteCommand(t *testing.T) { deleteCommand := RemoteDeleteCommand() assert.Equal(t, deleteCommand.Use, "delete") diff --git a/flytectl/cmd/delete/execution_test.go b/flytectl/cmd/delete/execution_test.go index c883a4d4df..6b71010879 100644 --- a/flytectl/cmd/delete/execution_test.go +++ b/flytectl/cmd/delete/execution_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/flyteorg/flyte/flytectl/cmd/config" + 
"github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/stretchr/testify/assert" @@ -32,7 +33,8 @@ func terminateExecutionSetup() { } func TestTerminateExecutionFunc(t *testing.T) { - s := setup() + s := testutils.Setup(t) + terminateExecutionSetup() terminateExecResponse := &admin.ExecutionTerminateResponse{} s.MockAdminClient.OnTerminateExecutionMatch(s.Ctx, terminateExecRequests[0]).Return(terminateExecResponse, nil) @@ -45,7 +47,8 @@ func TestTerminateExecutionFunc(t *testing.T) { } func TestTerminateExecutionFuncWithError(t *testing.T) { - s := setup() + s := testutils.Setup(t) + terminateExecutionSetup() terminateExecResponse := &admin.ExecutionTerminateResponse{} s.MockAdminClient.OnTerminateExecutionMatch(s.Ctx, terminateExecRequests[0]).Return(nil, errors.New("failed to terminate")) @@ -58,7 +61,8 @@ func TestTerminateExecutionFuncWithError(t *testing.T) { } func TestTerminateExecutionFuncWithPartialSuccess(t *testing.T) { - s := setup() + s := testutils.Setup(t) + terminateExecutionSetup() terminateExecResponse := &admin.ExecutionTerminateResponse{} s.MockAdminClient.OnTerminateExecutionMatch(s.Ctx, terminateExecRequests[0]).Return(terminateExecResponse, nil) diff --git a/flytectl/cmd/delete/matchable_cluster_resource_attribute_test.go b/flytectl/cmd/delete/matchable_cluster_resource_attribute_test.go index f2fe9ca49e..318959da95 100644 --- a/flytectl/cmd/delete/matchable_cluster_resource_attribute_test.go +++ b/flytectl/cmd/delete/matchable_cluster_resource_attribute_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/clusterresourceattribute" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,7 +19,8 
@@ func deleteClusterResourceAttributeSetup() { func TestDeleteClusterResourceAttributes(t *testing.T) { t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file clusterresourceattribute.DefaultDelConfig.AttrFile = "" @@ -31,7 +33,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("failed project domain attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // No args implying project domain attribute deletion s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, @@ -43,7 +46,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("successful project domain attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file clusterresourceattribute.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_cluster_attribute.yaml" @@ -56,7 +60,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { s.Ctx, "flytesnacks", "development", admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("successful workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file clusterresourceattribute.DefaultDelConfig.AttrFile = "" @@ -70,7 +75,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("failed workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file 
clusterresourceattribute.DefaultDelConfig.AttrFile = "" @@ -85,7 +91,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("successful workflow attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file clusterresourceattribute.DefaultDelConfig.AttrFile = "testdata/valid_workflow_cluster_attribute.yaml" @@ -99,7 +106,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("workflow attribute deletion non existent file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file clusterresourceattribute.DefaultDelConfig.AttrFile = testDataNonExistentFile @@ -113,7 +121,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("attribute deletion invalid file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file clusterresourceattribute.DefaultDelConfig.AttrFile = testDataInvalidAttrFile diff --git a/flytectl/cmd/delete/matchable_execution_cluster_label_test.go b/flytectl/cmd/delete/matchable_execution_cluster_label_test.go index 7335bd6721..bc9d21a889 100644 --- a/flytectl/cmd/delete/matchable_execution_cluster_label_test.go +++ b/flytectl/cmd/delete/matchable_execution_cluster_label_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/executionclusterlabel" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,7 +19,8 @@ func deleteExecutionClusterLabelSetup() { func TestDeleteExecutionClusterLabels(t *testing.T) { t.Run("successful project 
domain attribute deletion commandline", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = "" @@ -31,7 +33,8 @@ func TestDeleteExecutionClusterLabels(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("failed project domain attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // No args implying project domain attribute deletion s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, @@ -43,7 +46,8 @@ func TestDeleteExecutionClusterLabels(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("successful project domain attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_execution_cluster_label.yaml" @@ -56,7 +60,8 @@ func TestDeleteExecutionClusterLabels(t *testing.T) { s.Ctx, "flytesnacks", "development", admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("successful workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = "" @@ -70,7 +75,8 @@ func TestDeleteExecutionClusterLabels(t *testing.T) { admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("failed workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = "" @@ -85,7 +91,8 @@ func TestDeleteExecutionClusterLabels(t *testing.T) { 
admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("successful workflow attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = "testdata/valid_workflow_execution_cluster_label.yaml" @@ -99,7 +106,8 @@ func TestDeleteExecutionClusterLabels(t *testing.T) { admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("workflow attribute deletion non existent file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = testDataNonExistentFile @@ -113,7 +121,8 @@ func TestDeleteExecutionClusterLabels(t *testing.T) { admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("attribute deletion invalid file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = testDataInvalidAttrFile diff --git a/flytectl/cmd/delete/matchable_execution_queue_attribute_test.go b/flytectl/cmd/delete/matchable_execution_queue_attribute_test.go index 20402ee79b..53a9613c74 100644 --- a/flytectl/cmd/delete/matchable_execution_queue_attribute_test.go +++ b/flytectl/cmd/delete/matchable_execution_queue_attribute_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/executionqueueattribute" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,7 +19,8 @@ func deleteExecutionQueueAttributeSetup() { func TestDeleteExecutionQueueAttributes(t *testing.T) { t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + 
deleteExecutionQueueAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = "" @@ -31,7 +33,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("failed project domain attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // No args implying project domain attribute deletion s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, @@ -43,7 +46,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("successful project domain attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_execution_queue_attribute.yaml" @@ -56,7 +60,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { s.Ctx, "flytesnacks", "development", admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("successful workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = "" @@ -70,7 +75,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("failed workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = "" @@ -85,7 +91,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("successful workflow attribute deletion file", func(t *testing.T) { - s := setup() 
+ s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = "testdata/valid_workflow_execution_queue_attribute.yaml" @@ -99,7 +106,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("workflow attribute deletion non existent file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = testDataNonExistentFile @@ -113,7 +121,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("attribute deletion invalid file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = testDataInvalidAttrFile diff --git a/flytectl/cmd/delete/matchable_plugin_override_test.go b/flytectl/cmd/delete/matchable_plugin_override_test.go index 623729fdd2..f070fe87b7 100644 --- a/flytectl/cmd/delete/matchable_plugin_override_test.go +++ b/flytectl/cmd/delete/matchable_plugin_override_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" pluginoverride "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/plugin_override" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,7 +19,8 @@ func deletePluginOverrideSetup() { func TestPluginOverride(t *testing.T) { t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = "" @@ -31,7 +33,8 @@ func TestPluginOverride(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, 
admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("failed project domain attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // No args implying project domain attribute deletion s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, @@ -43,7 +46,8 @@ func TestPluginOverride(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("successful project domain attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_plugin_override.yaml" @@ -56,7 +60,8 @@ func TestPluginOverride(t *testing.T) { s.Ctx, "flytesnacks", "development", admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("successful workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = "" @@ -70,7 +75,8 @@ func TestPluginOverride(t *testing.T) { admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("failed workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = "" @@ -85,7 +91,8 @@ func TestPluginOverride(t *testing.T) { admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("successful workflow attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = "testdata/valid_workflow_plugin_override.yaml" @@ -99,7 +106,8 @@ func TestPluginOverride(t *testing.T) { admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("workflow attribute deletion non existent file", func(t *testing.T) { - s := setup() + s := 
testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = testDataNonExistentFile @@ -113,7 +121,8 @@ func TestPluginOverride(t *testing.T) { admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("attribute deletion invalid file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = testDataInvalidAttrFile diff --git a/flytectl/cmd/delete/matchable_task_resource_attribute_test.go b/flytectl/cmd/delete/matchable_task_resource_attribute_test.go index 484052b6ab..4b19275bfa 100644 --- a/flytectl/cmd/delete/matchable_task_resource_attribute_test.go +++ b/flytectl/cmd/delete/matchable_task_resource_attribute_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/taskresourceattribute" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,7 +19,8 @@ func deleteTaskResourceAttributeSetup() { func TestDeleteTaskResourceAttributes(t *testing.T) { t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = "" @@ -31,7 +33,8 @@ func TestDeleteTaskResourceAttributes(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_TASK_RESOURCE) }) t.Run("failed project domain attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // No args implying project domain attribute deletion s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, @@ -43,7 +46,8 @@ func 
TestDeleteTaskResourceAttributes(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_TASK_RESOURCE) }) t.Run("successful project domain attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_task_attribute.yaml" @@ -56,7 +60,8 @@ func TestDeleteTaskResourceAttributes(t *testing.T) { s.Ctx, "flytesnacks", "development", admin.MatchableResource_TASK_RESOURCE) }) t.Run("successful workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = "" @@ -70,7 +75,8 @@ func TestDeleteTaskResourceAttributes(t *testing.T) { admin.MatchableResource_TASK_RESOURCE) }) t.Run("failed workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = "" @@ -85,7 +91,8 @@ func TestDeleteTaskResourceAttributes(t *testing.T) { admin.MatchableResource_TASK_RESOURCE) }) t.Run("successful workflow attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = "testdata/valid_workflow_task_attribute.yaml" @@ -99,7 +106,8 @@ func TestDeleteTaskResourceAttributes(t *testing.T) { admin.MatchableResource_TASK_RESOURCE) }) t.Run("workflow attribute deletion non existent file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = testDataNonExistentFile @@ -113,7 +121,8 @@ func TestDeleteTaskResourceAttributes(t *testing.T) { 
admin.MatchableResource_TASK_RESOURCE) }) t.Run("attribute deletion invalid file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = testDataInvalidAttrFile diff --git a/flytectl/cmd/delete/matchable_workflow_execution_config_test.go b/flytectl/cmd/delete/matchable_workflow_execution_config_test.go index 88681a32d5..7c473a5ffe 100644 --- a/flytectl/cmd/delete/matchable_workflow_execution_config_test.go +++ b/flytectl/cmd/delete/matchable_workflow_execution_config_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/workflowexecutionconfig" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,7 +19,8 @@ func deleteWorkflowExecutionConfigSetup() { func TestDeleteWorkflowExecutionConfig(t *testing.T) { t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = "" @@ -31,7 +33,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) t.Run("failed project domain attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // No args implying project domain attribute deletion s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, @@ -43,7 +46,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) 
t.Run("successful project domain attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_workflow_execution_config.yaml" @@ -56,7 +60,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { s.Ctx, "flytesnacks", "development", admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) t.Run("successful workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = "" @@ -70,7 +75,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) t.Run("failed workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = "" @@ -85,7 +91,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) t.Run("successful workflow attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = "testdata/valid_workflow_workflow_execution_config.yaml" @@ -99,7 +106,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) t.Run("workflow attribute deletion non existent file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = testDataNonExistentFile @@ -113,7 +121,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) t.Run("attribute deletion 
invalid file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = testDataInvalidAttrFile diff --git a/flytectl/cmd/demo/exec_test.go b/flytectl/cmd/demo/exec_test.go index c4b289105f..34ed289dc6 100644 --- a/flytectl/cmd/demo/exec_test.go +++ b/flytectl/cmd/demo/exec_test.go @@ -50,8 +50,7 @@ func TestDemoClusterExec(t *testing.T) { func TestSandboxClusterExecWithoutCmd(t *testing.T) { mockDocker := &mocks.Docker{} reader := bufio.NewReader(strings.NewReader("test")) - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx diff --git a/flytectl/cmd/demo/status_test.go b/flytectl/cmd/demo/status_test.go index f2006cdbf8..4080072160 100644 --- a/flytectl/cmd/demo/status_test.go +++ b/flytectl/cmd/demo/status_test.go @@ -14,16 +14,14 @@ import ( func TestDemoStatus(t *testing.T) { t.Run("Demo status with zero result", func(t *testing.T) { mockDocker := &mocks.Docker{} - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) mockDocker.OnContainerList(s.Ctx, container.ListOptions{All: true}).Return([]types.Container{}, nil) docker.Client = mockDocker err := demoClusterStatus(s.Ctx, []string{}, s.CmdCtx) assert.Nil(t, err) }) t.Run("Demo status with running", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx mockDocker := &mocks.Docker{} mockDocker.OnContainerList(ctx, container.ListOptions{All: true}).Return([]types.Container{ diff --git a/flytectl/cmd/demo/teardown_test.go b/flytectl/cmd/demo/teardown_test.go index 73927d86eb..4cbcf037a0 100644 --- a/flytectl/cmd/demo/teardown_test.go +++ b/flytectl/cmd/demo/teardown_test.go @@ -80,8 +80,7 @@ func TestTearDownFunc(t *testing.T) { func TestTearDownClusterFunc(t *testing.T) { _ = util.SetupFlyteDir() _ = util.WriteIntoFile([]byte("data"), configutil.FlytectlConfig) - s := testutils.Setup() - defer 
s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx mockDocker := &mocks.Docker{} diff --git a/flytectl/cmd/get/execution_test.go b/flytectl/cmd/get/execution_test.go index 329211621a..0598ec72b6 100644 --- a/flytectl/cmd/get/execution_test.go +++ b/flytectl/cmd/get/execution_test.go @@ -29,8 +29,7 @@ func getExecutionSetup() { func TestListExecutionFunc(t *testing.T) { getExecutionSetup() - s := setup() - defer s.TearDown() + s := testutils.Setup(t) executionResponse := &admin.Execution{ Id: &core.WorkflowExecutionIdentifier{ @@ -92,8 +91,7 @@ func TestListExecutionFuncWithError(t *testing.T) { Phase: core.WorkflowExecution_SUCCEEDED, }, } - s := setup() - defer s.TearDown() + s := testutils.Setup(t) s.FetcherExt.OnListExecutionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("executions NotFound")) err := getExecutionFunc(s.Ctx, []string{}, s.CmdCtx) @@ -129,8 +127,7 @@ func TestGetExecutionFunc(t *testing.T) { }, } args := []string{executionNameValue} - s := setup() - defer s.TearDown() + s := testutils.Setup(t) s.FetcherExt.OnFetchExecutionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything).Return(executionResponse, nil) err := getExecutionFunc(s.Ctx, args, s.CmdCtx) @@ -139,8 +136,7 @@ func TestGetExecutionFunc(t *testing.T) { } func TestGetExecutionFuncForDetails(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionSetup() ctx := s.Ctx mockCmdCtx := s.CmdCtx @@ -156,8 +152,7 @@ func TestGetExecutionFuncForDetails(t *testing.T) { func TestGetExecutionFuncWithIOData(t *testing.T) { t.Run("successful inputs outputs", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionSetup() ctx := s.Ctx @@ -222,8 +217,7 @@ func TestGetExecutionFuncWithIOData(t *testing.T) { assert.Nil(t, err) }) t.Run("fetch data error from admin", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) 
getExecutionSetup() ctx := s.Ctx @@ -264,8 +258,7 @@ func TestGetExecutionFuncWithIOData(t *testing.T) { args := []string{dummyExec} for _, tt := range tests { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) config.GetConfig().Output = tt.outputFormat execution.DefaultConfig.NodeID = tt.nodeID @@ -365,8 +358,7 @@ func TestGetExecutionFuncWithError(t *testing.T) { } args := []string{executionNameValue} - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) s.FetcherExt.OnFetchExecutionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("execution NotFound")) err := getExecutionFunc(s.Ctx, args, s.CmdCtx) diff --git a/flytectl/cmd/get/get_test.go b/flytectl/cmd/get/get_test.go index c40394c785..c11e4339da 100644 --- a/flytectl/cmd/get/get_test.go +++ b/flytectl/cmd/get/get_test.go @@ -5,7 +5,6 @@ import ( "sort" "testing" - "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/stretchr/testify/assert" ) @@ -19,8 +18,6 @@ const workflowNameValue = "wf_name" const workflowVersionValue = "wf_version" const testDataFolder = "../testdata/" -var setup = testutils.Setup - const ( testDataTempFile = "temp-output-file" testDataNotExistentTempFile = "non-existent-dir/temp-output-file" diff --git a/flytectl/cmd/get/launch_plan_test.go b/flytectl/cmd/get/launch_plan_test.go index 87cc091535..7b1359b7ec 100644 --- a/flytectl/cmd/get/launch_plan_test.go +++ b/flytectl/cmd/get/launch_plan_test.go @@ -215,8 +215,7 @@ func getLaunchPlanSetup() { func TestGetLaunchPlanFuncWithError(t *testing.T) { t.Run("failure fetch latest", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) launchplan.DefaultConfig.Latest = true @@ -228,8 +227,7 @@ func TestGetLaunchPlanFuncWithError(t *testing.T) { }) t.Run("failure fetching version ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) 
getLaunchPlanSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) launchplan.DefaultConfig.Version = "v1" @@ -241,8 +239,7 @@ func TestGetLaunchPlanFuncWithError(t *testing.T) { }) t.Run("failure fetching all version ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() launchplan.DefaultConfig.Filter = filters.Filters{} launchplan.DefaultConfig.Filter = filters.Filters{} @@ -254,8 +251,7 @@ func TestGetLaunchPlanFuncWithError(t *testing.T) { }) t.Run("failure fetching ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "launchplan1", "dummyProject", "dummyDomain", filters.Filters{}).Return(nil, fmt.Errorf("error fetching all version")) s.MockAdminClient.OnListLaunchPlansMatch(s.Ctx, resourceGetRequest).Return(nil, fmt.Errorf("error fetching all version")) @@ -266,8 +262,7 @@ func TestGetLaunchPlanFuncWithError(t *testing.T) { }) t.Run("failure fetching list", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() argsLp = []string{} s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", "dummyProject", "dummyDomain", filters.Filters{}).Return(nil, fmt.Errorf("error fetching all version")) @@ -278,8 +273,7 @@ func TestGetLaunchPlanFuncWithError(t *testing.T) { } func TestGetLaunchPlanFunc(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.FetcherExt.OnFetchAllVerOfLPMatch(mock.Anything, mock.Anything, "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.LaunchPlans, nil) err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) @@ -289,8 +283,7 @@ func TestGetLaunchPlanFunc(t *testing.T) { } func TestGetLaunchPlanFuncLatest(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() launchplan.DefaultConfig.Latest = true 
s.FetcherExt.OnFetchLPLatestVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(launchPlan2, nil) @@ -301,8 +294,7 @@ func TestGetLaunchPlanFuncLatest(t *testing.T) { } func TestGetLaunchPlanWithVersion(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() launchplan.DefaultConfig.Version = "v2" s.FetcherExt.OnFetchLPVersion(s.Ctx, "launchplan1", "v2", "dummyProject", "dummyDomain").Return(launchPlan2, nil) @@ -314,8 +306,7 @@ func TestGetLaunchPlanWithVersion(t *testing.T) { func TestGetLaunchPlans(t *testing.T) { t.Run("no workflow filter", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.LaunchPlans, nil) argsLp = []string{} @@ -324,8 +315,7 @@ func TestGetLaunchPlans(t *testing.T) { s.TearDownAndVerify(t, `[{"id": {"name": "launchplan1","version": "v2"},"spec": {"workflowId": {"name": "workflow2"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in 
table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:01Z"}},{"id": {"name": "launchplan1","version": "v1"},"spec": {"workflowId": {"name": "workflow1"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:00Z"}}]`) }) t.Run("workflow filter", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", "dummyProject", "dummyDomain", filters.Filters{ FieldSelector: "workflow.name=workflow2", @@ -337,8 +327,7 @@ func TestGetLaunchPlans(t *testing.T) { s.TearDownAndVerify(t, `[{"id": {"name": "launchplan1","version": "v2"},"spec": {"workflowId": {"name": "workflow2"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": 
{"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:01Z"}},{"id": {"name": "launchplan1","version": "v1"},"spec": {"workflowId": {"name": "workflow1"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be 
truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:00Z"}}]`) }) t.Run("workflow filter error", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() argsLp = []string{} launchplan.DefaultConfig.Workflow = "workflow2" @@ -350,8 +339,7 @@ func TestGetLaunchPlans(t *testing.T) { } func TestGetLaunchPlansWithExecFile(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.MockAdminClient.OnListLaunchPlansMatch(s.Ctx, resourceListRequest).Return(launchPlanListResponse, nil) s.MockAdminClient.OnGetLaunchPlanMatch(s.Ctx, objectGetRequest).Return(launchPlan2, nil) @@ -386,8 +374,7 @@ workflow: launchplan1 } func TestGetLaunchPlanTableFunc(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.MockAdminClient.OnListLaunchPlansMatch(s.Ctx, resourceGetRequest).Return(launchPlanListResponse, nil) s.MockAdminClient.OnGetLaunchPlanMatch(s.Ctx, objectGetRequest).Return(launchPlan2, nil) diff --git a/flytectl/cmd/get/matchable_cluster_resource_attribute_test.go b/flytectl/cmd/get/matchable_cluster_resource_attribute_test.go index 43069edaa6..95e53e5b38 100644 --- a/flytectl/cmd/get/matchable_cluster_resource_attribute_test.go +++ b/flytectl/cmd/get/matchable_cluster_resource_attribute_test.go @@ -47,8 +47,7 @@ func TestGetClusterResourceAttributes(t *testing.T) { }, } t.Run("successful get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getClusterResourceAttributeSetup() // No args implying project domain attribute deletion @@ -61,8 +60,7 @@ func TestGetClusterResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","attributes":{"foo":"bar"}}`) 
}) t.Run("successful get project domain attribute and write to file", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getClusterResourceAttributeSetup() clusterresourceattribute.DefaultFetchConfig.AttrFile = testDataTempFile @@ -76,8 +74,7 @@ func TestGetClusterResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) }) t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getClusterResourceAttributeSetup() clusterresourceattribute.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile @@ -92,8 +89,7 @@ func TestGetClusterResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("failed get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getClusterResourceAttributeSetup() // No args implying project domain attribute deletion @@ -107,8 +103,7 @@ func TestGetClusterResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("successful get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getClusterResourceAttributeSetup() args := []string{"workflow"} @@ -122,8 +117,7 @@ func TestGetClusterResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","attributes":{"foo":"bar"}}`) }) t.Run("failed get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getClusterResourceAttributeSetup() args := []string{"workflow"} diff --git a/flytectl/cmd/get/matchable_execution_cluster_label_test.go b/flytectl/cmd/get/matchable_execution_cluster_label_test.go index 3ac42a87de..03a0bdba96 100644 --- a/flytectl/cmd/get/matchable_execution_cluster_label_test.go +++ 
b/flytectl/cmd/get/matchable_execution_cluster_label_test.go @@ -47,8 +47,7 @@ func TestGetExecutionClusterLabel(t *testing.T) { }, } t.Run("successful get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionClusterLabelSetup() // No args implying project domain attribute deletion @@ -61,8 +60,7 @@ func TestGetExecutionClusterLabel(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","value":"foo"}`) }) t.Run("successful get project domain attribute and write to file", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionClusterLabelSetup() executionclusterlabel.DefaultFetchConfig.AttrFile = testDataTempFile @@ -76,8 +74,7 @@ func TestGetExecutionClusterLabel(t *testing.T) { s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) }) t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionClusterLabelSetup() executionclusterlabel.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile @@ -92,8 +89,7 @@ func TestGetExecutionClusterLabel(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("failed to get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionClusterLabelSetup() // No args implying project domain attribute deletion @@ -107,8 +103,7 @@ func TestGetExecutionClusterLabel(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("successful get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionClusterLabelSetup() args := []string{"workflow"} @@ -122,8 +117,7 @@ func TestGetExecutionClusterLabel(t *testing.T) { s.TearDownAndVerify(t, 
`{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","value":"foo"}`) }) t.Run("failed to get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionClusterLabelSetup() args := []string{"workflow"} diff --git a/flytectl/cmd/get/matchable_execution_queue_attribute_test.go b/flytectl/cmd/get/matchable_execution_queue_attribute_test.go index 3dd8e235cf..74b8d4dd91 100644 --- a/flytectl/cmd/get/matchable_execution_queue_attribute_test.go +++ b/flytectl/cmd/get/matchable_execution_queue_attribute_test.go @@ -47,8 +47,7 @@ func TestGetExecutionQueueAttributes(t *testing.T) { }, } t.Run("successful get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionQueueAttributeSetup() // No args implying project domain attribute deletion @@ -61,8 +60,7 @@ func TestGetExecutionQueueAttributes(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","tags":["foo","bar"]}`) }) t.Run("successful get project domain attribute and write to file", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionQueueAttributeSetup() executionqueueattribute.DefaultFetchConfig.AttrFile = testDataTempFile @@ -76,8 +74,7 @@ func TestGetExecutionQueueAttributes(t *testing.T) { s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) }) t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionQueueAttributeSetup() executionqueueattribute.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile @@ -92,8 +89,7 @@ func TestGetExecutionQueueAttributes(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("failed get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := 
testutils.Setup(t) getExecutionQueueAttributeSetup() // No args implying project domain attribute deletion @@ -107,8 +103,7 @@ func TestGetExecutionQueueAttributes(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("successful get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionQueueAttributeSetup() args := []string{"workflow"} @@ -122,8 +117,7 @@ func TestGetExecutionQueueAttributes(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","tags":["foo","bar"]}`) }) t.Run("failed get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionQueueAttributeSetup() args := []string{"workflow"} diff --git a/flytectl/cmd/get/matchable_plugin_override_test.go b/flytectl/cmd/get/matchable_plugin_override_test.go index 025267a462..ba70299444 100644 --- a/flytectl/cmd/get/matchable_plugin_override_test.go +++ b/flytectl/cmd/get/matchable_plugin_override_test.go @@ -59,8 +59,7 @@ func TestGetPluginOverride(t *testing.T) { }, } t.Run("successful get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getPluginOverrideSetup() // No args implying project domain attribute deletion @@ -73,8 +72,7 @@ func TestGetPluginOverride(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","overrides":[{"task_type":"python_task","plugin_id":["plugin-override1","plugin-override2"]},{"task_type":"java_task","plugin_id":["plugin-override3","plugin-override3"],"missing_plugin_behavior":1}]}`) }) t.Run("successful get project domain attribute and write to file", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getPluginOverrideSetup() pluginoverride.DefaultFetchConfig.AttrFile = testDataTempFile @@ -88,8 +86,7 @@ func TestGetPluginOverride(t *testing.T) 
{ s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) }) t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getPluginOverrideSetup() pluginoverride.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile @@ -104,8 +101,7 @@ func TestGetPluginOverride(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("failed get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getPluginOverrideSetup() // No args implying project domain attribute deletion @@ -119,8 +115,7 @@ func TestGetPluginOverride(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("successful get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getPluginOverrideSetup() args := []string{"workflow"} @@ -133,8 +128,7 @@ func TestGetPluginOverride(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","overrides":[{"task_type":"python_task","plugin_id":["plugin-override1","plugin-override2"]},{"task_type":"java_task","plugin_id":["plugin-override3","plugin-override3"],"missing_plugin_behavior":1}]}`) }) t.Run("failed get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getPluginOverrideSetup() args := []string{"workflow"} diff --git a/flytectl/cmd/get/matchable_task_resource_attribute_test.go b/flytectl/cmd/get/matchable_task_resource_attribute_test.go index b5e8887583..db830a0a29 100644 --- a/flytectl/cmd/get/matchable_task_resource_attribute_test.go +++ b/flytectl/cmd/get/matchable_task_resource_attribute_test.go @@ -54,8 +54,7 @@ func TestGetTaskResourceAttributes(t *testing.T) { }, } t.Run("successful get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) 
getTaskResourceAttributeSetup() // No args implying project domain attribute deletion @@ -68,8 +67,7 @@ func TestGetTaskResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","defaults":{"cpu":"1","memory":"150Mi"},"limits":{"cpu":"2","memory":"350Mi"}}`) }) t.Run("successful get project domain attribute and write to file", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskResourceAttributeSetup() taskresourceattribute.DefaultFetchConfig.AttrFile = testDataTempFile @@ -83,8 +81,7 @@ func TestGetTaskResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) }) t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskResourceAttributeSetup() taskresourceattribute.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile @@ -99,8 +96,7 @@ func TestGetTaskResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("failed get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskResourceAttributeSetup() // No args implying project domain attribute deletion @@ -114,8 +110,7 @@ func TestGetTaskResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("successful get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskResourceAttributeSetup() args := []string{"workflow"} @@ -129,8 +124,7 @@ func TestGetTaskResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","defaults":{"cpu":"1","memory":"150Mi"},"limits":{"cpu":"2","memory":"350Mi"}}`) }) t.Run("failed get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) 
getTaskResourceAttributeSetup() args := []string{"workflow"} diff --git a/flytectl/cmd/get/matchable_workflow_execution_config_test.go b/flytectl/cmd/get/matchable_workflow_execution_config_test.go index 69b88ee900..2600e6ea8a 100644 --- a/flytectl/cmd/get/matchable_workflow_execution_config_test.go +++ b/flytectl/cmd/get/matchable_workflow_execution_config_test.go @@ -47,8 +47,7 @@ func TestGetWorkflowExecutionConfig(t *testing.T) { }, } t.Run("successful get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowExecutionConfigSetup() // No args implying project domain attribute deletion @@ -61,8 +60,7 @@ func TestGetWorkflowExecutionConfig(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","max_parallelism":5}`) }) t.Run("successful get project domain attribute and write to file", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowExecutionConfigSetup() workflowexecutionconfig.DefaultFetchConfig.AttrFile = testDataTempFile @@ -76,8 +74,7 @@ func TestGetWorkflowExecutionConfig(t *testing.T) { s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) }) t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowExecutionConfigSetup() workflowexecutionconfig.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile @@ -92,8 +89,7 @@ func TestGetWorkflowExecutionConfig(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("failed get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowExecutionConfigSetup() // No args implying project domain attribute deletion @@ -107,8 +103,7 @@ func TestGetWorkflowExecutionConfig(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("successful get workflow 
attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowExecutionConfigSetup() args := []string{"workflow"} @@ -122,8 +117,7 @@ func TestGetWorkflowExecutionConfig(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","max_parallelism":5}`) }) t.Run("failed get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowExecutionConfigSetup() args := []string{"workflow"} diff --git a/flytectl/cmd/get/node_execution_test.go b/flytectl/cmd/get/node_execution_test.go index 588ea6033c..030b5c0262 100644 --- a/flytectl/cmd/get/node_execution_test.go +++ b/flytectl/cmd/get/node_execution_test.go @@ -158,8 +158,7 @@ func createDummyTaskExecutionForNode(nodeID string, taskID string) *admin.TaskEx func TestGetExecutionDetails(t *testing.T) { t.Run("successful get details default view", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx mockCmdCtx := s.CmdCtx @@ -226,8 +225,7 @@ func TestGetExecutionDetails(t *testing.T) { }) t.Run("successful get details default view for node-id", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx mockCmdCtx := s.CmdCtx @@ -291,8 +289,7 @@ func TestGetExecutionDetails(t *testing.T) { }) t.Run("failure task exec fetch", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx mockCmdCtx := s.CmdCtx diff --git a/flytectl/cmd/get/project_test.go b/flytectl/cmd/get/project_test.go index 7bcc55a236..7b53a77a67 100644 --- a/flytectl/cmd/get/project_test.go +++ b/flytectl/cmd/get/project_test.go @@ -51,8 +51,7 @@ func getProjectSetup() { } func TestListProjectFunc(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getProjectSetup() project.DefaultConfig.Filter = 
filters.Filters{} @@ -65,8 +64,7 @@ func TestListProjectFunc(t *testing.T) { } func TestGetProjectFunc(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getProjectSetup() argsProject = []string{} @@ -80,8 +78,7 @@ func TestGetProjectFunc(t *testing.T) { } func TestGetProjectFuncError(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getProjectSetup() project.DefaultConfig.Filter = filters.Filters{ diff --git a/flytectl/cmd/get/task_test.go b/flytectl/cmd/get/task_test.go index 27e65d3fef..d0f817fd1e 100644 --- a/flytectl/cmd/get/task_test.go +++ b/flytectl/cmd/get/task_test.go @@ -170,8 +170,7 @@ func getTaskSetup() { func TestGetTaskFuncWithError(t *testing.T) { t.Run("failure fetch latest", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) @@ -184,8 +183,7 @@ func TestGetTaskFuncWithError(t *testing.T) { }) t.Run("failure fetching version ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) @@ -198,8 +196,7 @@ func TestGetTaskFuncWithError(t *testing.T) { }) t.Run("failure fetching all version ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) @@ -211,8 +208,7 @@ func TestGetTaskFuncWithError(t *testing.T) { }) t.Run("failure fetching ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(nil, fmt.Errorf("error fetching all version")) @@ -225,8 +221,7 @@ func TestGetTaskFuncWithError(t *testing.T) { }) t.Run("failure fetching list task", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() taskConfig.DefaultConfig.Filter 
= filters.Filters{} @@ -242,8 +237,7 @@ func TestGetTaskFuncWithError(t *testing.T) { } func TestGetTaskFunc(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() taskConfig.DefaultConfig.Filter = filters.Filters{} @@ -329,8 +323,7 @@ func TestGetTaskFunc(t *testing.T) { } func TestGetTaskFuncWithTable(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() taskConfig.DefaultConfig.Filter = filters.Filters{} @@ -355,8 +348,7 @@ func TestGetTaskFuncWithTable(t *testing.T) { } func TestGetTaskFuncLatest(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() taskConfig.DefaultConfig.Filter = filters.Filters{} @@ -406,8 +398,7 @@ func TestGetTaskFuncLatest(t *testing.T) { } func TestGetTaskWithVersion(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() taskConfig.DefaultConfig.Filter = filters.Filters{} @@ -458,8 +449,7 @@ func TestGetTaskWithVersion(t *testing.T) { } func TestGetTasks(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() taskConfig.DefaultConfig.Filter = filters.Filters{} @@ -473,8 +463,7 @@ func TestGetTasks(t *testing.T) { } func TestGetTasksFilters(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() taskConfig.DefaultConfig.Filter = filters.Filters{ @@ -498,8 +487,7 @@ func TestGetTasksFilters(t *testing.T) { } func TestGetTaskWithExecFile(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil) diff --git a/flytectl/cmd/get/workflow_test.go b/flytectl/cmd/get/workflow_test.go index 3e01067750..20aa12e011 100644 --- a/flytectl/cmd/get/workflow_test.go +++ b/flytectl/cmd/get/workflow_test.go @@ -93,8 +93,7 @@ func 
getWorkflowSetup() { func TestGetWorkflowFuncWithError(t *testing.T) { t.Run("failure fetch latest", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) @@ -105,8 +104,7 @@ func TestGetWorkflowFuncWithError(t *testing.T) { }) t.Run("failure fetching version ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) @@ -118,8 +116,7 @@ func TestGetWorkflowFuncWithError(t *testing.T) { }) t.Run("failure fetching all version ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) @@ -130,8 +127,7 @@ func TestGetWorkflowFuncWithError(t *testing.T) { }) t.Run("failure fetching ", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() workflow.DefaultConfig.Latest = true @@ -143,8 +139,7 @@ func TestGetWorkflowFuncWithError(t *testing.T) { }) t.Run("fetching all workflow success", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() var args []string @@ -155,8 +150,7 @@ func TestGetWorkflowFuncWithError(t *testing.T) { }) t.Run("fetching all workflow error", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() var args []string @@ -169,8 +163,7 @@ func TestGetWorkflowFuncWithError(t *testing.T) { } func TestGetWorkflowFuncLatestWithTable(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() workflow.DefaultConfig.Latest = true @@ -189,8 +182,7 @@ func TestGetWorkflowFuncLatestWithTable(t *testing.T) { } func TestListWorkflowFuncWithTable(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() workflow.DefaultConfig.Filter = 
filters.Filters{} diff --git a/flytectl/cmd/register/examples_test.go b/flytectl/cmd/register/examples_test.go index fc3996f185..3af84d0957 100644 --- a/flytectl/cmd/register/examples_test.go +++ b/flytectl/cmd/register/examples_test.go @@ -3,18 +3,21 @@ package register import ( "testing" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/stretchr/testify/assert" ) func TestRegisterExamplesFunc(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() args := []string{""} err := registerExamplesFunc(s.Ctx, args, s.CmdCtx) assert.NotNil(t, err) } func TestRegisterExamplesFuncErr(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() flytesnacks = "testingsnacks" args := []string{""} diff --git a/flytectl/cmd/register/files_test.go b/flytectl/cmd/register/files_test.go index 45827b7e13..1c468eb0a6 100644 --- a/flytectl/cmd/register/files_test.go +++ b/flytectl/cmd/register/files_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" rconfig "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/register" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flyte/flytestdlib/contextutils" @@ -22,7 +23,8 @@ const ( func TestRegisterFromFiles(t *testing.T) { t.Run("Valid registration", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/valid-parent-folder-register.tar"} @@ -34,7 +36,8 @@ func TestRegisterFromFiles(t *testing.T) { assert.Nil(t, err) }) t.Run("Valid fast registration", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) registerFilesSetup() @@ -59,7 +62,8 @@ func 
TestRegisterFromFiles(t *testing.T) { assert.Nil(t, err) }) t.Run("Register a workflow with a failure node", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) registerFilesSetup() @@ -84,7 +88,8 @@ func TestRegisterFromFiles(t *testing.T) { assert.Nil(t, err) }) t.Run("Failed fast registration while uploading the codebase", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) @@ -105,7 +110,8 @@ func TestRegisterFromFiles(t *testing.T) { assert.Nil(t, err) }) t.Run("Failed registration because of invalid files", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) @@ -125,7 +131,8 @@ func TestRegisterFromFiles(t *testing.T) { assert.NotNil(t, err) }) t.Run("Failure registration of fast serialize", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) @@ -148,7 +155,8 @@ func TestRegisterFromFiles(t *testing.T) { assert.Equal(t, fmt.Errorf("failed"), err) }) t.Run("Failure registration of fast serialize continue on error", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) @@ -172,7 +180,8 @@ func TestRegisterFromFiles(t *testing.T) { assert.Equal(t, fmt.Errorf("failed"), err) }) t.Run("Valid registration of fast serialize", func(t *testing.T) { - s := setup() + s := 
testutils.Setup(t) + registerFilesSetup() testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) @@ -196,7 +205,8 @@ func TestRegisterFromFiles(t *testing.T) { }) t.Run("Registration with proto files ", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) diff --git a/flytectl/cmd/register/register_test.go b/flytectl/cmd/register/register_test.go index 43e89a3961..bf0a0affbb 100644 --- a/flytectl/cmd/register/register_test.go +++ b/flytectl/cmd/register/register_test.go @@ -6,7 +6,6 @@ import ( "sort" "testing" - u "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/stretchr/testify/assert" ) @@ -14,8 +13,6 @@ var ( GetDoFunc func(req *http.Request) (*http.Response, error) ) -var setup = u.Setup - func TestRegisterCommand(t *testing.T) { registerCommand := RemoteRegisterCommand() assert.Equal(t, registerCommand.Use, "register") diff --git a/flytectl/cmd/register/register_util_test.go b/flytectl/cmd/register/register_util_test.go index b6625c27de..e068c0f64a 100644 --- a/flytectl/cmd/register/register_util_test.go +++ b/flytectl/cmd/register/register_util_test.go @@ -13,6 +13,7 @@ import ( "testing" rconfig "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/register" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" ghMocks "github.com/flyteorg/flyte/flytectl/pkg/github/mocks" "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" @@ -61,7 +62,8 @@ func registerFilesSetup() { } func TestGetSortedArchivedFileWithParentFolderList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/valid-parent-folder-register.tar"} @@ -78,7 +80,8 @@ func 
TestGetSortedArchivedFileWithParentFolderList(t *testing.T) { } func TestGetSortedArchivedFileList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/valid-register.tar"} @@ -95,7 +98,8 @@ func TestGetSortedArchivedFileList(t *testing.T) { } func TestGetSortedArchivedFileUnorderedList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/valid-unordered-register.tar"} @@ -112,7 +116,8 @@ func TestGetSortedArchivedFileUnorderedList(t *testing.T) { } func TestGetSortedArchivedCorruptedFileList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/invalid.tar"} @@ -125,7 +130,8 @@ func TestGetSortedArchivedCorruptedFileList(t *testing.T) { } func TestGetSortedArchivedTgzList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/valid-register.tgz"} @@ -142,7 +148,8 @@ func TestGetSortedArchivedTgzList(t *testing.T) { } func TestGetSortedArchivedCorruptedTgzFileList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/invalid.tgz"} fileList, tmpDir, err := GetSerializeOutputFiles(s.Ctx, args, rconfig.DefaultFilesConfig.Archive) @@ -154,7 +161,8 @@ func TestGetSortedArchivedCorruptedTgzFileList(t *testing.T) { } func TestGetSortedArchivedInvalidArchiveFileList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/invalid-extension-register.zip"} @@ -168,7 +176,8 @@ func TestGetSortedArchivedInvalidArchiveFileList(t *testing.T) { } func TestGetSortedArchivedFileThroughInvalidHttpList(t *testing.T) { - s := setup() + s := 
testutils.Setup(t) + rconfig.DefaultFilesConfig.Archive = true args := []string{"http://invalidhost:invalidport/testdata/valid-register.tar"} fileList, tmpDir, err := GetSerializeOutputFiles(s.Ctx, args, rconfig.DefaultFilesConfig.Archive) @@ -180,7 +189,8 @@ func TestGetSortedArchivedFileThroughInvalidHttpList(t *testing.T) { } func TestGetSortedArchivedFileThroughValidHttpList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"http://dummyhost:80/testdata/valid-register.tar"} @@ -197,7 +207,8 @@ func TestGetSortedArchivedFileThroughValidHttpList(t *testing.T) { } func TestGetSortedArchivedFileThroughValidHttpWithNullContextList(t *testing.T) { - setup() + testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"http://dummyhost:80/testdata/valid-register.tar"} @@ -220,7 +231,8 @@ func Test_getTotalSize(t *testing.T) { func TestRegisterFile(t *testing.T) { t.Run("Successful run", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, nil) args := []string{"testdata/69_core.flyte_basics.lp.greet_1.pb"} @@ -230,7 +242,8 @@ func TestRegisterFile(t *testing.T) { assert.Nil(t, err) }) t.Run("Failed Scheduled launch plan registration", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() s.MockAdminClient.OnCreateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) variableMap := map[string]*core.Variable{ @@ -284,7 +297,8 @@ func TestRegisterFile(t *testing.T) { assert.NotNil(t, err) }) t.Run("Non existent file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() args := []string{"testdata/non-existent.pb"} var registerResults []Result @@ -295,7 +309,8 @@ func TestRegisterFile(t *testing.T) { assert.NotNil(t, err) }) t.Run("unmarhal failure", func(t 
*testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() args := []string{"testdata/valid-register.tar"} var registerResults []Result @@ -306,7 +321,8 @@ func TestRegisterFile(t *testing.T) { assert.NotNil(t, err) }) t.Run("AlreadyExists", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, status.Error(codes.AlreadyExists, "AlreadyExists")) @@ -319,7 +335,8 @@ func TestRegisterFile(t *testing.T) { assert.Nil(t, err) }) t.Run("Registration Error", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, status.Error(codes.InvalidArgument, "Invalid")) @@ -335,7 +352,8 @@ func TestRegisterFile(t *testing.T) { func TestHydrateLaunchPlanSpec(t *testing.T) { t.Run("IamRole override", func(t *testing.T) { - setup() + testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.AssumableIamRole = "iamRole" lpSpec := &admin.LaunchPlanSpec{} @@ -376,7 +394,8 @@ func TestHydrateLaunchPlanSpec(t *testing.T) { func TestUploadFastRegisterArtifact(t *testing.T) { t.Run("Successful upload", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) store, err := storage.NewDataStore(&storage.Config{ @@ -394,7 +413,8 @@ func TestUploadFastRegisterArtifact(t *testing.T) { assert.Nil(t, err) }) t.Run("Failed upload", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) store, err := storage.NewDataStore(&storage.Config{ @@ -472,7 +492,8 @@ func TestGetAllFlytesnacksExample(t *testing.T) { func TestRegister(t *testing.T) { t.Run("Failed to register", func(t 
*testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() node := &admin.NodeExecution{} err := register(s.Ctx, node, s.CmdCtx, rconfig.DefaultFilesConfig.DryRun, rconfig.DefaultFilesConfig.EnableSchedule) @@ -685,20 +706,23 @@ func TestLeftDiff(t *testing.T) { func TestValidateLaunchSpec(t *testing.T) { ctx := context.Background() t.Run("nil launchplan spec", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() err := validateLaunchSpec(ctx, nil, s.CmdCtx) assert.Nil(t, err) }) t.Run("launchplan spec with nil workflow id", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() lpSpec := &admin.LaunchPlanSpec{} err := validateLaunchSpec(ctx, lpSpec, s.CmdCtx) assert.Nil(t, err) }) t.Run("launchplan spec with empty metadata", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() lpSpec := &admin.LaunchPlanSpec{ WorkflowId: &core.Identifier{ @@ -712,7 +736,8 @@ func TestValidateLaunchSpec(t *testing.T) { assert.Nil(t, err) }) t.Run("launchplan spec with metadata and empty schedule", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() lpSpec := &admin.LaunchPlanSpec{ WorkflowId: &core.Identifier{ @@ -727,7 +752,8 @@ func TestValidateLaunchSpec(t *testing.T) { assert.Nil(t, err) }) t.Run("validate spec failed to fetch workflow", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() s.FetcherExt.OnFetchWorkflowVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) @@ -752,7 +778,8 @@ func TestValidateLaunchSpec(t *testing.T) { assert.Equal(t, "failed", err.Error()) }) t.Run("failed to fetch workflow", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() s.FetcherExt.OnFetchWorkflowVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, 
fmt.Errorf("failed")) @@ -774,7 +801,8 @@ func TestValidateLaunchSpec(t *testing.T) { assert.Equal(t, "failed", err.Error()) }) t.Run("launchplan spec missing required param schedule", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() variableMap := map[string]*core.Variable{ "var1": { @@ -836,7 +864,8 @@ func TestValidateLaunchSpec(t *testing.T) { assert.Contains(t, err.Error(), "param values are missing on scheduled workflow for the following params") }) t.Run("launchplan spec non empty schedule default param success", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() variableMap := map[string]*core.Variable{ "var1": { @@ -941,7 +970,8 @@ func TestValidateLaunchSpec(t *testing.T) { }) t.Run("launchplan spec non empty schedule required param without value fail", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() variableMap := map[string]*core.Variable{ "var1": { diff --git a/flytectl/cmd/sandbox/exec_test.go b/flytectl/cmd/sandbox/exec_test.go index 1fbe8dcadd..b7ff48e01e 100644 --- a/flytectl/cmd/sandbox/exec_test.go +++ b/flytectl/cmd/sandbox/exec_test.go @@ -50,7 +50,7 @@ func TestSandboxClusterExec(t *testing.T) { func TestSandboxClusterExecWithoutCmd(t *testing.T) { mockDocker := &mocks.Docker{} reader := bufio.NewReader(strings.NewReader("test")) - s := testutils.Setup() + s := testutils.Setup(t) ctx := s.Ctx mockDocker.OnContainerList(ctx, container.ListOptions{All: true}).Return([]types.Container{ diff --git a/flytectl/cmd/sandbox/status_test.go b/flytectl/cmd/sandbox/status_test.go index 41f43fadc7..03aa302e70 100644 --- a/flytectl/cmd/sandbox/status_test.go +++ b/flytectl/cmd/sandbox/status_test.go @@ -14,14 +14,14 @@ import ( func TestSandboxStatus(t *testing.T) { t.Run("Sandbox status with zero result", func(t *testing.T) { mockDocker := &mocks.Docker{} - s := testutils.Setup() + s := testutils.Setup(t) mockDocker.OnContainerList(s.Ctx, 
container.ListOptions{All: true}).Return([]types.Container{}, nil) docker.Client = mockDocker err := sandboxClusterStatus(s.Ctx, []string{}, s.CmdCtx) assert.Nil(t, err) }) t.Run("Sandbox status with running sandbox", func(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) ctx := s.Ctx mockDocker := &mocks.Docker{} mockDocker.OnContainerList(ctx, container.ListOptions{All: true}).Return([]types.Container{ diff --git a/flytectl/cmd/sandbox/teardown_test.go b/flytectl/cmd/sandbox/teardown_test.go index cb8c765138..8bead79cdb 100644 --- a/flytectl/cmd/sandbox/teardown_test.go +++ b/flytectl/cmd/sandbox/teardown_test.go @@ -20,7 +20,7 @@ func TestTearDownClusterFunc(t *testing.T) { var containers []types.Container _ = util.SetupFlyteDir() _ = util.WriteIntoFile([]byte("data"), configutil.FlytectlConfig) - s := testutils.Setup() + s := testutils.Setup(t) ctx := s.Ctx mockDocker := &mocks.Docker{} mockDocker.OnContainerList(ctx, container.ListOptions{All: true}).Return(containers, nil) diff --git a/flytectl/cmd/testutils/test_utils.go b/flytectl/cmd/testutils/test_utils.go index 1e2bba1365..037d0d8374 100644 --- a/flytectl/cmd/testutils/test_utils.go +++ b/flytectl/cmd/testutils/test_utils.go @@ -41,8 +41,7 @@ type TestStruct struct { Stderr *os.File } -// Make sure to call TearDown after using this function -func Setup() (s TestStruct) { +func Setup(t *testing.T) (s TestStruct) { s.Ctx = context.Background() s.Reader, s.Writer, s.Err = os.Pipe() if s.Err != nil { @@ -67,12 +66,13 @@ func Setup() (s TestStruct) { config.GetConfig().Domain = domainValue config.GetConfig().Output = output - return s -} + // We need to make sure that the original final descriptors are restored after the test + t.Cleanup(func() { + os.Stdout = s.StdOut + os.Stderr = s.Stderr + }) -func (s *TestStruct) TearDown() { - os.Stdout = s.StdOut - os.Stderr = s.Stderr + return s } // TearDownAndVerify TODO: Change this to verify log lines from context diff --git 
a/flytectl/cmd/update/execution_test.go b/flytectl/cmd/update/execution_test.go index d8e2db59e7..fbcb0b02e9 100644 --- a/flytectl/cmd/update/execution_test.go +++ b/flytectl/cmd/update/execution_test.go @@ -16,6 +16,7 @@ import ( func TestExecutionCanBeActivated(t *testing.T) { testExecutionUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED config.Activate = true @@ -34,6 +35,7 @@ func TestExecutionCanBeActivated(t *testing.T) { func TestExecutionCanBeArchived(t *testing.T) { testExecutionUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ACTIVE config.Archive = true @@ -52,6 +54,7 @@ func TestExecutionCanBeArchived(t *testing.T) { func TestExecutionCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { testExecutionUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { config.Activate = true config.Archive = true @@ -64,6 +67,7 @@ func TestExecutionCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { func TestExecutionUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { testExecutionUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ACTIVE config.Activate = true @@ -77,6 +81,7 @@ func TestExecutionUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { func TestExecutionUpdateWithoutForceFlagFails(t *testing.T) { testExecutionUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED config.Activate = true @@ -90,6 +95,7 @@ 
func TestExecutionUpdateWithoutForceFlagFails(t *testing.T) { func TestExecutionUpdateDoesNothingWithDryRunFlag(t *testing.T) { testExecutionUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED config.Activate = true @@ -104,6 +110,7 @@ func TestExecutionUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestForceFlagIsIgnoredWithDryRunDuringExecutionUpdate(t *testing.T) { t.Run("without --force", func(t *testing.T) { testExecutionUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED config.Activate = true @@ -119,6 +126,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringExecutionUpdate(t *testing.T) { t.Run("with --force", func(t *testing.T) { testExecutionUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED config.Activate = true @@ -135,6 +143,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringExecutionUpdate(t *testing.T) { func TestExecutionUpdateFailsWhenExecutionDoesNotExist(t *testing.T) { testExecutionUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) { s.FetcherExt. OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain). @@ -153,6 +162,7 @@ func TestExecutionUpdateFailsWhenExecutionDoesNotExist(t *testing.T) { func TestExecutionUpdateFailsWhenAdminClientFails(t *testing.T) { testExecutionUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) { s.FetcherExt. OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain). 
@@ -174,8 +184,7 @@ func TestExecutionUpdateFailsWhenAdminClientFails(t *testing.T) { } func TestExecutionUpdateRequiresExecutionName(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) err := updateExecutionFunc(s.Ctx, nil, s.CmdCtx) @@ -183,10 +192,12 @@ func TestExecutionUpdateRequiresExecutionName(t *testing.T) { } func testExecutionUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution), asserter func(s *testutils.TestStruct, err error), ) { testExecutionUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) { s.FetcherExt. OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain). @@ -201,12 +212,12 @@ func testExecutionUpdate( } func testExecutionUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, execution *admin.Execution), setup func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) target := newTestExecution() @@ -217,7 +228,6 @@ func testExecutionUpdateWithMockSetup( execution.UConfig = &execution.UpdateConfig{} if setup != nil { setup(&s, execution.UConfig, target) - defer s.TearDown() } args := []string{target.Id.Name} diff --git a/flytectl/cmd/update/launch_plan_meta_test.go b/flytectl/cmd/update/launch_plan_meta_test.go index 63d4ded737..aeb2e1638d 100644 --- a/flytectl/cmd/update/launch_plan_meta_test.go +++ b/flytectl/cmd/update/launch_plan_meta_test.go @@ -13,7 +13,7 @@ import ( ) func TestLaunchPlanMetadataCanBeActivated(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = 
admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -31,7 +31,7 @@ func TestLaunchPlanMetadataCanBeActivated(t *testing.T) { } func TestLaunchPlanMetadataCanBeArchived(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE config.Archive = true @@ -49,7 +49,7 @@ func TestLaunchPlanMetadataCanBeArchived(t *testing.T) { } func TestLaunchPlanMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { config.Activate = true config.Archive = true @@ -61,7 +61,7 @@ func TestLaunchPlanMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing. 
} func TestLaunchPlanMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE config.Activate = true @@ -74,7 +74,7 @@ func TestLaunchPlanMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) } func TestLaunchPlanMetadataUpdateWithoutForceFlagFails(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -87,7 +87,7 @@ func TestLaunchPlanMetadataUpdateWithoutForceFlagFails(t *testing.T) { } func TestLaunchPlanMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -101,7 +101,7 @@ func TestLaunchPlanMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanMetadataUpdate(t *testing.T) { t.Run("without --force", func(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -116,7 +116,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanMetadataUpdate(t *testing.T }) 
t.Run("with --force", func(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -133,6 +133,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanMetadataUpdate(t *testing.T func TestLaunchPlanMetadataUpdateFailsWhenLaunchPlanDoesNotExist(t *testing.T) { testNamedEntityUpdateWithMockSetup( + t, core.ResourceType_LAUNCH_PLAN, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. @@ -154,6 +155,7 @@ func TestLaunchPlanMetadataUpdateFailsWhenLaunchPlanDoesNotExist(t *testing.T) { func TestLaunchPlanMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { testNamedEntityUpdateWithMockSetup( + t, core.ResourceType_LAUNCH_PLAN, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. 
@@ -178,8 +180,7 @@ func TestLaunchPlanMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { } func TestLaunchPlanMetadataUpdateRequiresLaunchPlanName(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) config := &NamedEntityConfig{} diff --git a/flytectl/cmd/update/launch_plan_test.go b/flytectl/cmd/update/launch_plan_test.go index 5704702a2e..249a810118 100644 --- a/flytectl/cmd/update/launch_plan_test.go +++ b/flytectl/cmd/update/launch_plan_test.go @@ -16,6 +16,7 @@ import ( func TestLaunchPlanCanBeActivated(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_INACTIVE config.Activate = true @@ -34,6 +35,7 @@ func TestLaunchPlanCanBeActivated(t *testing.T) { func TestLaunchPlanCanBeArchived(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_ACTIVE config.Archive = true @@ -52,6 +54,7 @@ func TestLaunchPlanCanBeArchived(t *testing.T) { func TestLaunchPlanCanBeDeactivated(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_ACTIVE config.Deactivate = true @@ -70,6 +73,7 @@ func TestLaunchPlanCanBeDeactivated(t *testing.T) { func TestLaunchPlanCannotBeActivatedAndDeactivatedAtTheSameTime(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { config.Activate = true config.Deactivate = true @@ -82,6 +86,7 @@ func TestLaunchPlanCannotBeActivatedAndDeactivatedAtTheSameTime(t *testing.T) { func TestLaunchPlanUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s 
*testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_ACTIVE config.Activate = true @@ -95,6 +100,7 @@ func TestLaunchPlanUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { func TestLaunchPlanUpdateWithoutForceFlagFails(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_INACTIVE config.Activate = true @@ -108,6 +114,7 @@ func TestLaunchPlanUpdateWithoutForceFlagFails(t *testing.T) { func TestLaunchPlanUpdateDoesNothingWithDryRunFlag(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_INACTIVE config.Activate = true @@ -122,6 +129,7 @@ func TestLaunchPlanUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanUpdate(t *testing.T) { t.Run("without --force", func(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_INACTIVE config.Activate = true @@ -137,6 +145,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanUpdate(t *testing.T) { t.Run("with --force", func(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_INACTIVE config.Activate = true @@ -153,6 +162,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanUpdate(t *testing.T) { func TestLaunchPlanUpdateFailsWhenLaunchPlanDoesNotExist(t *testing.T) { testLaunchPlanUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, launchplan *admin.LaunchPlan) { s.MockAdminClient. 
OnGetLaunchPlanMatch( @@ -173,6 +183,7 @@ func TestLaunchPlanUpdateFailsWhenLaunchPlanDoesNotExist(t *testing.T) { func TestLaunchPlanUpdateFailsWhenAdminClientFails(t *testing.T) { testLaunchPlanUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, launchplan *admin.LaunchPlan) { s.MockAdminClient. OnGetLaunchPlanMatch( @@ -196,8 +207,7 @@ func TestLaunchPlanUpdateFailsWhenAdminClientFails(t *testing.T) { } func TestLaunchPlanUpdateRequiresLaunchPlanName(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) launchplan.UConfig = &launchplan.UpdateConfig{} @@ -211,8 +221,7 @@ func TestLaunchPlanUpdateRequiresLaunchPlanName(t *testing.T) { } func TestLaunchPlanUpdateRequiresLaunchPlanVersion(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) launchplan.UConfig = &launchplan.UpdateConfig{} @@ -226,10 +235,12 @@ func TestLaunchPlanUpdateRequiresLaunchPlanVersion(t *testing.T) { } func testLaunchPlanUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan), asserter func(s *testutils.TestStruct, err error), ) { testLaunchPlanUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, launchplan *admin.LaunchPlan) { s.MockAdminClient. 
OnGetLaunchPlanMatch( @@ -246,12 +257,12 @@ func testLaunchPlanUpdate( } func testLaunchPlanUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, launchplan *admin.LaunchPlan), setup func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) target := newTestLaunchPlan() @@ -262,7 +273,6 @@ func testLaunchPlanUpdateWithMockSetup( launchplan.UConfig = &launchplan.UpdateConfig{} if setup != nil { setup(&s, launchplan.UConfig, target) - defer s.TearDown() } args := []string{target.Id.Name} diff --git a/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go b/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go index c902794685..b7288d6dcc 100644 --- a/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go +++ b/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go @@ -20,6 +20,7 @@ const ( func TestClusterResourceAttributeUpdateRequiresAttributeFile(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ nil, /* assert */ func(s *testutils.TestStruct, err error) { assert.ErrorContains(t, err, "attrFile is mandatory") @@ -29,6 +30,7 @@ func TestClusterResourceAttributeUpdateRequiresAttributeFile(t *testing.T) { func TestClusterResourceAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataNonExistentFile config.Force = true @@ -42,6 +44,7 @@ func TestClusterResourceAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *tes func TestClusterResourceAttributeUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config 
*clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataInvalidAttrFile config.Force = true @@ -56,6 +59,7 @@ func TestClusterResourceAttributeUpdateFailsWhenAttributeFileIsMalformed(t *test func TestClusterResourceAttributeUpdateHappyPath(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowClusterResourceAttributesFilePath config.Force = true @@ -69,6 +73,7 @@ func TestClusterResourceAttributeUpdateHappyPath(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainClusterResourceAttributesFilePath config.Force = true @@ -82,6 +87,7 @@ func TestClusterResourceAttributeUpdateHappyPath(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectClusterResourceAttributesFilePath config.Force = true @@ -97,6 +103,7 @@ func TestClusterResourceAttributeUpdateHappyPath(t *testing.T) { func TestClusterResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowClusterResourceAttributesFilePath config.Force = false @@ -109,6 +116,7 @@ func TestClusterResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { 
testProjectDomainClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainClusterResourceAttributesFilePath config.Force = false @@ -121,6 +129,7 @@ func TestClusterResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectClusterResourceAttributesFilePath config.Force = false @@ -135,6 +144,7 @@ func TestClusterResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { func TestClusterResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowClusterResourceAttributesFilePath config.DryRun = true @@ -147,6 +157,7 @@ func TestClusterResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainClusterResourceAttributesFilePath config.DryRun = true @@ -159,6 +170,7 @@ func TestClusterResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectClusterResourceAttributesFilePath config.DryRun = true @@ -173,6 +185,7 @@ func 
TestClusterResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow without --force", func(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowClusterResourceAttributesFilePath config.Force = false @@ -186,6 +199,7 @@ func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) t.Run("workflow with --force", func(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowClusterResourceAttributesFilePath config.Force = true @@ -199,6 +213,7 @@ func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) t.Run("domain without --force", func(t *testing.T) { testProjectDomainClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainClusterResourceAttributesFilePath config.Force = false @@ -212,6 +227,7 @@ func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) t.Run("domain with --force", func(t *testing.T) { testProjectDomainClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainClusterResourceAttributesFilePath config.Force = true @@ -225,6 +241,7 @@ func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) t.Run("project without --force", func(t *testing.T) { testProjectClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, 
config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectClusterResourceAttributesFilePath config.Force = false @@ -238,6 +255,7 @@ func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) t.Run("project with --force", func(t *testing.T) { testProjectClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectClusterResourceAttributesFilePath config.Force = true @@ -253,6 +271,7 @@ func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE). @@ -274,6 +293,7 @@ func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testi t.Run("domain", func(t *testing.T) { testProjectDomainClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE). @@ -295,6 +315,7 @@ func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testi t.Run("project", func(t *testing.T) { testProjectClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE). 
@@ -318,6 +339,7 @@ func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testi func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE). @@ -338,6 +360,7 @@ func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE). @@ -358,6 +381,7 @@ func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE). @@ -378,10 +402,12 @@ func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { } func testWorkflowClusterResourceAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { testWorkflowClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE). 
@@ -396,12 +422,12 @@ func testWorkflowClusterResourceAttributeUpdate( } func testWorkflowClusterResourceAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) clusterresourceattribute.DefaultUpdateConfig = &clusterresourceattribute.AttrUpdateConfig{} target := newTestWorkflowClusterResourceAttribute() @@ -412,7 +438,6 @@ func testWorkflowClusterResourceAttributeUpdateWithMockSetup( if setup != nil { setup(&s, clusterresourceattribute.DefaultUpdateConfig, target) - defer s.TearDown() } err := updateClusterResourceAttributesFunc(s.Ctx, nil, s.CmdCtx) @@ -446,10 +471,12 @@ func newTestWorkflowClusterResourceAttribute() *admin.WorkflowAttributes { } func testProjectClusterResourceAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE). 
@@ -464,12 +491,12 @@ func testProjectClusterResourceAttributeUpdate( } func testProjectClusterResourceAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) clusterresourceattribute.DefaultUpdateConfig = &clusterresourceattribute.AttrUpdateConfig{} target := newTestProjectClusterResourceAttribute() @@ -480,7 +507,6 @@ func testProjectClusterResourceAttributeUpdateWithMockSetup( if setup != nil { setup(&s, clusterresourceattribute.DefaultUpdateConfig, target) - defer s.TearDown() } err := updateClusterResourceAttributesFunc(s.Ctx, nil, s.CmdCtx) @@ -512,10 +538,12 @@ func newTestProjectClusterResourceAttribute() *admin.ProjectAttributes { } func testProjectDomainClusterResourceAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectDomainClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE). 
@@ -530,11 +558,13 @@ func testProjectDomainClusterResourceAttributeUpdate( } func testProjectDomainClusterResourceAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + clusterresourceattribute.DefaultUpdateConfig = &clusterresourceattribute.AttrUpdateConfig{} target := newTestProjectDomainClusterResourceAttribute() @@ -544,7 +574,6 @@ func testProjectDomainClusterResourceAttributeUpdateWithMockSetup( if setup != nil { setup(&s, clusterresourceattribute.DefaultUpdateConfig, target) - defer s.TearDown() } err := updateClusterResourceAttributesFunc(s.Ctx, nil, s.CmdCtx) diff --git a/flytectl/cmd/update/matchable_execution_cluster_label_test.go b/flytectl/cmd/update/matchable_execution_cluster_label_test.go index 0bbccbc83b..1006234626 100644 --- a/flytectl/cmd/update/matchable_execution_cluster_label_test.go +++ b/flytectl/cmd/update/matchable_execution_cluster_label_test.go @@ -20,6 +20,7 @@ const ( func TestExecutionClusterLabelUpdateRequiresAttributeFile(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ nil, /* assert */ func(s *testutils.TestStruct, err error) { assert.ErrorContains(t, err, "attrFile is mandatory") @@ -29,6 +30,7 @@ func TestExecutionClusterLabelUpdateRequiresAttributeFile(t *testing.T) { func TestExecutionClusterLabelUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataNonExistentFile config.Force = true @@ -42,6 +44,7 @@ func TestExecutionClusterLabelUpdateFailsWhenAttributeFileDoesNotExist(t *testin func 
TestExecutionClusterLabelUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataInvalidAttrFile config.Force = true @@ -56,6 +59,7 @@ func TestExecutionClusterLabelUpdateFailsWhenAttributeFileIsMalformed(t *testing func TestExecutionClusterLabelUpdateHappyPath(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionClusterLabelFilePath config.Force = true @@ -69,6 +73,7 @@ func TestExecutionClusterLabelUpdateHappyPath(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionClusterLabelFilePath config.Force = true @@ -82,6 +87,7 @@ func TestExecutionClusterLabelUpdateHappyPath(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionClusterLabelFilePath config.Force = true @@ -97,6 +103,7 @@ func TestExecutionClusterLabelUpdateHappyPath(t *testing.T) { func TestExecutionClusterLabelUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionClusterLabelFilePath config.Force = false @@ -109,6 +116,7 @@ func 
TestExecutionClusterLabelUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionClusterLabelFilePath config.Force = false @@ -121,6 +129,7 @@ func TestExecutionClusterLabelUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionClusterLabelFilePath config.Force = false @@ -135,6 +144,7 @@ func TestExecutionClusterLabelUpdateFailsWithoutForceFlag(t *testing.T) { func TestExecutionClusterLabelUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionClusterLabelFilePath config.DryRun = true @@ -147,6 +157,7 @@ func TestExecutionClusterLabelUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionClusterLabelFilePath config.DryRun = true @@ -159,6 +170,7 @@ func TestExecutionClusterLabelUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionClusterLabelFilePath 
config.DryRun = true @@ -173,6 +185,7 @@ func TestExecutionClusterLabelUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow without --force", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionClusterLabelFilePath config.Force = false @@ -186,6 +199,7 @@ func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow with --force", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionClusterLabelFilePath config.Force = true @@ -199,6 +213,7 @@ func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain without --force", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionClusterLabelFilePath config.Force = false @@ -212,6 +227,7 @@ func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain with --force", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionClusterLabelFilePath config.Force = true @@ -225,6 +241,7 @@ func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project without --force", func(t *testing.T) { testProjectExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config 
*executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionClusterLabelFilePath config.Force = false @@ -238,6 +255,7 @@ func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project with --force", func(t *testing.T) { testProjectExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionClusterLabelFilePath config.Force = true @@ -253,6 +271,7 @@ func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). @@ -274,6 +293,7 @@ func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing. t.Run("domain", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). @@ -295,6 +315,7 @@ func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing. t.Run("project", func(t *testing.T) { testProjectExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). @@ -318,6 +339,7 @@ func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing. 
func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). @@ -338,6 +360,7 @@ func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). @@ -358,6 +381,7 @@ func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). @@ -378,10 +402,12 @@ func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) { } func testWorkflowExecutionClusterLabelUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { testWorkflowExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). 
@@ -396,11 +422,13 @@ func testWorkflowExecutionClusterLabelUpdate( } func testWorkflowExecutionClusterLabelUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + executionclusterlabel.DefaultUpdateConfig = &executionclusterlabel.AttrUpdateConfig{} target := newTestWorkflowExecutionClusterLabel() @@ -439,10 +467,12 @@ func newTestWorkflowExecutionClusterLabel() *admin.WorkflowAttributes { } func testProjectExecutionClusterLabelUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). 
@@ -457,11 +487,13 @@ func testProjectExecutionClusterLabelUpdate( } func testProjectExecutionClusterLabelUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + executionclusterlabel.DefaultUpdateConfig = &executionclusterlabel.AttrUpdateConfig{} target := newTestProjectExecutionClusterLabel() @@ -498,10 +530,12 @@ func newTestProjectExecutionClusterLabel() *admin.ProjectAttributes { } func testProjectDomainExecutionClusterLabelUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectDomainExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). 
@@ -516,11 +550,13 @@ func testProjectDomainExecutionClusterLabelUpdate( } func testProjectDomainExecutionClusterLabelUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + executionclusterlabel.DefaultUpdateConfig = &executionclusterlabel.AttrUpdateConfig{} target := newTestProjectDomainExecutionClusterLabel() diff --git a/flytectl/cmd/update/matchable_execution_queue_attribute_test.go b/flytectl/cmd/update/matchable_execution_queue_attribute_test.go index a88dc80717..e16526faa6 100644 --- a/flytectl/cmd/update/matchable_execution_queue_attribute_test.go +++ b/flytectl/cmd/update/matchable_execution_queue_attribute_test.go @@ -20,6 +20,7 @@ const ( func TestExecutionQueueAttributeUpdateRequiresAttributeFile(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ nil, /* assert */ func(s *testutils.TestStruct, err error) { assert.ErrorContains(t, err, "attrFile is mandatory") @@ -29,6 +30,7 @@ func TestExecutionQueueAttributeUpdateRequiresAttributeFile(t *testing.T) { func TestExecutionQueueAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataNonExistentFile config.Force = true @@ -42,6 +44,7 @@ func TestExecutionQueueAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *test func TestExecutionQueueAttributeUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { 
config.AttrFile = testDataInvalidAttrFile config.Force = true @@ -56,6 +59,7 @@ func TestExecutionQueueAttributeUpdateFailsWhenAttributeFileIsMalformed(t *testi func TestExecutionQueueAttributeUpdateHappyPath(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath config.Force = true @@ -69,6 +73,7 @@ func TestExecutionQueueAttributeUpdateHappyPath(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath config.Force = true @@ -82,6 +87,7 @@ func TestExecutionQueueAttributeUpdateHappyPath(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath config.Force = true @@ -97,6 +103,7 @@ func TestExecutionQueueAttributeUpdateHappyPath(t *testing.T) { func TestExecutionQueueAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath config.Force = false @@ -109,6 +116,7 @@ func TestExecutionQueueAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdate( + t, /* setup */ func(s 
*testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath config.Force = false @@ -121,6 +129,7 @@ func TestExecutionQueueAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath config.Force = false @@ -135,6 +144,7 @@ func TestExecutionQueueAttributeUpdateFailsWithoutForceFlag(t *testing.T) { func TestExecutionQueueAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath config.DryRun = true @@ -147,6 +157,7 @@ func TestExecutionQueueAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath config.DryRun = true @@ -159,6 +170,7 @@ func TestExecutionQueueAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath config.DryRun = true @@ -173,6 +185,7 @@ func 
TestExecutionQueueAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow without --force", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath config.Force = false @@ -186,6 +199,7 @@ func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow with --force", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath config.Force = true @@ -199,6 +213,7 @@ func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain without --force", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath config.Force = false @@ -212,6 +227,7 @@ func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain with --force", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath config.Force = true @@ -225,6 +241,7 @@ func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project without --force", func(t *testing.T) { testProjectExecutionQueueAttributeUpdate( + t, /* setup */ func(s 
*testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath config.Force = false @@ -238,6 +255,7 @@ func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project with --force", func(t *testing.T) { testProjectExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath config.Force = true @@ -253,6 +271,7 @@ func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE). @@ -274,6 +293,7 @@ func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testin t.Run("domain", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE). @@ -295,6 +315,7 @@ func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testin t.Run("project", func(t *testing.T) { testProjectExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE). 
@@ -318,6 +339,7 @@ func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testin func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE). @@ -338,6 +360,7 @@ func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE). @@ -358,6 +381,7 @@ func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE). @@ -378,10 +402,12 @@ func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { } func testWorkflowExecutionQueueAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { testWorkflowExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE). 
@@ -396,11 +422,13 @@ func testWorkflowExecutionQueueAttributeUpdate( } func testWorkflowExecutionQueueAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + executionqueueattribute.DefaultUpdateConfig = &executionqueueattribute.AttrUpdateConfig{} target := newTestWorkflowExecutionQueueAttribute() @@ -410,7 +438,6 @@ func testWorkflowExecutionQueueAttributeUpdateWithMockSetup( if setup != nil { setup(&s, executionqueueattribute.DefaultUpdateConfig, target) - defer s.TearDown() } err := updateExecutionQueueAttributesFunc(s.Ctx, nil, s.CmdCtx) @@ -444,10 +471,12 @@ func newTestWorkflowExecutionQueueAttribute() *admin.WorkflowAttributes { } func testProjectExecutionQueueAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE). 
@@ -462,12 +491,12 @@ func testProjectExecutionQueueAttributeUpdate( } func testProjectExecutionQueueAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) executionqueueattribute.DefaultUpdateConfig = &executionqueueattribute.AttrUpdateConfig{} target := newTestProjectExecutionQueueAttribute() @@ -478,7 +507,6 @@ func testProjectExecutionQueueAttributeUpdateWithMockSetup( if setup != nil { setup(&s, executionqueueattribute.DefaultUpdateConfig, target) - defer s.TearDown() } err := updateExecutionQueueAttributesFunc(s.Ctx, nil, s.CmdCtx) @@ -510,10 +538,12 @@ func newTestProjectExecutionQueueAttribute() *admin.ProjectAttributes { } func testProjectDomainExecutionQueueAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE). 
@@ -528,11 +558,13 @@ func testProjectDomainExecutionQueueAttributeUpdate( } func testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + executionqueueattribute.DefaultUpdateConfig = &executionqueueattribute.AttrUpdateConfig{} target := newTestProjectDomainExecutionQueueAttribute() @@ -542,7 +574,6 @@ func testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( if setup != nil { setup(&s, executionqueueattribute.DefaultUpdateConfig, target) - defer s.TearDown() } err := updateExecutionQueueAttributesFunc(s.Ctx, nil, s.CmdCtx) diff --git a/flytectl/cmd/update/matchable_plugin_override_test.go b/flytectl/cmd/update/matchable_plugin_override_test.go index 3207951db6..3b0181392b 100644 --- a/flytectl/cmd/update/matchable_plugin_override_test.go +++ b/flytectl/cmd/update/matchable_plugin_override_test.go @@ -20,6 +20,7 @@ const ( func TestPluginOverrideUpdateRequiresAttributeFile(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ nil, /* assert */ func(s *testutils.TestStruct, err error) { assert.ErrorContains(t, err, "attrFile is mandatory") @@ -29,6 +30,7 @@ func TestPluginOverrideUpdateRequiresAttributeFile(t *testing.T) { func TestPluginOverrideUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataNonExistentFile config.Force = true @@ -42,6 +44,7 @@ func TestPluginOverrideUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { func TestPluginOverrideUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { 
testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataInvalidAttrFile config.Force = true @@ -56,6 +59,7 @@ func TestPluginOverrideUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { func TestPluginOverrideUpdateHappyPath(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowPluginOverrideFilePath config.Force = true @@ -69,6 +73,7 @@ func TestPluginOverrideUpdateHappyPath(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainPluginOverrideFilePath config.Force = true @@ -82,6 +87,7 @@ func TestPluginOverrideUpdateHappyPath(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectPluginOverrideFilePath config.Force = true @@ -97,6 +103,7 @@ func TestPluginOverrideUpdateHappyPath(t *testing.T) { func TestPluginOverrideUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowPluginOverrideFilePath config.Force = false @@ -109,6 +116,7 @@ func TestPluginOverrideUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config 
*pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainPluginOverrideFilePath config.Force = false @@ -121,6 +129,7 @@ func TestPluginOverrideUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectPluginOverrideFilePath config.Force = false @@ -135,6 +144,7 @@ func TestPluginOverrideUpdateFailsWithoutForceFlag(t *testing.T) { func TestPluginOverrideUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowPluginOverrideFilePath config.DryRun = true @@ -147,6 +157,7 @@ func TestPluginOverrideUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainPluginOverrideFilePath config.DryRun = true @@ -159,6 +170,7 @@ func TestPluginOverrideUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectPluginOverrideFilePath config.DryRun = true @@ -173,6 +185,7 @@ func TestPluginOverrideUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow without --force", func(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config 
*pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowPluginOverrideFilePath config.Force = false @@ -186,6 +199,7 @@ func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow with --force", func(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowPluginOverrideFilePath config.Force = true @@ -199,6 +213,7 @@ func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain without --force", func(t *testing.T) { testProjectDomainPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainPluginOverrideFilePath config.Force = false @@ -212,6 +227,7 @@ func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain with --force", func(t *testing.T) { testProjectDomainPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainPluginOverrideFilePath config.Force = true @@ -225,6 +241,7 @@ func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project without --force", func(t *testing.T) { testProjectPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectPluginOverrideFilePath config.Force = false @@ -238,6 +255,7 @@ func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project with --force", func(t *testing.T) { testProjectPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = 
validProjectPluginOverrideFilePath config.Force = true @@ -253,6 +271,7 @@ func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE). @@ -274,6 +293,7 @@ func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE). @@ -295,6 +315,7 @@ func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE). @@ -318,6 +339,7 @@ func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE). 
@@ -338,6 +360,7 @@ func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE). @@ -358,6 +381,7 @@ func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE). @@ -378,10 +402,12 @@ func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) { } func testWorkflowPluginOverrideUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { testWorkflowPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE). 
@@ -396,11 +422,13 @@ func testWorkflowPluginOverrideUpdate( } func testWorkflowPluginOverrideUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + pluginoverride.DefaultUpdateConfig = &pluginoverride.AttrUpdateConfig{} target := newTestWorkflowPluginOverride() @@ -449,10 +477,12 @@ func newTestWorkflowPluginOverride() *admin.WorkflowAttributes { } func testProjectPluginOverrideUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE). 
@@ -467,11 +497,13 @@ func testProjectPluginOverrideUpdate( } func testProjectPluginOverrideUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + pluginoverride.DefaultUpdateConfig = &pluginoverride.AttrUpdateConfig{} target := newTestProjectPluginOverride() @@ -518,10 +550,12 @@ func newTestProjectPluginOverride() *admin.ProjectAttributes { } func testProjectDomainPluginOverrideUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectDomainPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE). 
@@ -536,11 +570,13 @@ func testProjectDomainPluginOverrideUpdate( } func testProjectDomainPluginOverrideUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + pluginoverride.DefaultUpdateConfig = &pluginoverride.AttrUpdateConfig{} target := newTestProjectDomainPluginOverride() diff --git a/flytectl/cmd/update/matchable_task_resource_attribute_test.go b/flytectl/cmd/update/matchable_task_resource_attribute_test.go index 6e54b17e34..42c2c3ab4f 100644 --- a/flytectl/cmd/update/matchable_task_resource_attribute_test.go +++ b/flytectl/cmd/update/matchable_task_resource_attribute_test.go @@ -20,6 +20,7 @@ const ( func TestTaskResourceAttributeUpdateRequiresAttributeFile(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ nil, /* assert */ func(s *testutils.TestStruct, err error) { assert.ErrorContains(t, err, "attrFile is mandatory") @@ -29,6 +30,7 @@ func TestTaskResourceAttributeUpdateRequiresAttributeFile(t *testing.T) { func TestTaskResourceAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataNonExistentFile config.Force = true @@ -42,6 +44,7 @@ func TestTaskResourceAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *testin func TestTaskResourceAttributeUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataInvalidAttrFile config.Force = true @@ -56,6 +59,7 @@ 
func TestTaskResourceAttributeUpdateFailsWhenAttributeFileIsMalformed(t *testing func TestTaskResourceAttributeUpdateHappyPath(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowTaskAttributesFilePath config.Force = true @@ -69,6 +73,7 @@ func TestTaskResourceAttributeUpdateHappyPath(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainTaskAttributesFilePath config.Force = true @@ -82,6 +87,7 @@ func TestTaskResourceAttributeUpdateHappyPath(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectTaskAttributesFilePath config.Force = true @@ -97,6 +103,7 @@ func TestTaskResourceAttributeUpdateHappyPath(t *testing.T) { func TestTaskResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowTaskAttributesFilePath config.Force = false @@ -109,6 +116,7 @@ func TestTaskResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainTaskAttributesFilePath config.Force = 
false @@ -121,6 +129,7 @@ func TestTaskResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectTaskAttributesFilePath config.Force = false @@ -135,6 +144,7 @@ func TestTaskResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { func TestTaskResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowTaskAttributesFilePath config.DryRun = true @@ -147,6 +157,7 @@ func TestTaskResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainTaskAttributesFilePath config.DryRun = true @@ -159,6 +170,7 @@ func TestTaskResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectTaskAttributesFilePath config.DryRun = true @@ -173,6 +185,7 @@ func TestTaskResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow without --force", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target 
*admin.WorkflowAttributes) { config.AttrFile = validWorkflowTaskAttributesFilePath config.Force = false @@ -186,6 +199,7 @@ func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow with --force", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowTaskAttributesFilePath config.Force = true @@ -199,6 +213,7 @@ func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain without --force", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainTaskAttributesFilePath config.Force = false @@ -212,6 +227,7 @@ func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain with --force", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainTaskAttributesFilePath config.Force = true @@ -225,6 +241,7 @@ func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project without --force", func(t *testing.T) { testProjectTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectTaskAttributesFilePath config.Force = false @@ -238,6 +255,7 @@ func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project with --force", func(t *testing.T) { testProjectTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config 
*taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectTaskAttributesFilePath config.Force = true @@ -253,6 +271,7 @@ func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE). @@ -274,6 +293,7 @@ func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing. t.Run("domain", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE). @@ -295,6 +315,7 @@ func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing. t.Run("project", func(t *testing.T) { testProjectTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE). @@ -318,6 +339,7 @@ func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing. func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE). 
@@ -338,6 +360,7 @@ func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE). @@ -358,6 +381,7 @@ func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE). @@ -378,10 +402,12 @@ func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { } func testWorkflowTaskResourceAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { testWorkflowTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE). 
@@ -396,11 +422,13 @@ func testWorkflowTaskResourceAttributeUpdate( } func testWorkflowTaskResourceAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + taskresourceattribute.DefaultUpdateConfig = &taskresourceattribute.AttrUpdateConfig{} target := newTestWorkflowTaskResourceAttribute() @@ -442,10 +470,12 @@ func newTestWorkflowTaskResourceAttribute() *admin.WorkflowAttributes { } func testProjectTaskResourceAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE). 
@@ -460,11 +490,13 @@ func testProjectTaskResourceAttributeUpdate( } func testProjectTaskResourceAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + taskresourceattribute.DefaultUpdateConfig = &taskresourceattribute.AttrUpdateConfig{} target := newTestProjectTaskResourceAttribute() @@ -504,10 +536,12 @@ func newTestProjectTaskResourceAttribute() *admin.ProjectAttributes { } func testProjectDomainTaskResourceAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectDomainTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE). 
@@ -522,11 +556,13 @@ func testProjectDomainTaskResourceAttributeUpdate( } func testProjectDomainTaskResourceAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + taskresourceattribute.DefaultUpdateConfig = &taskresourceattribute.AttrUpdateConfig{} target := newTestProjectDomainTaskResourceAttribute() diff --git a/flytectl/cmd/update/matchable_workflow_execution_config_test.go b/flytectl/cmd/update/matchable_workflow_execution_config_test.go index 06e0996d37..c75b2fd58f 100644 --- a/flytectl/cmd/update/matchable_workflow_execution_config_test.go +++ b/flytectl/cmd/update/matchable_workflow_execution_config_test.go @@ -20,6 +20,7 @@ const ( func TestWorkflowExecutionConfigUpdateRequiresAttributeFile(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ nil, /* assert */ func(s *testutils.TestStruct, err error) { assert.ErrorContains(t, err, "attrFile is mandatory") @@ -29,6 +30,7 @@ func TestWorkflowExecutionConfigUpdateRequiresAttributeFile(t *testing.T) { func TestWorkflowExecutionConfigUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataNonExistentFile config.Force = true @@ -42,6 +44,7 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAttributeFileDoesNotExist(t *test func TestWorkflowExecutionConfigUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = 
testDataInvalidAttrFile config.Force = true @@ -56,6 +59,7 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAttributeFileIsMalformed(t *testi func TestWorkflowExecutionConfigUpdateHappyPath(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionConfigFilePath config.Force = true @@ -69,6 +73,7 @@ func TestWorkflowExecutionConfigUpdateHappyPath(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath config.Force = true @@ -82,6 +87,7 @@ func TestWorkflowExecutionConfigUpdateHappyPath(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectWorkflowExecutionConfigFilePath config.Force = true @@ -97,6 +103,7 @@ func TestWorkflowExecutionConfigUpdateHappyPath(t *testing.T) { func TestWorkflowExecutionConfigUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionConfigFilePath config.Force = false @@ -109,6 +116,7 @@ func TestWorkflowExecutionConfigUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target 
*admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath config.Force = false @@ -121,6 +129,7 @@ func TestWorkflowExecutionConfigUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectWorkflowExecutionConfigFilePath config.Force = false @@ -135,6 +144,7 @@ func TestWorkflowExecutionConfigUpdateFailsWithoutForceFlag(t *testing.T) { func TestWorkflowExecutionConfigUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionConfigFilePath config.DryRun = true @@ -147,6 +157,7 @@ func TestWorkflowExecutionConfigUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath config.DryRun = true @@ -159,6 +170,7 @@ func TestWorkflowExecutionConfigUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectWorkflowExecutionConfigFilePath config.DryRun = true @@ -173,6 +185,7 @@ func TestWorkflowExecutionConfigUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow without --force", 
func(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionConfigFilePath config.Force = false @@ -186,6 +199,7 @@ func TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow with --force", func(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionConfigFilePath config.Force = true @@ -199,6 +213,7 @@ func TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain without --force", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath config.Force = false @@ -212,6 +227,7 @@ func TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain with --force", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath config.Force = true @@ -225,6 +241,7 @@ func TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project without --force", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectWorkflowExecutionConfigFilePath config.Force = false @@ -238,6 +255,7 @@ func 
TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project with --force", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectWorkflowExecutionConfigFilePath config.Force = true @@ -253,6 +271,7 @@ func TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). @@ -274,6 +293,7 @@ func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testin t.Run("domain", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). @@ -295,6 +315,7 @@ func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testin t.Run("project", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). 
@@ -318,6 +339,7 @@ func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testin func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). @@ -338,6 +360,7 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). @@ -358,6 +381,7 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). @@ -378,10 +402,12 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) { } func testWorkflowExecutionConfigUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { testWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). 
@@ -396,11 +422,13 @@ func testWorkflowExecutionConfigUpdate( } func testWorkflowExecutionConfigUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + workflowexecutionconfig.DefaultUpdateConfig = &workflowexecutionconfig.AttrUpdateConfig{} target := newTestWorkflowExecutionConfig() @@ -446,10 +474,12 @@ func newTestWorkflowExecutionConfig() *admin.WorkflowAttributes { } func testProjectWorkflowExecutionConfigUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). 
@@ -464,11 +494,13 @@ func testProjectWorkflowExecutionConfigUpdate( } func testProjectWorkflowExecutionConfigUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + workflowexecutionconfig.DefaultUpdateConfig = &workflowexecutionconfig.AttrUpdateConfig{} target := newTestProjectWorkflowExecutionConfig() @@ -512,10 +544,12 @@ func newTestProjectWorkflowExecutionConfig() *admin.ProjectAttributes { } func testProjectDomainWorkflowExecutionConfigUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectDomainWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). 
@@ -530,11 +564,13 @@ func testProjectDomainWorkflowExecutionConfigUpdate( } func testProjectDomainWorkflowExecutionConfigUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + workflowexecutionconfig.DefaultUpdateConfig = &workflowexecutionconfig.AttrUpdateConfig{} target := newTestProjectDomainWorkflowExecutionConfig() diff --git a/flytectl/cmd/update/named_entity_test.go b/flytectl/cmd/update/named_entity_test.go index 2dbb50fba5..4d4e5b2783 100644 --- a/flytectl/cmd/update/named_entity_test.go +++ b/flytectl/cmd/update/named_entity_test.go @@ -3,6 +3,7 @@ package update import ( "context" "fmt" + "testing" "github.com/flyteorg/flyte/flytectl/cmd/config" cmdCore "github.com/flyteorg/flyte/flytectl/cmd/core" @@ -13,11 +14,13 @@ import ( ) func testNamedEntityUpdate( + t *testing.T, resourceType core.ResourceType, setup func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity), asserter func(s *testutils.TestStruct, err error), ) { testNamedEntityUpdateWithMockSetup( + t, resourceType, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. 
@@ -35,13 +38,13 @@ func testNamedEntityUpdate( } func testNamedEntityUpdateWithMockSetup( + t *testing.T, resourceType core.ResourceType, mockSetup func(s *testutils.TestStruct, namedEntity *admin.NamedEntity), setup func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) config := &NamedEntityConfig{} target := newTestNamedEntity(resourceType) @@ -52,7 +55,6 @@ func testNamedEntityUpdateWithMockSetup( if setup != nil { setup(&s, config, target) - defer s.TearDown() } updateMetadataFactory := getUpdateMetadataFactory(resourceType) diff --git a/flytectl/cmd/update/project_test.go b/flytectl/cmd/update/project_test.go index c5785e0a12..0ca41c4309 100644 --- a/flytectl/cmd/update/project_test.go +++ b/flytectl/cmd/update/project_test.go @@ -15,6 +15,7 @@ import ( func TestProjectCanBeActivated(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ARCHIVED config.Activate = true @@ -33,6 +34,7 @@ func TestProjectCanBeActivated(t *testing.T) { func TestProjectCanBeArchived(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ACTIVE config.Archive = true @@ -51,6 +53,7 @@ func TestProjectCanBeArchived(t *testing.T) { func TestProjectCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { config.Activate = true config.Archive = true @@ -63,6 +66,7 @@ func TestProjectCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { func TestProjectUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config 
*project.ConfigProject, project *admin.Project) { project.State = admin.Project_ACTIVE config.Activate = true @@ -76,6 +80,7 @@ func TestProjectUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { func TestProjectUpdateWithoutForceFlagFails(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ARCHIVED config.Activate = true @@ -89,6 +94,7 @@ func TestProjectUpdateWithoutForceFlagFails(t *testing.T) { func TestProjectUpdateDoesNothingWithDryRunFlag(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ARCHIVED config.Activate = true @@ -103,6 +109,7 @@ func TestProjectUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestForceFlagIsIgnoredWithDryRunDuringProjectUpdate(t *testing.T) { t.Run("without --force", func(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ARCHIVED config.Activate = true @@ -118,6 +125,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringProjectUpdate(t *testing.T) { t.Run("with --force", func(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ARCHIVED config.Activate = true @@ -134,6 +142,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringProjectUpdate(t *testing.T) { func TestProjectUpdateFailsWhenProjectDoesNotExist(t *testing.T) { testProjectUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) { s.FetcherExt. OnGetProjectByID(s.Ctx, project.Id). 
@@ -152,6 +161,7 @@ func TestProjectUpdateFailsWhenProjectDoesNotExist(t *testing.T) { func TestProjectUpdateFailsWhenAdminClientFails(t *testing.T) { testProjectUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) { s.FetcherExt. OnGetProjectByID(s.Ctx, project.Id). @@ -174,6 +184,7 @@ func TestProjectUpdateFailsWhenAdminClientFails(t *testing.T) { func TestProjectUpdateRequiresProjectId(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { config.ID = "" }, @@ -184,6 +195,7 @@ func TestProjectUpdateRequiresProjectId(t *testing.T) { func TestProjectUpdateDoesNotActivateArchivedProject(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ARCHIVED config.Activate = false @@ -203,10 +215,12 @@ func TestProjectUpdateDoesNotActivateArchivedProject(t *testing.T) { } func testProjectUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project), asserter func(s *testutils.TestStruct, err error), ) { testProjectUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) { s.FetcherExt. OnGetProjectByID(s.Ctx, project.Id). 
@@ -221,11 +235,13 @@ func testProjectUpdate( } func testProjectUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, project *admin.Project), setup func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + target := newTestProject() if mockSetup != nil { diff --git a/flytectl/cmd/update/task_meta_test.go b/flytectl/cmd/update/task_meta_test.go index 09cc573115..69cd7c4072 100644 --- a/flytectl/cmd/update/task_meta_test.go +++ b/flytectl/cmd/update/task_meta_test.go @@ -13,7 +13,7 @@ import ( ) func TestTaskMetadataCanBeActivated(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -31,7 +31,7 @@ func TestTaskMetadataCanBeActivated(t *testing.T) { } func TestTaskMetadataCanBeArchived(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE config.Archive = true @@ -49,7 +49,7 @@ func TestTaskMetadataCanBeArchived(t *testing.T) { } func TestTaskMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { config.Activate = true config.Archive = true @@ -61,7 +61,7 @@ func TestTaskMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { } func TestTaskMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) 
{ - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE config.Activate = true @@ -74,7 +74,7 @@ func TestTaskMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { } func TestTaskMetadataUpdateWithoutForceFlagFails(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -87,7 +87,7 @@ func TestTaskMetadataUpdateWithoutForceFlagFails(t *testing.T) { } func TestTaskMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -101,7 +101,7 @@ func TestTaskMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestForceFlagIsIgnoredWithDryRunDuringTaskMetadataUpdate(t *testing.T) { t.Run("without --force", func(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -116,7 +116,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringTaskMetadataUpdate(t *testing.T) { }) t.Run("with --force", func(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s 
*testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -133,6 +133,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringTaskMetadataUpdate(t *testing.T) { func TestTaskMetadataUpdateFailsWhenTaskDoesNotExist(t *testing.T) { testNamedEntityUpdateWithMockSetup( + t, core.ResourceType_TASK, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. @@ -154,6 +155,7 @@ func TestTaskMetadataUpdateFailsWhenTaskDoesNotExist(t *testing.T) { func TestTaskMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { testNamedEntityUpdateWithMockSetup( + t, core.ResourceType_TASK, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. @@ -178,8 +180,7 @@ func TestTaskMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { } func TestTaskMetadataUpdateRequiresTaskName(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) config := &NamedEntityConfig{} diff --git a/flytectl/cmd/update/workflow_meta_test.go b/flytectl/cmd/update/workflow_meta_test.go index 05589ee6c7..fc620b44aa 100644 --- a/flytectl/cmd/update/workflow_meta_test.go +++ b/flytectl/cmd/update/workflow_meta_test.go @@ -13,7 +13,9 @@ import ( ) func TestWorkflowMetadataCanBeActivated(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -31,7 +33,9 @@ func TestWorkflowMetadataCanBeActivated(t *testing.T) { } func TestWorkflowMetadataCanBeArchived(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s 
*testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE config.Archive = true @@ -49,7 +53,9 @@ func TestWorkflowMetadataCanBeArchived(t *testing.T) { } func TestWorkflowMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { config.Activate = true config.Archive = true @@ -61,7 +67,9 @@ func TestWorkflowMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) } func TestWorkflowMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE config.Activate = true @@ -74,7 +82,9 @@ func TestWorkflowMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { } func TestWorkflowMetadataUpdateWithoutForceFlagFails(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -87,7 +97,9 @@ func TestWorkflowMetadataUpdateWithoutForceFlagFails(t *testing.T) { } func TestWorkflowMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = 
admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -101,7 +113,9 @@ func TestWorkflowMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestForceFlagIsIgnoredWithDryRunDuringWorkflowMetadataUpdate(t *testing.T) { t.Run("without --force", func(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -116,7 +130,9 @@ func TestForceFlagIsIgnoredWithDryRunDuringWorkflowMetadataUpdate(t *testing.T) }) t.Run("with --force", func(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -133,6 +149,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringWorkflowMetadataUpdate(t *testing.T) func TestWorkflowMetadataUpdateFailsWhenWorkflowDoesNotExist(t *testing.T) { testNamedEntityUpdateWithMockSetup( + t, core.ResourceType_WORKFLOW, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. @@ -154,6 +171,7 @@ func TestWorkflowMetadataUpdateFailsWhenWorkflowDoesNotExist(t *testing.T) { func TestWorkflowMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { testNamedEntityUpdateWithMockSetup( + t, core.ResourceType_WORKFLOW, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. 
@@ -178,7 +196,8 @@ func TestWorkflowMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { } func TestWorkflowMetadataUpdateRequiresWorkflowName(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + config := &NamedEntityConfig{} err := getUpdateWorkflowFunc(config)(s.Ctx, nil, s.CmdCtx) diff --git a/flytectl/cmd/upgrade/upgrade_test.go b/flytectl/cmd/upgrade/upgrade_test.go index d4132f1df4..dd451d13b5 100644 --- a/flytectl/cmd/upgrade/upgrade_test.go +++ b/flytectl/cmd/upgrade/upgrade_test.go @@ -104,7 +104,8 @@ func TestSelfUpgrade(t *testing.T) { github.FlytectlReleaseConfig.OverrideExecutable = tempExt goos = platformutil.Linux t.Run("Successful upgrade", func(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + stdlibversion.Build = "" stdlibversion.BuildTime = "" stdlibversion.Version = version @@ -118,7 +119,8 @@ func TestSelfUpgradeError(t *testing.T) { github.FlytectlReleaseConfig.OverrideExecutable = tempExt goos = platformutil.Linux t.Run("Successful upgrade", func(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + stdlibversion.Build = "" stdlibversion.BuildTime = "" stdlibversion.Version = "v" @@ -133,7 +135,8 @@ func TestSelfUpgradeRollback(t *testing.T) { github.FlytectlReleaseConfig.OverrideExecutable = tempExt goos = platformutil.Linux t.Run("Successful rollback", func(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + var args = []string{rollBackSubCommand} stdlibversion.Build = "" stdlibversion.BuildTime = "" @@ -142,7 +145,8 @@ func TestSelfUpgradeRollback(t *testing.T) { }) t.Run("Successful rollback failed", func(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + var args = []string{rollBackSubCommand} stdlibversion.Build = "" stdlibversion.BuildTime = "" @@ -151,7 +155,8 @@ func TestSelfUpgradeRollback(t *testing.T) { }) t.Run("Successful rollback for windows", func(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + var args = 
[]string{rollBackSubCommand} stdlibversion.Build = "" stdlibversion.BuildTime = "" @@ -161,7 +166,8 @@ func TestSelfUpgradeRollback(t *testing.T) { }) t.Run("Successful rollback for windows", func(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + var args = []string{rollBackSubCommand} stdlibversion.Build = "" stdlibversion.BuildTime = "" diff --git a/flytectl/cmd/version/version_test.go b/flytectl/cmd/version/version_test.go index 791a895e46..a397ab8199 100644 --- a/flytectl/cmd/version/version_test.go +++ b/flytectl/cmd/version/version_test.go @@ -54,8 +54,7 @@ func TestVersionCommand(t *testing.T) { func TestVersionCommandFunc(t *testing.T) { ctx := context.Background() - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) stdlibversion.Build = "" stdlibversion.BuildTime = "" stdlibversion.Version = testVersion @@ -67,8 +66,7 @@ func TestVersionCommandFunc(t *testing.T) { func TestVersionCommandFuncError(t *testing.T) { ctx := context.Background() - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) stdlibversion.Build = "" stdlibversion.BuildTime = "" stdlibversion.Version = "v" @@ -80,8 +78,7 @@ func TestVersionCommandFuncError(t *testing.T) { func TestVersionCommandFuncErr(t *testing.T) { ctx := context.Background() - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) stdlibversion.Build = "" stdlibversion.BuildTime = "" stdlibversion.Version = testVersion diff --git a/flytectl/go.mod b/flytectl/go.mod index 56b4928ec8..b657a02d4d 100644 --- a/flytectl/go.mod +++ b/flytectl/go.mod @@ -35,10 +35,10 @@ require ( github.com/stretchr/testify v1.9.0 github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0 github.com/zalando/go-keyring v0.1.1 - golang.org/x/oauth2 v0.16.0 + golang.org/x/oauth2 v0.18.0 golang.org/x/text v0.16.0 google.golang.org/grpc v1.62.1 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.34.1 gopkg.in/yaml.v3 v3.0.1 gotest.tools v2.2.0+incompatible 
k8s.io/api v0.28.4 @@ -66,7 +66,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/containerd/console v1.0.4-0.20230313162750-1ae8d489ac81 // indirect github.com/containerd/log v0.1.0 // indirect github.com/coocood/freecache v1.1.1 // indirect @@ -121,7 +121,6 @@ require ( github.com/mattn/go-isatty v0.0.18 // indirect github.com/mattn/go-localereader v0.0.1 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -136,10 +135,10 @@ require ( github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.53.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shamaton/msgpack/v2 v2.2.2 // indirect diff --git a/flytectl/go.sum b/flytectl/go.sum index bd36624647..f57ca65c0a 100644 --- a/flytectl/go.sum +++ b/flytectl/go.sum @@ -85,8 +85,8 @@ github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charmbracelet/bubbles v0.18.0 h1:PYv1A036luoBGroX6VWjQIE9Syf2Wby2oOl/39KLfy0= github.com/charmbracelet/bubbles v0.18.0/go.mod h1:08qhZhtIwzgrtBjAcJnij1t1H0ZRjwHyGsy6AL11PSw= github.com/charmbracelet/bubbletea v0.25.0 h1:bAfwk7jRz7FKFl9RzlIULPkStffg5k6pNt5dywy4TcM= @@ -351,8 +351,6 @@ github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+Ei github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= @@ -402,15 +400,15 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= @@ -599,8 +597,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod 
h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -846,8 +844,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/flytectl/pkg/configutil/configutil.go b/flytectl/pkg/configutil/configutil.go index df2f099da5..c018052d91 100644 --- a/flytectl/pkg/configutil/configutil.go +++ b/flytectl/pkg/configutil/configutil.go @@ -16,17 +16,6 @@ const ( console: endpoint: {{.Console}} {{- end}} -{{- if .DataConfig}} -# This is not a needed configuration, only useful if you want to explore the data in sandbox. For non sandbox, please -# do not use this configuration, instead prefer to use aws, gcs, azure sessions. Flytekit, should use fsspec to -# auto select the right backend to pull data as long as the sessions are configured. For Sandbox, this is special, as -# minio is s3 compatible and we ship with minio in sandbox. -storage: - connection: - endpoint: {{.DataConfig.Endpoint}} - access-key: {{.DataConfig.AccessKey}} - secret-key: {{.DataConfig.SecretKey}} -{{- end}} ` ) diff --git a/flytectl/pkg/configutil/configutil_test.go b/flytectl/pkg/configutil/configutil_test.go index ccdf5035ef..10f8553247 100644 --- a/flytectl/pkg/configutil/configutil_test.go +++ b/flytectl/pkg/configutil/configutil_test.go @@ -1,6 +1,7 @@ package configutil import ( + "io" "io/ioutil" "os" "testing" @@ -20,7 +21,7 @@ func TestSetupConfig(t *testing.T) { } err = SetupConfig(file.Name(), AdminConfigTemplate, templateValue) assert.NoError(t, err) - configBytes, err := ioutil.ReadAll(file) + configBytes, err := io.ReadAll(file) assert.NoError(t, err) expected := `admin: # For GRPC endpoints you might want to use dns:///flyte.myexample.com @@ -62,21 +63,12 @@ console: } err = SetupConfig(file.Name(), AdminConfigTemplate, templateValue) assert.NoError(t, err) - configBytes, err = ioutil.ReadAll(file) + configBytes, err = io.ReadAll(file) assert.NoError(t, err) expected = `admin: # For GRPC endpoints you might want to use dns:///flyte.myexample.com endpoint: dns:///admin.example.com insecure: true -# This is not a needed configuration, only useful if you 
want to explore the data in sandbox. For non sandbox, please -# do not use this configuration, instead prefer to use aws, gcs, azure sessions. Flytekit, should use fsspec to -# auto select the right backend to pull data as long as the sessions are configured. For Sandbox, this is special, as -# minio is s3 compatible and we ship with minio in sandbox. -storage: - connection: - endpoint: http://localhost:9000 - access-key: my-access-key - secret-key: my-secret-key ` assert.Equal(t, expected, string(configBytes)) @@ -91,8 +83,8 @@ func TestConfigCleanup(t *testing.T) { if os.IsNotExist(err) { _ = os.MkdirAll(f.FilePathJoin(f.UserHomeDir(), ".flyte"), 0755) } - _ = ioutil.WriteFile(FlytectlConfig, []byte("string"), 0600) - _ = ioutil.WriteFile(Kubeconfig, []byte("string"), 0600) + _ = os.WriteFile(FlytectlConfig, []byte("string"), 0600) + _ = os.WriteFile(Kubeconfig, []byte("string"), 0600) err = ConfigCleanup() assert.Nil(t, err) diff --git a/flytectl/pkg/sandbox/start.go b/flytectl/pkg/sandbox/start.go index 8689aca6f2..6681baf5e1 100644 --- a/flytectl/pkg/sandbox/start.go +++ b/flytectl/pkg/sandbox/start.go @@ -175,13 +175,8 @@ func startSandbox(ctx context.Context, cli docker.Docker, g github.GHRepoService } templateValues := configutil.ConfigTemplateSpec{ - Host: "localhost:30080", + Host: "dns:///localhost:30080", Insecure: true, - DataConfig: &configutil.DataConfig{ - Endpoint: "http://localhost:30002", - AccessKey: "minio", - SecretKey: "miniostorage", - }, } if err := configutil.SetupConfig(configutil.FlytectlConfig, configutil.GetTemplate(), templateValues); err != nil { return nil, err diff --git a/flytectl/pkg/sandbox/status_test.go b/flytectl/pkg/sandbox/status_test.go index 2bc3a0529c..2f49e4e434 100644 --- a/flytectl/pkg/sandbox/status_test.go +++ b/flytectl/pkg/sandbox/status_test.go @@ -14,15 +14,13 @@ import ( func TestSandboxStatus(t *testing.T) { t.Run("Sandbox status with zero result", func(t *testing.T) { mockDocker := &mocks.Docker{} - s := 
testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) mockDocker.OnContainerList(s.Ctx, container.ListOptions{All: true}).Return([]types.Container{}, nil) err := PrintStatus(s.Ctx, mockDocker) assert.Nil(t, err) }) t.Run("Sandbox status with running sandbox", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx mockDocker := &mocks.Docker{} mockDocker.OnContainerList(ctx, container.ListOptions{All: true}).Return([]types.Container{ diff --git a/flyteidl/clients/go/admin/auth_interceptor_test.go b/flyteidl/clients/go/admin/auth_interceptor_test.go index 0f47e97b9c..b03171c825 100644 --- a/flyteidl/clients/go/admin/auth_interceptor_test.go +++ b/flyteidl/clients/go/admin/auth_interceptor_test.go @@ -343,7 +343,7 @@ func TestNewAuthInterceptorAndMaterialize(t *testing.T) { AuthType: AuthTypeClientSecret, TokenURL: fmt.Sprintf("http://localhost:%d/oauth2/token", httpPort), Scopes: []string{"all"}, - Audience: "http://localhost:30081", + Audience: fmt.Sprintf("http://localhost:%d", httpPort), AuthorizationHeader: "authorization", } diff --git a/flyteidl/clients/go/admin/client.go b/flyteidl/clients/go/admin/client.go index 757f25b160..9f14d49dee 100644 --- a/flyteidl/clients/go/admin/client.go +++ b/flyteidl/clients/go/admin/client.go @@ -80,6 +80,10 @@ func GetAdditionalAdminClientConfigOptions(cfg *Config) []grpc.DialOption { // ever has those endpoints opts = append(opts, grpc.WithChainUnaryInterceptor(grpcPrometheus.UnaryClientInterceptor, retryInterceptor)) + if cfg.MaxMessageSizeBytes > 0 { + opts = append(opts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(cfg.MaxMessageSizeBytes))) + } + return opts } diff --git a/flyteidl/clients/go/admin/config.go b/flyteidl/clients/go/admin/config.go index 03f2f8ecc2..6c8adf3553 100644 --- a/flyteidl/clients/go/admin/config.go +++ b/flyteidl/clients/go/admin/config.go @@ -45,6 +45,7 @@ type Config struct { MaxBackoffDelay config.Duration 
`json:"maxBackoffDelay" pflag:",Max delay for grpc backoff"` PerRetryTimeout config.Duration `json:"perRetryTimeout" pflag:",gRPC per retry timeout"` MaxRetries int `json:"maxRetries" pflag:",Max number of gRPC retries"` + MaxMessageSizeBytes int `json:"maxMessageSizeBytes" pflag:",The max size in bytes for incoming gRPC messages"` AuthType AuthType `json:"authType" pflag:",Type of OAuth2 flow used for communicating with admin.ClientSecret,Pkce,ExternalCommand are valid values"` TokenRefreshWindow config.Duration `json:"tokenRefreshWindow" pflag:",Max duration between token refresh attempt and token expiry."` // Deprecated: settings will be discovered dynamically diff --git a/flyteidl/clients/go/admin/config_flags.go b/flyteidl/clients/go/admin/config_flags.go index db1305c0b1..c344147c8c 100755 --- a/flyteidl/clients/go/admin/config_flags.go +++ b/flyteidl/clients/go/admin/config_flags.go @@ -57,6 +57,7 @@ func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.String(fmt.Sprintf("%v%v", prefix, "maxBackoffDelay"), defaultConfig.MaxBackoffDelay.String(), "Max delay for grpc backoff") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "perRetryTimeout"), defaultConfig.PerRetryTimeout.String(), "gRPC per retry timeout") cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "maxRetries"), defaultConfig.MaxRetries, "Max number of gRPC retries") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "maxMessageSizeBytes"), defaultConfig.MaxMessageSizeBytes, "The max size in bytes for incoming gRPC messages") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "authType"), defaultConfig.AuthType.String(), "Type of OAuth2 flow used for communicating with admin.ClientSecret, Pkce, ExternalCommand are valid values") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "tokenRefreshWindow"), defaultConfig.TokenRefreshWindow.String(), "Max duration between token refresh attempt and token expiry.") cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "useAuth"), defaultConfig.DeprecatedUseAuth, "Deprecated: Auth will be 
enabled/disabled based on admin's dynamically discovered information.") diff --git a/flyteidl/clients/go/admin/config_flags_test.go b/flyteidl/clients/go/admin/config_flags_test.go index e815bcb5f3..a79467dc16 100755 --- a/flyteidl/clients/go/admin/config_flags_test.go +++ b/flyteidl/clients/go/admin/config_flags_test.go @@ -197,6 +197,20 @@ func TestConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_maxMessageSizeBytes", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("maxMessageSizeBytes", testValue) + if vInt, err := cmdFlags.GetInt("maxMessageSizeBytes"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.MaxMessageSizeBytes) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) t.Run("Test_authType", func(t *testing.T) { t.Run("Override", func(t *testing.T) { diff --git a/flyteidl/clients/go/admin/token_source_provider.go b/flyteidl/clients/go/admin/token_source_provider.go index 83df542082..4ecfa59215 100644 --- a/flyteidl/clients/go/admin/token_source_provider.go +++ b/flyteidl/clients/go/admin/token_source_provider.go @@ -9,6 +9,9 @@ import ( "strings" "sync" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + "golang.org/x/oauth2" "golang.org/x/oauth2/clientcredentials" @@ -167,6 +170,7 @@ func GetPKCEAuthTokenSource(ctx context.Context, pkceTokenOrchestrator pkce.Toke type ClientCredentialsTokenSourceProvider struct { ccConfig clientcredentials.Config tokenCache cache.TokenCache + cfg *Config } func NewClientCredentialsTokenSourceProvider(ctx context.Context, cfg *Config, scopes []string, tokenURL string, @@ -198,7 +202,9 @@ func NewClientCredentialsTokenSourceProvider(ctx context.Context, cfg *Config, s Scopes: scopes, EndpointParams: endpointParams, }, - tokenCache: tokenCache}, nil + tokenCache: tokenCache, + cfg: cfg, + }, nil } func (p ClientCredentialsTokenSourceProvider) GetTokenSource(ctx context.Context) (oauth2.TokenSource, error) { @@ -207,6 
+213,7 @@ func (p ClientCredentialsTokenSourceProvider) GetTokenSource(ctx context.Context new: p.ccConfig.TokenSource(ctx), mu: sync.Mutex{}, tokenCache: p.tokenCache, + cfg: p.cfg, }, nil } @@ -215,6 +222,7 @@ type customTokenSource struct { mu sync.Mutex // guards everything else new oauth2.TokenSource tokenCache cache.TokenCache + cfg *Config } func (s *customTokenSource) Token() (*oauth2.Token, error) { @@ -225,10 +233,24 @@ func (s *customTokenSource) Token() (*oauth2.Token, error) { return token, nil } - token, err := s.new.Token() + totalAttempts := s.cfg.MaxRetries + 1 // Add one for initial request attempt + backoff := wait.Backoff{ + Duration: s.cfg.PerRetryTimeout.Duration, + Steps: totalAttempts, + } + var token *oauth2.Token + err := retry.OnError(backoff, func(err error) bool { + return err != nil + }, func() (err error) { + token, err = s.new.Token() + if err != nil { + logger.Infof(s.ctx, "failed to get token: %w", err) + return fmt.Errorf("failed to get token: %w", err) + } + return nil + }) if err != nil { - logger.Warnf(s.ctx, "failed to get token: %v", err) - return nil, fmt.Errorf("failed to get token: %w", err) + return nil, err } logger.Infof(s.ctx, "retrieved token with expiry %v", token.Expiry) diff --git a/flyteidl/go.mod b/flyteidl/go.mod index 5aa9ba2b15..4c913dcb4d 100644 --- a/flyteidl/go.mod +++ b/flyteidl/go.mod @@ -16,11 +16,12 @@ require ( github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 golang.org/x/net v0.27.0 - golang.org/x/oauth2 v0.16.0 + golang.org/x/oauth2 v0.18.0 google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 google.golang.org/grpc v1.62.1 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.34.1 k8s.io/apimachinery v0.28.2 + k8s.io/client-go v0.28.1 ) require ( @@ -38,7 +39,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 
// indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/coocood/freecache v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect @@ -70,16 +71,15 @@ require ( github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/ncw/swift v1.0.53 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_golang v1.19.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.53.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/cobra v1.7.0 // indirect github.com/stretchr/objx v0.5.2 // indirect @@ -111,7 +111,6 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/api v0.28.2 // indirect - k8s.io/client-go v0.28.1 // indirect k8s.io/klog/v2 v2.100.1 // indirect k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect diff --git a/flyteidl/go.sum b/flyteidl/go.sum index bf47d92a09..f440e247e9 100644 --- a/flyteidl/go.sum +++ b/flyteidl/go.sum @@ -33,8 +33,8 @@ github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= 
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= @@ -180,8 +180,6 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -205,15 +203,15 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -308,11 +306,10 @@ 
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -401,8 +398,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod 
h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/flyteplugins/go.mod b/flyteplugins/go.mod index e62eda562d..7616900390 100644 --- a/flyteplugins/go.mod +++ b/flyteplugins/go.mod @@ -21,17 +21,18 @@ require ( github.com/magiconair/properties v1.8.6 github.com/mitchellh/mapstructure v1.5.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/client_golang v1.19.1 github.com/ray-project/kuberay/ray-operator v1.1.0-rc.1 + github.com/shamaton/msgpack/v2 v2.2.2 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 golang.org/x/net v0.27.0 - golang.org/x/oauth2 v0.16.0 + golang.org/x/oauth2 v0.18.0 golang.org/x/time v0.5.0 google.golang.org/api v0.155.0 google.golang.org/grpc v1.62.1 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.34.1 gopkg.in/yaml.v2 v2.4.0 gotest.tools v2.2.0+incompatible k8s.io/api v0.28.4 @@ -61,7 +62,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect @@ -95,8 +96,7 @@ require ( github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + 
github.com/mattn/go-isatty v0.0.16 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -105,9 +105,9 @@ require ( github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.53.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cast v1.4.1 // indirect diff --git a/flyteplugins/go.sum b/flyteplugins/go.sum index c8aa6c1254..d11f6b60a3 100644 --- a/flyteplugins/go.sum +++ b/flyteplugins/go.sum @@ -93,8 +93,8 @@ github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod 
h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -296,10 +296,9 @@ github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -327,21 +326,23 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.19.1 
h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/ray-project/kuberay/ray-operator v1.1.0-rc.1 h1:skD8MXnQMO3QGUeTKt09VOXvuch/gJh8+6q3OLm0kAQ= github.com/ray-project/kuberay/ray-operator v1.1.0-rc.1/go.mod h1:ZqyKKvMP5nKDldQoKmur+Wcx7wVlV9Q98phFqHzr+KY= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shamaton/msgpack/v2 v2.2.2 
h1:GOIg0c9LV04VwzOOqZSrmsv/JzjNOOMxnS/HvOHGdgs= +github.com/shamaton/msgpack/v2 v2.2.2/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= @@ -508,8 +509,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -563,6 +564,7 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= @@ -748,8 +750,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/flyteplugins/go/tasks/pluginmachinery/core/template/template.go b/flyteplugins/go/tasks/pluginmachinery/core/template/template.go index 7a787c5590..5aea60c4b9 100644 --- a/flyteplugins/go/tasks/pluginmachinery/core/template/template.go +++ b/flyteplugins/go/tasks/pluginmachinery/core/template/template.go @@ -27,7 +27,9 @@ import ( "github.com/golang/protobuf/ptypes" "github.com/pkg/errors" + "github.com/shamaton/msgpack/v2" + "github.com/flyteorg/flyte/flyteidl/clients/go/coreutils" idlCore "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core" 
"github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/io" @@ -199,6 +201,19 @@ func serializeLiteralScalar(l *idlCore.Scalar) (string, error) { return o.Blob.Uri, nil case *idlCore.Scalar_Schema: return o.Schema.Uri, nil + case *idlCore.Scalar_Binary: + binaryBytes := o.Binary.Value + var currVal any + if o.Binary.Tag == coreutils.MESSAGEPACK { + err := msgpack.Unmarshal(binaryBytes, &currVal) + if err != nil { + return "", fmt.Errorf("failed to unmarshal messagepack bytes with literal:[%v], err:[%v]", l, err) + } + // TODO: Try to support Primitive_Datetime, Primitive_Duration, Flyte File, and Flyte Directory. + return fmt.Sprintf("%v", currVal), nil + } + return "", fmt.Errorf("unsupported binary tag [%v]", o.Binary.Tag) + default: return "", fmt.Errorf("received an unexpected scalar type [%v]", reflect.TypeOf(l.Value)) } diff --git a/flyteplugins/go/tasks/pluginmachinery/core/template/template_test.go b/flyteplugins/go/tasks/pluginmachinery/core/template/template_test.go index 956ec33cfd..0fa96a1a05 100644 --- a/flyteplugins/go/tasks/pluginmachinery/core/template/template_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/core/template/template_test.go @@ -7,6 +7,7 @@ import ( "testing" "time" + "github.com/shamaton/msgpack/v2" "github.com/stretchr/testify/assert" "github.com/flyteorg/flyte/flyteidl/clients/go/coreutils" @@ -747,3 +748,55 @@ func TestSerializeLiteral(t *testing.T) { assert.Equal(t, "s3://some-bucket/fdsa/x.parquet", interpolated) }) } + +func TestSerializeLiteralScalar_BinaryMessagePack(t *testing.T) { + // Create a simple map to be serialized into MessagePack format + testMap := map[string]interface{}{ + "a": 1, + "b": true, + "c": 1.1, + "d": "string", + } + + // Serialize the map using MessagePack + encodedData, err := msgpack.Marshal(testMap) + assert.NoError(t, err) + + // Create the core.Scalar_Binary with the encoded MessagePack data and MESSAGEPACK tag + binaryScalar := &core.Scalar{ + Value: &core.Scalar_Binary{ + Binary: 
&core.Binary{ + Value: encodedData, + Tag: coreutils.MESSAGEPACK, + }, + }, + } + + // Call the function we want to test + result, err := serializeLiteralScalar(binaryScalar) + assert.NoError(t, err) + + // Since the map should be decoded back, we expect a simple string representation of the map + expectedResult := "map[a:1 b:true c:1.1 d:string]" + assert.Equal(t, expectedResult, result) +} + +func TestSerializeLiteralScalar_BinaryUnsupportedTag(t *testing.T) { + // Create some binary data for testing + binaryData := []byte{0x01, 0x02, 0x03} + + // Create a core.Scalar_Binary with an unsupported tag + binaryScalar := &core.Scalar{ + Value: &core.Scalar_Binary{ + Binary: &core.Binary{ + Value: binaryData, + Tag: "unsupported-tag", + }, + }, + } + + // Call the function and expect an error because the tag is unsupported + _, err := serializeLiteralScalar(binaryScalar) + assert.Error(t, err) + assert.Contains(t, err.Error(), "unsupported binary tag") +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go index 8e89e58d3d..eaee5bce6c 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go @@ -201,14 +201,15 @@ func AddCoPilotToContainer(ctx context.Context, cfg config.FlyteCoPilotConfig, c return nil } -func AddCoPilotToPod(ctx context.Context, cfg config.FlyteCoPilotConfig, coPilotPod *v1.PodSpec, iFace *core.TypedInterface, taskExecMetadata core2.TaskExecutionMetadata, inputPaths io.InputFilePaths, outputPaths io.OutputFilePaths, pilot *core.DataLoadingConfig) error { +func AddCoPilotToPod(ctx context.Context, cfg config.FlyteCoPilotConfig, coPilotPod *v1.PodSpec, iFace *core.TypedInterface, taskExecMetadata core2.TaskExecutionMetadata, inputPaths io.InputFilePaths, outputPaths io.OutputFilePaths, pilot *core.DataLoadingConfig) (string, error) { if pilot == nil || !pilot.Enabled { - return nil + return "", nil 
} logger.Infof(ctx, "CoPilot Enabled for task [%s]", taskExecMetadata.GetTaskExecutionID().GetID().TaskId.Name) shareProcessNamespaceEnabled := true coPilotPod.ShareProcessNamespace = &shareProcessNamespaceEnabled + primaryInitContainerName := "" if iFace != nil { if iFace.Inputs != nil && len(iFace.Inputs.Variables) > 0 { inPath := cfg.DefaultInputDataPath @@ -231,13 +232,14 @@ func AddCoPilotToPod(ctx context.Context, cfg config.FlyteCoPilotConfig, coPilot // Lets add the Inputs init container args, err := DownloadCommandArgs(inputPaths.GetInputPath(), outputPaths.GetOutputPrefixPath(), inPath, format, iFace.Inputs) if err != nil { - return err + return primaryInitContainerName, err } downloader, err := FlyteCoPilotContainer(flyteInitContainerName, cfg, args, inputsVolumeMount) if err != nil { - return err + return primaryInitContainerName, err } coPilotPod.InitContainers = append(coPilotPod.InitContainers, downloader) + primaryInitContainerName = downloader.Name } if iFace.Outputs != nil && len(iFace.Outputs.Variables) > 0 { @@ -260,15 +262,15 @@ func AddCoPilotToPod(ctx context.Context, cfg config.FlyteCoPilotConfig, coPilot // Lets add the Inputs init container args, err := SidecarCommandArgs(outPath, outputPaths.GetOutputPrefixPath(), outputPaths.GetRawOutputPrefix(), cfg.StartTimeout.Duration, iFace) if err != nil { - return err + return primaryInitContainerName, err } sidecar, err := FlyteCoPilotContainer(flyteSidecarContainerName, cfg, args, outputsVolumeMount) if err != nil { - return err + return primaryInitContainerName, err } coPilotPod.Containers = append(coPilotPod.Containers, sidecar) } } - return nil + return primaryInitContainerName, nil } diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go index 09a9fbf52b..aba18c85ac 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go +++ 
b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go @@ -533,7 +533,9 @@ func TestAddCoPilotToPod(t *testing.T) { InputPath: "in", OutputPath: "out", } - assert.NoError(t, AddCoPilotToPod(ctx, cfg, &pod, iface, taskMetadata, inputPaths, opath, pilot)) + primaryInitContainerName, err := AddCoPilotToPod(ctx, cfg, &pod, iface, taskMetadata, inputPaths, opath, pilot) + assert.NoError(t, err) + assert.Equal(t, "test-downloader", primaryInitContainerName) assertPodHasSNPS(t, &pod) assertPodHasCoPilot(t, cfg, pilot, iface, &pod) }) @@ -545,7 +547,9 @@ func TestAddCoPilotToPod(t *testing.T) { InputPath: "in", OutputPath: "out", } - assert.NoError(t, AddCoPilotToPod(ctx, cfg, &pod, nil, taskMetadata, inputPaths, opath, pilot)) + primaryInitContainerName, err := AddCoPilotToPod(ctx, cfg, &pod, nil, taskMetadata, inputPaths, opath, pilot) + assert.NoError(t, err) + assert.Empty(t, primaryInitContainerName) assertPodHasSNPS(t, &pod) assertPodHasCoPilot(t, cfg, pilot, nil, &pod) }) @@ -565,7 +569,9 @@ func TestAddCoPilotToPod(t *testing.T) { InputPath: "in", OutputPath: "out", } - assert.NoError(t, AddCoPilotToPod(ctx, cfg, &pod, iface, taskMetadata, inputPaths, opath, pilot)) + primaryInitContainerName, err := AddCoPilotToPod(ctx, cfg, &pod, iface, taskMetadata, inputPaths, opath, pilot) + assert.NoError(t, err) + assert.Equal(t, "test-downloader", primaryInitContainerName) assertPodHasSNPS(t, &pod) assertPodHasCoPilot(t, cfg, pilot, iface, &pod) }) @@ -584,7 +590,9 @@ func TestAddCoPilotToPod(t *testing.T) { InputPath: "in", OutputPath: "out", } - assert.NoError(t, AddCoPilotToPod(ctx, cfg, &pod, iface, taskMetadata, inputPaths, opath, pilot)) + primaryInitContainerName, err := AddCoPilotToPod(ctx, cfg, &pod, iface, taskMetadata, inputPaths, opath, pilot) + assert.NoError(t, err) + assert.Empty(t, primaryInitContainerName) assertPodHasSNPS(t, &pod) assertPodHasCoPilot(t, cfg, pilot, iface, &pod) }) @@ -603,11 +611,15 @@ func TestAddCoPilotToPod(t *testing.T) { 
InputPath: "in", OutputPath: "out", } - assert.NoError(t, AddCoPilotToPod(ctx, cfg, &pod, iface, taskMetadata, inputPaths, opath, pilot)) + primaryInitContainerName, err := AddCoPilotToPod(ctx, cfg, &pod, iface, taskMetadata, inputPaths, opath, pilot) + assert.NoError(t, err) + assert.Empty(t, primaryInitContainerName) assert.Len(t, pod.Volumes, 0) }) t.Run("nil", func(t *testing.T) { - assert.NoError(t, AddCoPilotToPod(ctx, cfg, nil, nil, taskMetadata, inputPaths, opath, nil)) + primaryInitContainerName, err := AddCoPilotToPod(ctx, cfg, nil, nil, taskMetadata, inputPaths, opath, nil) + assert.NoError(t, err) + assert.Empty(t, primaryInitContainerName) }) } diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go index e8252090df..229f963968 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go @@ -28,7 +28,9 @@ const PrimaryContainerNotFound = "PrimaryContainerNotFound" const SIGKILL = 137 const defaultContainerTemplateName = "default" +const defaultInitContainerTemplateName = "default-init" const primaryContainerTemplateName = "primary" +const primaryInitContainerTemplateName = "primary-init" const PrimaryContainerKey = "primary_container_name" // AddRequiredNodeSelectorRequirements adds the provided v1.NodeSelectorRequirement @@ -387,14 +389,17 @@ func ApplyFlytePodConfiguration(ctx context.Context, tCtx pluginsCore.TaskExecut dataLoadingConfig = pod.GetDataConfig() } + primaryInitContainerName := "" + if dataLoadingConfig != nil { if err := AddCoPilotToContainer(ctx, config.GetK8sPluginConfig().CoPilot, primaryContainer, taskTemplate.Interface, dataLoadingConfig); err != nil { return nil, nil, err } - if err := AddCoPilotToPod(ctx, config.GetK8sPluginConfig().CoPilot, podSpec, taskTemplate.GetInterface(), - tCtx.TaskExecutionMetadata(), tCtx.InputReader(), tCtx.OutputWriter(), dataLoadingConfig); 
err != nil { + primaryInitContainerName, err = AddCoPilotToPod(ctx, config.GetK8sPluginConfig().CoPilot, podSpec, taskTemplate.GetInterface(), + tCtx.TaskExecutionMetadata(), tCtx.InputReader(), tCtx.OutputWriter(), dataLoadingConfig) + if err != nil { return nil, nil, err } } @@ -406,7 +411,7 @@ func ApplyFlytePodConfiguration(ctx context.Context, tCtx pluginsCore.TaskExecut } // merge PodSpec and ObjectMeta with configuration pod template (if exists) - podSpec, objectMeta, err = MergeWithBasePodTemplate(ctx, tCtx, podSpec, objectMeta, primaryContainerName) + podSpec, objectMeta, err = MergeWithBasePodTemplate(ctx, tCtx, podSpec, objectMeta, primaryContainerName, primaryInitContainerName) if err != nil { return nil, nil, err } @@ -495,7 +500,7 @@ func getBasePodTemplate(ctx context.Context, tCtx pluginsCore.TaskExecutionConte // MergeWithBasePodTemplate attempts to merge the provided PodSpec and ObjectMeta with the configuration PodTemplate for // this task. func MergeWithBasePodTemplate(ctx context.Context, tCtx pluginsCore.TaskExecutionContext, - podSpec *v1.PodSpec, objectMeta *metav1.ObjectMeta, primaryContainerName string) (*v1.PodSpec, *metav1.ObjectMeta, error) { + podSpec *v1.PodSpec, objectMeta *metav1.ObjectMeta, primaryContainerName string, primaryInitContainerName string) (*v1.PodSpec, *metav1.ObjectMeta, error) { // attempt to retrieve base PodTemplate podTemplate, err := getBasePodTemplate(ctx, tCtx, DefaultPodTemplateStore) @@ -507,7 +512,7 @@ func MergeWithBasePodTemplate(ctx context.Context, tCtx pluginsCore.TaskExecutio } // merge podSpec with podTemplate - mergedPodSpec, err := mergePodSpecs(&podTemplate.Template.Spec, podSpec, primaryContainerName) + mergedPodSpec, err := mergePodSpecs(&podTemplate.Template.Spec, podSpec, primaryContainerName, primaryInitContainerName) if err != nil { return nil, nil, err } @@ -524,7 +529,7 @@ func MergeWithBasePodTemplate(ctx context.Context, tCtx pluginsCore.TaskExecutio // mergePodSpecs merges the two 
provided PodSpecs. This process uses the first as the base configuration, where values // set by the first PodSpec are overwritten by the second in the return value. Additionally, this function applies // container-level configuration from the basePodSpec. -func mergePodSpecs(basePodSpec *v1.PodSpec, podSpec *v1.PodSpec, primaryContainerName string) (*v1.PodSpec, error) { +func mergePodSpecs(basePodSpec *v1.PodSpec, podSpec *v1.PodSpec, primaryContainerName string, primaryInitContainerName string) (*v1.PodSpec, error) { if basePodSpec == nil || podSpec == nil { return nil, errors.New("neither the basePodSpec or the podSpec can be nil") } @@ -539,6 +544,16 @@ func mergePodSpecs(basePodSpec *v1.PodSpec, podSpec *v1.PodSpec, primaryContaine } } + // extract defaultInitContainerTemplate and primaryInitContainerTemplate + var defaultInitContainerTemplate, primaryInitContainerTemplate *v1.Container + for i := 0; i < len(basePodSpec.InitContainers); i++ { + if basePodSpec.InitContainers[i].Name == defaultInitContainerTemplateName { + defaultInitContainerTemplate = &basePodSpec.InitContainers[i] + } else if basePodSpec.InitContainers[i].Name == primaryInitContainerTemplateName { + primaryInitContainerTemplate = &basePodSpec.InitContainers[i] + } + } + // merge PodTemplate PodSpec with podSpec var mergedPodSpec *v1.PodSpec = basePodSpec.DeepCopy() if err := mergo.Merge(mergedPodSpec, podSpec, mergo.WithOverride, mergo.WithAppendSlice); err != nil { @@ -580,6 +595,43 @@ func mergePodSpecs(basePodSpec *v1.PodSpec, podSpec *v1.PodSpec, primaryContaine } mergedPodSpec.Containers = mergedContainers + + // merge PodTemplate init containers + var mergedInitContainers []v1.Container + for _, initContainer := range podSpec.InitContainers { + // if applicable start with defaultContainerTemplate + var mergedInitContainer *v1.Container + if defaultInitContainerTemplate != nil { + mergedInitContainer = defaultInitContainerTemplate.DeepCopy() + } + + // if applicable merge with 
primaryInitContainerTemplate + if initContainer.Name == primaryInitContainerName && primaryInitContainerTemplate != nil { + if mergedInitContainer == nil { + mergedInitContainer = primaryInitContainerTemplate.DeepCopy() + } else { + err := mergo.Merge(mergedInitContainer, primaryInitContainerTemplate, mergo.WithOverride, mergo.WithAppendSlice) + if err != nil { + return nil, err + } + } + } + + // if applicable merge with existing init initContainer + if mergedInitContainer == nil { + mergedInitContainers = append(mergedInitContainers, initContainer) + } else { + err := mergo.Merge(mergedInitContainer, initContainer, mergo.WithOverride, mergo.WithAppendSlice) + if err != nil { + return nil, err + } + + mergedInitContainers = append(mergedInitContainers, *mergedInitContainer) + } + } + + mergedPodSpec.InitContainers = mergedInitContainers + return mergedPodSpec, nil } diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go index 0c2e9ef5cc..9797b5e05b 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go @@ -1934,6 +1934,14 @@ func TestMergeWithBasePodTemplate(t *testing.T) { Name: "bar", }, }, + InitContainers: []v1.Container{ + v1.Container{ + Name: "foo-init", + }, + v1.Container{ + Name: "foo-bar", + }, + }, } objectMeta := metav1.ObjectMeta{ @@ -1954,7 +1962,7 @@ func TestMergeWithBasePodTemplate(t *testing.T) { tCtx.OnTaskExecutionMetadata().Return(dummyTaskExecutionMetadata(&v1.ResourceRequirements{}, nil, "")) tCtx.OnTaskReader().Return(taskReader) - resultPodSpec, resultObjectMeta, err := MergeWithBasePodTemplate(context.TODO(), tCtx, &podSpec, &objectMeta, "foo") + resultPodSpec, resultObjectMeta, err := MergeWithBasePodTemplate(context.TODO(), tCtx, &podSpec, &objectMeta, "foo", "foo-init") assert.Nil(t, err) assert.True(t, reflect.DeepEqual(podSpec, *resultPodSpec)) 
assert.True(t, reflect.DeepEqual(objectMeta, *resultObjectMeta)) @@ -1966,6 +1974,11 @@ func TestMergeWithBasePodTemplate(t *testing.T) { TerminationMessagePath: "/dev/primary-termination-log", } + primaryInitContainerTemplate := v1.Container{ + Name: primaryInitContainerTemplateName, + TerminationMessagePath: "/dev/primary-init-termination-log", + } + podTemplate := v1.PodTemplate{ ObjectMeta: metav1.ObjectMeta{ Name: "fooTemplate", @@ -1982,6 +1995,9 @@ func TestMergeWithBasePodTemplate(t *testing.T) { Containers: []v1.Container{ primaryContainerTemplate, }, + InitContainers: []v1.Container{ + primaryInitContainerTemplate, + }, }, }, } @@ -2008,13 +2024,16 @@ func TestMergeWithBasePodTemplate(t *testing.T) { tCtx.OnTaskExecutionMetadata().Return(dummyTaskExecutionMetadata(&v1.ResourceRequirements{}, nil, "")) tCtx.OnTaskReader().Return(taskReader) - resultPodSpec, resultObjectMeta, err := MergeWithBasePodTemplate(context.TODO(), tCtx, &podSpec, &objectMeta, "foo") + resultPodSpec, resultObjectMeta, err := MergeWithBasePodTemplate(context.TODO(), tCtx, &podSpec, &objectMeta, "foo", "foo-init") assert.Nil(t, err) // test that template podSpec is merged primaryContainer := resultPodSpec.Containers[0] assert.Equal(t, podSpec.Containers[0].Name, primaryContainer.Name) assert.Equal(t, primaryContainerTemplate.TerminationMessagePath, primaryContainer.TerminationMessagePath) + primaryInitContainer := resultPodSpec.InitContainers[0] + assert.Equal(t, podSpec.InitContainers[0].Name, primaryInitContainer.Name) + assert.Equal(t, primaryInitContainerTemplate.TerminationMessagePath, primaryInitContainer.TerminationMessagePath) // test that template object metadata is copied assert.Contains(t, resultObjectMeta.Labels, "fooKey") @@ -2027,13 +2046,13 @@ func TestMergeWithBasePodTemplate(t *testing.T) { func TestMergePodSpecs(t *testing.T) { var priority int32 = 1 - podSpec1, _ := mergePodSpecs(nil, nil, "foo") + podSpec1, _ := mergePodSpecs(nil, nil, "foo", "foo-init") 
assert.Nil(t, podSpec1) - podSpec2, _ := mergePodSpecs(&v1.PodSpec{}, nil, "foo") + podSpec2, _ := mergePodSpecs(&v1.PodSpec{}, nil, "foo", "foo-init") assert.Nil(t, podSpec2) - podSpec3, _ := mergePodSpecs(nil, &v1.PodSpec{}, "foo") + podSpec3, _ := mergePodSpecs(nil, &v1.PodSpec{}, "foo", "foo-init") assert.Nil(t, podSpec3) podSpec := v1.PodSpec{ @@ -2051,6 +2070,20 @@ func TestMergePodSpecs(t *testing.T) { Name: "bar", }, }, + InitContainers: []v1.Container{ + v1.Container{ + Name: "primary-init", + VolumeMounts: []v1.VolumeMount{ + { + Name: "nccl", + MountPath: "abc", + }, + }, + }, + v1.Container{ + Name: "bar-init", + }, + }, NodeSelector: map[string]string{ "baz": "bar", }, @@ -2076,11 +2109,25 @@ func TestMergePodSpecs(t *testing.T) { TerminationMessagePath: "/dev/primary-termination-log", } + defaultInitContainerTemplate := v1.Container{ + Name: defaultInitContainerTemplateName, + TerminationMessagePath: "/dev/default-init-termination-log", + } + + primaryInitContainerTemplate := v1.Container{ + Name: primaryInitContainerTemplateName, + TerminationMessagePath: "/dev/primary-init-termination-log", + } + podTemplateSpec := v1.PodSpec{ Containers: []v1.Container{ defaultContainerTemplate, primaryContainerTemplate, }, + InitContainers: []v1.Container{ + defaultInitContainerTemplate, + primaryInitContainerTemplate, + }, HostNetwork: true, NodeSelector: map[string]string{ "foo": "bar", @@ -2093,7 +2140,7 @@ func TestMergePodSpecs(t *testing.T) { }, } - mergedPodSpec, err := mergePodSpecs(&podTemplateSpec, &podSpec, "primary") + mergedPodSpec, err := mergePodSpecs(&podTemplateSpec, &podSpec, "primary", "primary-init") assert.Nil(t, err) // validate a PodTemplate-only field @@ -2117,6 +2164,17 @@ func TestMergePodSpecs(t *testing.T) { defaultContainer := mergedPodSpec.Containers[1] assert.Equal(t, podSpec.Containers[1].Name, defaultContainer.Name) assert.Equal(t, defaultContainerTemplate.TerminationMessagePath, defaultContainer.TerminationMessagePath) + + // 
validate primary init container + primaryInitContainer := mergedPodSpec.InitContainers[0] + assert.Equal(t, podSpec.InitContainers[0].Name, primaryInitContainer.Name) + assert.Equal(t, primaryInitContainerTemplate.TerminationMessagePath, primaryInitContainer.TerminationMessagePath) + assert.Equal(t, 1, len(primaryInitContainer.VolumeMounts)) + + // validate default init container + defaultInitContainer := mergedPodSpec.InitContainers[1] + assert.Equal(t, podSpec.InitContainers[1].Name, defaultInitContainer.Name) + assert.Equal(t, defaultInitContainerTemplate.TerminationMessagePath, defaultInitContainer.TerminationMessagePath) } func TestAddFlyteCustomizationsToContainer_SetConsoleUrl(t *testing.T) { diff --git a/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context.go b/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context.go index 77b3ac6501..b76fe70d28 100644 --- a/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context.go +++ b/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context.go @@ -175,7 +175,7 @@ func (s SubTaskExecutionID) GetLogSuffix() string { return fmt.Sprintf(" #%d-%d-%d", s.taskRetryAttempt, s.executionIndex, s.subtaskRetryAttempt) } -var logTemplateRegexes = struct { +var LogTemplateRegexes = struct { ExecutionIndex *regexp.Regexp ParentName *regexp.Regexp RetryAttempt *regexp.Regexp @@ -189,17 +189,17 @@ var logTemplateRegexes = struct { func (s SubTaskExecutionID) TemplateVarsByScheme() []tasklog.TemplateVar { return []tasklog.TemplateVar{ - {Regex: logTemplateRegexes.ParentName, Value: s.parentName}, + {Regex: LogTemplateRegexes.ParentName, Value: s.parentName}, { - Regex: logTemplateRegexes.ExecutionIndex, + Regex: LogTemplateRegexes.ExecutionIndex, Value: strconv.FormatUint(uint64(s.executionIndex), 10), }, { - Regex: logTemplateRegexes.RetryAttempt, + Regex: LogTemplateRegexes.RetryAttempt, Value: strconv.FormatUint(s.subtaskRetryAttempt, 10), }, { - Regex: logTemplateRegexes.ParentRetryAttempt, + Regex: 
LogTemplateRegexes.ParentRetryAttempt, Value: strconv.FormatUint(uint64(s.taskRetryAttempt), 10), }, } diff --git a/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context_test.go b/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context_test.go index 103980fab0..a7f5aa20b4 100644 --- a/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context_test.go +++ b/flyteplugins/go/tasks/plugins/array/k8s/subtask_exec_context_test.go @@ -37,10 +37,10 @@ func TestSubTaskExecutionContext(t *testing.T) { assert.Equal(t, storage.DataReference("/raw_prefix/5/1"), stCtx.OutputWriter().GetRawOutputPrefix()) assert.Equal(t, []tasklog.TemplateVar{ - {Regex: logTemplateRegexes.ParentName, Value: "notfound"}, - {Regex: logTemplateRegexes.ExecutionIndex, Value: "0"}, - {Regex: logTemplateRegexes.RetryAttempt, Value: "1"}, - {Regex: logTemplateRegexes.ParentRetryAttempt, Value: "0"}, + {Regex: LogTemplateRegexes.ParentName, Value: "notfound"}, + {Regex: LogTemplateRegexes.ExecutionIndex, Value: "0"}, + {Regex: LogTemplateRegexes.RetryAttempt, Value: "1"}, + {Regex: LogTemplateRegexes.ParentRetryAttempt, Value: "0"}, }, stCtx.TaskExecutionMetadata().GetTaskExecutionID().(SubTaskExecutionID).TemplateVarsByScheme(), ) diff --git a/flyteplugins/go/tasks/plugins/k8s/dask/config.go b/flyteplugins/go/tasks/plugins/k8s/dask/config.go new file mode 100644 index 0000000000..aac388e116 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/dask/config.go @@ -0,0 +1,29 @@ +package dask + +import ( + pluginsConfig "github.com/flyteorg/flyte/flyteplugins/go/tasks/config" + "github.com/flyteorg/flyte/flyteplugins/go/tasks/logs" +) + +//go:generate pflags Config --default-var=defaultConfig + +var ( + defaultConfig = Config{ + Logs: logs.DefaultConfig, + } + + configSection = pluginsConfig.MustRegisterSubSection("dask", &defaultConfig) +) + +// Config is config for 'dask' plugin +type Config struct { + Logs logs.LogConfig `json:"logs,omitempty"` +} + +func GetConfig() *Config { + return 
configSection.GetConfig().(*Config) +} + +func SetConfig(cfg *Config) error { + return configSection.SetConfig(cfg) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/dask/config_flags.go b/flyteplugins/go/tasks/plugins/k8s/dask/config_flags.go new file mode 100755 index 0000000000..03774b772b --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/dask/config_flags.go @@ -0,0 +1,65 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package dask + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.cloudwatch-enabled"), defaultConfig.Logs.IsCloudwatchEnabled, "Enable Cloudwatch Logging") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.cloudwatch-region"), defaultConfig.Logs.CloudwatchRegion, "AWS region in which Cloudwatch logs are stored.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.cloudwatch-log-group"), defaultConfig.Logs.CloudwatchLogGroup, "Log group to which streams are associated.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.cloudwatch-template-uri"), defaultConfig.Logs.CloudwatchTemplateURI, "Template Uri to use when building cloudwatch log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.kubernetes-enabled"), defaultConfig.Logs.IsKubernetesEnabled, "Enable Kubernetes Logging") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.kubernetes-url"), defaultConfig.Logs.KubernetesURL, "Console URL for Kubernetes logs") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.kubernetes-template-uri"), defaultConfig.Logs.KubernetesTemplateURI, "Template Uri to use when building kubernetes log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.stackdriver-enabled"), defaultConfig.Logs.IsStackDriverEnabled, "Enable Log-links to stackdriver") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.gcp-project"), defaultConfig.Logs.GCPProjectName, "Name of the project in GCP") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.stackdriver-logresourcename"), defaultConfig.Logs.StackdriverLogResourceName, "Name of the logresource in stackdriver") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.stackdriver-template-uri"), defaultConfig.Logs.StackDriverTemplateURI, "Template Uri to use when building stackdriver log links") + return cmdFlags +} diff --git a/flyteplugins/go/tasks/plugins/k8s/dask/config_flags_test.go 
b/flyteplugins/go/tasks/plugins/k8s/dask/config_flags_test.go new file mode 100755 index 0000000000..4cd2be2b44 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/dask/config_flags_test.go @@ -0,0 +1,256 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package dask + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_logs.cloudwatch-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.cloudwatch-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.cloudwatch-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Logs.IsCloudwatchEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.cloudwatch-region", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.cloudwatch-region", testValue) + if vString, err := 
cmdFlags.GetString("logs.cloudwatch-region"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.CloudwatchRegion) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.cloudwatch-log-group", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.cloudwatch-log-group", testValue) + if vString, err := cmdFlags.GetString("logs.cloudwatch-log-group"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.CloudwatchLogGroup) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.cloudwatch-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.cloudwatch-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.cloudwatch-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.CloudwatchTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.kubernetes-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.kubernetes-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.kubernetes-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Logs.IsKubernetesEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.kubernetes-url", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.kubernetes-url", testValue) + if vString, err := cmdFlags.GetString("logs.kubernetes-url"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.KubernetesURL) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.kubernetes-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + 
cmdFlags.Set("logs.kubernetes-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.kubernetes-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.KubernetesTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.stackdriver-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.stackdriver-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.stackdriver-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Logs.IsStackDriverEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.gcp-project", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.gcp-project", testValue) + if vString, err := cmdFlags.GetString("logs.gcp-project"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.GCPProjectName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.stackdriver-logresourcename", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.stackdriver-logresourcename", testValue) + if vString, err := cmdFlags.GetString("logs.stackdriver-logresourcename"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.StackdriverLogResourceName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.stackdriver-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.stackdriver-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.stackdriver-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.StackDriverTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git 
a/flyteplugins/go/tasks/plugins/k8s/dask/dask.go b/flyteplugins/go/tasks/plugins/k8s/dask/dask.go index d3b4ab32f1..ae68a4c760 100644 --- a/flyteplugins/go/tasks/plugins/k8s/dask/dask.go +++ b/flyteplugins/go/tasks/plugins/k8s/dask/dask.go @@ -279,7 +279,7 @@ func createJobSpec(workerSpec daskAPI.WorkerSpec, schedulerSpec daskAPI.Schedule } func (p daskResourceHandler) GetTaskPhase(ctx context.Context, pluginContext k8s.PluginContext, r client.Object) (pluginsCore.PhaseInfo, error) { - logPlugin, err := logs.InitializeLogPlugins(logs.GetLogConfig()) + logPlugin, err := logs.InitializeLogPlugins(&GetConfig().Logs) if err != nil { return pluginsCore.PhaseInfoUndefined, err } @@ -296,7 +296,7 @@ func (p daskResourceHandler) GetTaskPhase(ctx context.Context, pluginContext k8s tasklog.Input{ Namespace: job.ObjectMeta.Namespace, PodName: job.Status.JobRunnerPodName, - LogName: "(User logs)", + LogName: "(Dask Runner Logs)", TaskExecutionID: taskExecID, }, ) diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/ray.go b/flyteplugins/go/tasks/plugins/k8s/ray/ray.go index 90388b46a5..95a87f4efa 100644 --- a/flyteplugins/go/tasks/plugins/k8s/ray/ray.go +++ b/flyteplugins/go/tasks/plugins/k8s/ray/ray.go @@ -574,6 +574,10 @@ func (plugin rayJobResourceHandler) GetTaskPhase(ctx context.Context, pluginCont phaseInfo, err = pluginsCore.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, info), nil case rayv1.JobDeploymentStatusComplete: phaseInfo, err = pluginsCore.PhaseInfoSuccess(info), nil + case rayv1.JobDeploymentStatusSuspended: + phaseInfo, err = pluginsCore.PhaseInfoQueuedWithTaskInfo(time.Now(), pluginsCore.DefaultPhaseVersion, "Suspended", info), nil + case rayv1.JobDeploymentStatusSuspending: + phaseInfo, err = pluginsCore.PhaseInfoQueuedWithTaskInfo(time.Now(), pluginsCore.DefaultPhaseVersion, "Suspending", info), nil case rayv1.JobDeploymentStatusFailed: failInfo := fmt.Sprintf("Failed to run Ray job %s with error: [%s] %s", rayJob.Name, rayJob.Status.Reason, 
rayJob.Status.Message) phaseInfo, err = pluginsCore.PhaseInfoFailure(flyteerr.TaskFailedWithError, failInfo, info), nil diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go b/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go index 7b555e9f23..38b2f56785 100644 --- a/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go +++ b/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go @@ -755,7 +755,8 @@ func TestGetTaskPhase(t *testing.T) { {rayv1.JobDeploymentStatusRunning, pluginsCore.PhaseRunning, false}, {rayv1.JobDeploymentStatusComplete, pluginsCore.PhaseSuccess, false}, {rayv1.JobDeploymentStatusFailed, pluginsCore.PhasePermanentFailure, false}, - {rayv1.JobDeploymentStatusSuspended, pluginsCore.PhaseUndefined, true}, + {rayv1.JobDeploymentStatusSuspended, pluginsCore.PhaseQueued, false}, + {rayv1.JobDeploymentStatusSuspending, pluginsCore.PhaseQueued, false}, } for _, tc := range testCases { diff --git a/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go b/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go index a7b2a3d1d4..4fffe2bee5 100644 --- a/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go +++ b/flyteplugins/go/tasks/plugins/webapi/agent/plugin.go @@ -176,9 +176,15 @@ func (p *Plugin) ExecuteTaskSync( return nil, nil, fmt.Errorf("failed to send inputsProto with error: %w", err) } + // Client is done with sending + if err := stream.CloseSend(); err != nil { + logger.Errorf(ctx, "failed to close stream with err %s", err.Error()) + return nil, nil, err + } + in, err := stream.Recv() if err != nil { - logger.Errorf(ctx, "Failed to write output with err %s", err.Error()) + logger.Errorf(ctx, "failed to write output with err %s", err.Error()) return nil, nil, err } if in.GetHeader() == nil { @@ -188,11 +194,6 @@ func (p *Plugin) ExecuteTaskSync( // For now, Propeller assumes that the output is always in the header. 
resource := in.GetHeader().GetResource() - if err := stream.CloseSend(); err != nil { - logger.Errorf(ctx, "Failed to close stream with err %s", err.Error()) - return nil, nil, err - } - return nil, ResourceWrapper{ Phase: resource.Phase, Outputs: resource.Outputs, @@ -272,7 +273,7 @@ func (p *Plugin) Status(ctx context.Context, taskCtx webapi.StatusContext) (phas case flyteIdl.TaskExecution_SUCCEEDED: err = writeOutput(ctx, taskCtx, resource.Outputs) if err != nil { - logger.Errorf(ctx, "Failed to write output with err %s", err.Error()) + logger.Errorf(ctx, "failed to write output with err %s", err.Error()) return core.PhaseInfoUndefined, err } return core.PhaseInfoSuccess(taskInfo), nil @@ -300,7 +301,7 @@ func (p *Plugin) Status(ctx context.Context, taskCtx webapi.StatusContext) (phas case admin.State_SUCCEEDED: err = writeOutput(ctx, taskCtx, resource.Outputs) if err != nil { - logger.Errorf(ctx, "Failed to write output with err %s", err.Error()) + logger.Errorf(ctx, "failed to write output with err %s", err.Error()) return core.PhaseInfoUndefined, err } return core.PhaseInfoSuccess(taskInfo), nil @@ -336,8 +337,10 @@ func (p *Plugin) getAsyncAgentClient(ctx context.Context, agent *Deployment) (se func (p *Plugin) watchAgents(ctx context.Context, agentService *core.AgentService) { go wait.Until(func() { - clientSet := getAgentClientSets(ctx) - agentRegistry := getAgentRegistry(ctx, clientSet) + childCtx, cancel := context.WithCancel(ctx) + defer cancel() + clientSet := getAgentClientSets(childCtx) + agentRegistry := getAgentRegistry(childCtx, clientSet) p.setRegistry(agentRegistry) agentService.SetSupportedTaskType(maps.Keys(agentRegistry)) }, p.cfg.PollInterval.Duration, ctx.Done()) diff --git a/flytepropeller/go.mod b/flytepropeller/go.mod index a14f689a2b..c819278d90 100644 --- a/flytepropeller/go.mod +++ b/flytepropeller/go.mod @@ -21,7 +21,7 @@ require ( github.com/magiconair/properties v1.8.6 github.com/mitchellh/mapstructure v1.5.0 github.com/pkg/errors 
v0.9.1 - github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/client_golang v1.19.1 github.com/shamaton/msgpack/v2 v2.2.2 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.7.0 @@ -34,7 +34,7 @@ require ( golang.org/x/sync v0.7.0 golang.org/x/time v0.5.0 google.golang.org/grpc v1.62.1 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.34.1 k8s.io/api v0.28.4 k8s.io/apiextensions-apiserver v0.28.4 k8s.io/apimachinery v0.28.4 @@ -68,7 +68,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/coocood/freecache v1.1.1 // indirect github.com/dask/dask-kubernetes/v2023 v2023.0.0-20230626103304-abd02cd17b26 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -104,8 +104,7 @@ require ( github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -115,9 +114,9 @@ require ( github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.53.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect 
github.com/ray-project/kuberay/ray-operator v1.1.0-rc.1 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cast v1.4.1 // indirect @@ -137,7 +136,7 @@ require ( go.opentelemetry.io/proto/otlp v1.1.0 // indirect golang.org/x/crypto v0.25.0 // indirect golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/sys v0.22.0 // indirect golang.org/x/term v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect diff --git a/flytepropeller/go.sum b/flytepropeller/go.sum index 87d2b03633..37a8766913 100644 --- a/flytepropeller/go.sum +++ b/flytepropeller/go.sum @@ -97,8 +97,8 @@ github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -318,10 +318,9 @@ github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= 
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -359,15 +358,15 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod 
h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/ray-project/kuberay/ray-operator v1.1.0-rc.1 h1:skD8MXnQMO3QGUeTKt09VOXvuch/gJh8+6q3OLm0kAQ= github.com/ray-project/kuberay/ray-operator v1.1.0-rc.1/go.mod h1:ZqyKKvMP5nKDldQoKmur+Wcx7wVlV9Q98phFqHzr+KY= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= @@ -549,8 +548,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod 
h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -610,6 +609,7 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= @@ -797,8 +797,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/flytepropeller/manager/doc.go b/flytepropeller/manager/doc.go index 025b60cf1f..ceb3192e1a 100644 --- a/flytepropeller/manager/doc.go +++ b/flytepropeller/manager/doc.go @@ -18,7 +18,7 @@ FlytePropeller Manager handles dynamic updates to both the k8s PodTemplate and s # Shard Strategies -Flyte defines a variety of Shard Strategies for configuring how FlyteWorkflows are sharded. These options may include the shard type (ex. hash, project, or domain) along with the number of shards or the distribution of project / domain IDs over shards. +Flyte defines a variety of Shard Strategies for configuring how FlyteWorkflows are sharded. These options may include the shard type (ex. Hash, Project, or Domain) along with the number of shards or the distribution of project / domain IDs over shards. Internally, FlyteWorkflow CRDs are initialized with k8s labels for project, domain, and a shard-key. The project and domain label values are associated with the environment of the registered workflow. The shard-key value is a range-bounded hash over various components of the FlyteWorkflow metadata, currently the keyspace range is defined as [0,32). A sharded Flyte deployment ensures deterministic FlyteWorkflow evaluations by setting disjoint k8s label selectors, based on the aforementioned labels, on each managed FlytePropeller instance. This ensures that only a single FlytePropeller instance is responsible for processing each FlyteWorkflow. 
@@ -28,10 +28,10 @@ The Hash Shard Strategy, denoted by "type: hash" in the configuration below, use manager: # pod and scanning configuration redacted shard: - type: hash # use the "hash" shard strategy + type: Hash # use the "Hash" shard strategy shard-count: 4 # the total number of shards -The Project and Domain Shard Strategies, denoted by "type: project" and "type: domain" respectively, use the FlyteWorkflow project and domain metadata to distributed FlyteWorkflows over managed FlytePropeller instances. These Shard Strategies are configured using a "per-shard-mapping" option, which is a list of ID lists. Each element in the "per-shard-mapping" list defines a new shard and the ID list assigns responsibility for the specified IDs to that shard. The assignment is performed using k8s label selectors, where each managed FlytePropeller instance includes FlyteWorkflows with the specified project or domain labels. +The Project and Domain Shard Strategies, denoted by "type: Project" and "type: Domain" respectively, use the FlyteWorkflow project and domain metadata to distribute FlyteWorkflows over managed FlytePropeller instances. These Shard Strategies are configured using a "per-shard-mapping" option, which is a list of ID lists. Each element in the "per-shard-mapping" list defines a new shard and the ID list assigns responsibility for the specified IDs to that shard. The assignment is performed using k8s label selectors, where each managed FlytePropeller instance includes FlyteWorkflows with the specified project or domain labels. A shard configured as a single wildcard ID (i.e. "*") is responsible for all IDs that are not covered by other shards. Only a single shard may be configured with a wildcard ID and on that shard their must be only one ID, namely the wildcard. In this case, the managed FlytePropeller instance uses k8s label selectors to exclude FlyteWorkflows with project or domain IDs from other shards. 
@@ -39,7 +39,7 @@ A shard configured as a single wildcard ID (i.e. "*") is responsible for all IDs manager: # pod and scanning configuration redacted shard: - type: project # use the "project" shard strategy + type: Project # use the "Project" shard strategy per-shard-mapping: # a list of per shard mappings - one shard is created for each element - ids: # the list of ids to be managed by the first shard - flytesnacks @@ -53,7 +53,7 @@ A shard configured as a single wildcard ID (i.e. "*") is responsible for all IDs manager: # pod and scanning configuration redacted shard: - type: domain # use the "domain" shard strategy + type: Domain # use the "Domain" shard strategy per-shard-mapping: # a list of per shard mappings - one shard is created for each element - ids: # the list of ids to be managed by the first shard - production diff --git a/flytepropeller/pkg/compiler/common/pretty_print.go b/flytepropeller/pkg/compiler/common/pretty_print.go new file mode 100644 index 0000000000..61df408a4e --- /dev/null +++ b/flytepropeller/pkg/compiler/common/pretty_print.go @@ -0,0 +1,23 @@ +package common + +import ( + "fmt" + "strings" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" +) + +func LiteralTypeToStr(lt *core.LiteralType) string { + if lt == nil { + return "None" + } + if lt.GetSimple() == core.SimpleType_STRUCT { + var structure string + for k, v := range lt.GetStructure().GetDataclassType() { + structure += fmt.Sprintf("dataclass_type:{key:%v value:{%v}, ", k, LiteralTypeToStr(v)) + } + structure = strings.TrimSuffix(structure, ", ") + return fmt.Sprintf("simple: STRUCT structure{%v}", structure) + } + return lt.String() +} diff --git a/flytepropeller/pkg/compiler/common/pretty_print_test.go b/flytepropeller/pkg/compiler/common/pretty_print_test.go new file mode 100644 index 0000000000..2d875af5dd --- /dev/null +++ b/flytepropeller/pkg/compiler/common/pretty_print_test.go @@ -0,0 +1,36 @@ +package common + +import ( + "testing" + + 
"github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/structpb" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" +) + +func TestLiteralTypeToStr(t *testing.T) { + dataclassType := &core.LiteralType{ + Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRUCT}, + Structure: &core.TypeStructure{ + DataclassType: map[string]*core.LiteralType{ + "a": { + Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}, + }, + }, + }, + Metadata: &structpb.Struct{Fields: map[string]*structpb.Value{ + "key": {Kind: &structpb.Value_StringValue{StringValue: "a"}}, + }}, + } + assert.Equal(t, LiteralTypeToStr(nil), "None") + assert.Equal(t, LiteralTypeToStr(dataclassType), "simple: STRUCT structure{dataclass_type:{key:a value:{simple:INTEGER}}") + assert.NotEqual(t, LiteralTypeToStr(dataclassType), dataclassType.String()) + + // Test for SimpleType + simpleType := &core.LiteralType{ + Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}, + } + assert.Equal(t, LiteralTypeToStr(simpleType), "simple:INTEGER") + assert.Equal(t, LiteralTypeToStr(simpleType), simpleType.String()) +} diff --git a/flytepropeller/pkg/compiler/transformers/k8s/inputs.go b/flytepropeller/pkg/compiler/transformers/k8s/inputs.go index 26f50d4ddd..2b94570c20 100644 --- a/flytepropeller/pkg/compiler/transformers/k8s/inputs.go +++ b/flytepropeller/pkg/compiler/transformers/k8s/inputs.go @@ -42,7 +42,7 @@ func validateInputs(nodeID common.NodeID, iface *core.TypedInterface, inputs cor continue } if !validators.AreTypesCastable(inputType, v.Type) { - errs.Collect(errors.NewMismatchingTypesErr(nodeID, inputVar, v.Type.String(), inputType.String())) + errs.Collect(errors.NewMismatchingTypesErr(nodeID, inputVar, common.LiteralTypeToStr(v.Type), common.LiteralTypeToStr(inputType))) continue } diff --git a/flytepropeller/pkg/compiler/validators/bindings.go b/flytepropeller/pkg/compiler/validators/bindings.go index 53535ba260..b69dda529f 100644 --- 
a/flytepropeller/pkg/compiler/validators/bindings.go +++ b/flytepropeller/pkg/compiler/validators/bindings.go @@ -131,7 +131,7 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding // If the variable has an index. We expect param to be a collection. if v.Index != nil { if cType := param.GetType().GetCollectionType(); cType == nil { - errs.Collect(errors.NewMismatchingVariablesErr(nodeID, outputVar, param.Type.String(), inputVar, expectedType.String())) + errs.Collect(errors.NewMismatchingVariablesErr(nodeID, outputVar, c.LiteralTypeToStr(param.Type), inputVar, c.LiteralTypeToStr(expectedType))) } else { sourceType = cType } @@ -164,7 +164,7 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding return param.GetType(), []c.NodeID{val.Promise.NodeId}, true } - errs.Collect(errors.NewMismatchingVariablesErr(node.GetId(), outputVar, sourceType.String(), inputVar, expectedType.String())) + errs.Collect(errors.NewMismatchingVariablesErr(node.GetId(), outputVar, c.LiteralTypeToStr(sourceType), inputVar, c.LiteralTypeToStr(expectedType))) return nil, nil, !errs.HasErrors() } } @@ -180,7 +180,7 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding if literalType == nil { errs.Collect(errors.NewUnrecognizedValueErr(nodeID, reflect.TypeOf(val.Scalar.GetValue()).String())) } else if validateParamTypes && !AreTypesCastable(literalType, expectedType) { - errs.Collect(errors.NewMismatchingTypesErr(nodeID, nodeParam, literalType.String(), expectedType.String())) + errs.Collect(errors.NewMismatchingTypesErr(nodeID, nodeParam, c.LiteralTypeToStr(literalType), c.LiteralTypeToStr(expectedType))) } if expectedType.GetEnumType() != nil { diff --git a/flytepropeller/pkg/compiler/validators/condition.go b/flytepropeller/pkg/compiler/validators/condition.go index 8e202b6423..70b72cde8a 100644 --- a/flytepropeller/pkg/compiler/validators/condition.go +++ 
b/flytepropeller/pkg/compiler/validators/condition.go @@ -44,7 +44,7 @@ func ValidateBooleanExpression(w c.WorkflowBuilder, node c.NodeBuilder, expr *fl if op1Valid && op2Valid && op1Type != nil && op2Type != nil { if op1Type.String() != op2Type.String() { errs.Collect(errors.NewMismatchingTypesErr(node.GetId(), "RightValue", - op1Type.String(), op2Type.String())) + c.LiteralTypeToStr(op1Type), c.LiteralTypeToStr(op2Type))) } } } else if expr.GetConjunction() != nil { diff --git a/flytepropeller/pkg/compiler/validators/vars.go b/flytepropeller/pkg/compiler/validators/vars.go index 53ca67e4ee..e114dc4fc0 100644 --- a/flytepropeller/pkg/compiler/validators/vars.go +++ b/flytepropeller/pkg/compiler/validators/vars.go @@ -40,7 +40,7 @@ func validateInputVar(n c.NodeBuilder, paramName string, requireParamType bool, func validateVarType(nodeID c.NodeID, paramName string, param *flyte.Variable, expectedType *flyte.LiteralType, errs errors.CompileErrors) (ok bool) { if param.GetType().String() != expectedType.String() { - errs.Collect(errors.NewMismatchingTypesErr(nodeID, paramName, param.GetType().String(), expectedType.String())) + errs.Collect(errors.NewMismatchingTypesErr(nodeID, paramName, c.LiteralTypeToStr(param.GetType()), c.LiteralTypeToStr(expectedType))) } return !errs.HasErrors() diff --git a/flytepropeller/pkg/controller/config/config.go b/flytepropeller/pkg/controller/config/config.go index 488ada1127..4801b8993a 100644 --- a/flytepropeller/pkg/controller/config/config.go +++ b/flytepropeller/pkg/controller/config/config.go @@ -348,6 +348,7 @@ const ( type ArrayNodeConfig struct { EventVersion int `json:"event-version" pflag:",ArrayNode eventing version. 
0 => legacy (drop-in replacement for maptask), 1 => new"` DefaultParallelismBehavior ParallelismBehavior `json:"default-parallelism-behavior" pflag:",Default parallelism behavior for array nodes"` + UseMapPluginLogs bool `json:"use-map-plugin-logs" pflag:",Override subNode log links with those configured for the map plugin logs"` } // GetConfig extracts the Configuration from the global config module in flytestdlib and returns the corresponding type-casted object. diff --git a/flytepropeller/pkg/controller/config/config_flags.go b/flytepropeller/pkg/controller/config/config_flags.go index d8496a56fe..5d26351908 100755 --- a/flytepropeller/pkg/controller/config/config_flags.go +++ b/flytepropeller/pkg/controller/config/config_flags.go @@ -112,6 +112,7 @@ func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "node-execution-worker-count"), defaultConfig.NodeExecutionWorkerCount, "Number of workers to evaluate node executions, currently only used for array nodes") cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "array-node-config.event-version"), defaultConfig.ArrayNode.EventVersion, "ArrayNode eventing version. 
0 => legacy (drop-in replacement for maptask), 1 => new") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "array-node-config.default-parallelism-behavior"), defaultConfig.ArrayNode.DefaultParallelismBehavior, "Default parallelism behavior for array nodes") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "array-node-config.use-map-plugin-logs"), defaultConfig.ArrayNode.UseMapPluginLogs, "Override subNode log links with those configured for the map plugin logs") cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "literal-offloading-config.Enabled"), defaultConfig.LiteralOffloadingConfig.Enabled, "") cmdFlags.StringToString(fmt.Sprintf("%v%v", prefix, "literal-offloading-config.supported-sdk-versions"), defaultConfig.LiteralOffloadingConfig.SupportedSDKVersions, "Maps flytekit and union SDK names to minimum supported version that can handle reading offloaded literals.") cmdFlags.Int64(fmt.Sprintf("%v%v", prefix, "literal-offloading-config.min-size-in-mb-for-offloading"), defaultConfig.LiteralOffloadingConfig.MinSizeInMBForOffloading, "Size of a literal at which to trigger offloading") diff --git a/flytepropeller/pkg/controller/config/config_flags_test.go b/flytepropeller/pkg/controller/config/config_flags_test.go index 109dc47b28..380a4b940b 100755 --- a/flytepropeller/pkg/controller/config/config_flags_test.go +++ b/flytepropeller/pkg/controller/config/config_flags_test.go @@ -967,6 +967,20 @@ func TestConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_array-node-config.use-map-plugin-logs", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("array-node-config.use-map-plugin-logs", testValue) + if vBool, err := cmdFlags.GetBool("array-node-config.use-map-plugin-logs"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.ArrayNode.UseMapPluginLogs) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) t.Run("Test_literal-offloading-config.Enabled", func(t *testing.T) { t.Run("Override", func(t *testing.T) 
{ diff --git a/flytepropeller/pkg/controller/controller.go b/flytepropeller/pkg/controller/controller.go index 39047e811d..8d733c33a3 100644 --- a/flytepropeller/pkg/controller/controller.go +++ b/flytepropeller/pkg/controller/controller.go @@ -338,23 +338,6 @@ func New(ctx context.Context, cfg *config.Config, kubeClientset kubernetes.Inter return nil, errors.Wrapf(err, "Failed to create Metadata storage") } - var launchPlanActor launchplan.FlyteAdmin - if cfg.EnableAdminLauncher { - launchPlanActor, err = launchplan.NewAdminLaunchPlanExecutor(ctx, adminClient, cfg.DownstreamEval.Duration, - launchplan.GetAdminConfig(), scope.NewSubScope("admin_launcher"), store) - if err != nil { - logger.Errorf(ctx, "failed to create Admin workflow Launcher, err: %v", err.Error()) - return nil, err - } - - if err := launchPlanActor.Initialize(ctx); err != nil { - logger.Errorf(ctx, "failed to initialize Admin workflow Launcher, err: %v", err.Error()) - return nil, err - } - } else { - launchPlanActor = launchplan.NewFailFastLaunchPlanExecutor() - } - logger.Info(ctx, "Setting up event sink and recorder") eventSink, err := events.ConstructEventSink(ctx, events.GetConfig(ctx), scope.NewSubScope("event_sink")) if err != nil { @@ -434,6 +417,23 @@ func New(ctx context.Context, cfg *config.Config, kubeClientset kubernetes.Inter controller.levelMonitor = NewResourceLevelMonitor(scope.NewSubScope("collector"), flyteworkflowInformer.Lister()) + var launchPlanActor launchplan.FlyteAdmin + if cfg.EnableAdminLauncher { + launchPlanActor, err = launchplan.NewAdminLaunchPlanExecutor(ctx, adminClient, launchplan.GetAdminConfig(), + scope.NewSubScope("admin_launcher"), store, controller.enqueueWorkflowForNodeUpdates) + if err != nil { + logger.Errorf(ctx, "failed to create Admin workflow Launcher, err: %v", err.Error()) + return nil, err + } + + if err := launchPlanActor.Initialize(ctx); err != nil { + logger.Errorf(ctx, "failed to initialize Admin workflow Launcher, err: %v", err.Error()) + 
return nil, err + } + } else { + launchPlanActor = launchplan.NewFailFastLaunchPlanExecutor() + } + recoveryClient := recovery.NewClient(adminClient) nodeHandlerFactory, err := factory.NewHandlerFactory(ctx, launchPlanActor, launchPlanActor, kubeClient, kubeClientset, catalogClient, recoveryClient, &cfg.EventConfig, cfg.LiteralOffloadingConfig, cfg.ClusterID, signalClient, scope) diff --git a/flytepropeller/pkg/controller/nodes/array/event_recorder.go b/flytepropeller/pkg/controller/nodes/array/event_recorder.go index ac1ad3e39f..999b383f39 100644 --- a/flytepropeller/pkg/controller/nodes/array/event_recorder.go +++ b/flytepropeller/pkg/controller/nodes/array/event_recorder.go @@ -10,14 +10,44 @@ import ( idlcore "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/event" + "github.com/flyteorg/flyte/flyteplugins/go/tasks/logs" + pluginscore "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/encoding" + "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/tasklog" + mapplugin "github.com/flyteorg/flyte/flyteplugins/go/tasks/plugins/array/k8s" + "github.com/flyteorg/flyte/flyteplugins/go/tasks/plugins/k8s/pod" "github.com/flyteorg/flyte/flytepropeller/pkg/apis/flyteworkflow/v1alpha1" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/config" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/common" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/interfaces" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/task" + "github.com/flyteorg/flyte/flytestdlib/logger" ) +type taskExecutionID struct { + pluginscore.TaskExecutionID + + generatedName string + id *idlcore.TaskExecutionIdentifier + nodeID string +} + +func (t *taskExecutionID) GetGeneratedName() string { + return t.generatedName +} + +func (t *taskExecutionID) GetID() idlcore.TaskExecutionIdentifier { + 
return *t.id +} + +func (t *taskExecutionID) GetGeneratedNameWith(minLength, maxLength int) (string, error) { + return "", nil +} + +func (t *taskExecutionID) GetUniqueNodeID() string { + return t.nodeID +} + type arrayEventRecorder interface { interfaces.EventRecorder process(ctx context.Context, nCtx interfaces.NodeExecutionContext, index int, retryAttempt uint32) error @@ -83,7 +113,25 @@ func (e *externalResourcesEventRecorder) process(ctx context.Context, nCtx inter }) } + var mapLogPlugin tasklog.Plugin + if config.GetConfig().ArrayNode.UseMapPluginLogs { + mapLogPlugin, err = logs.InitializeLogPlugins(&mapplugin.GetConfig().LogConfig.Config) + if err != nil { + logger.Warnf(ctx, "failed to initialize log plugin with error:%v", err) + } + } + for _, taskExecutionEvent := range e.taskEvents { + if mapLogPlugin != nil && len(taskExecutionEvent.Logs) > 0 { + // override log links for subNode execution with map plugin + logs, err := getPluginLogs(mapLogPlugin, nCtx, index, retryAttempt) + if err != nil { + logger.Warnf(ctx, "failed to compute logs for ArrayNode:%s index:%d retryAttempt:%d with error:%v", nCtx.NodeID(), index, retryAttempt, err) + } else { + taskExecutionEvent.Logs = logs + } + } + for _, log := range taskExecutionEvent.Logs { log.Name = fmt.Sprintf("%s-%d", log.Name, index) } @@ -213,6 +261,85 @@ func newArrayEventRecorder(eventRecorder interfaces.EventRecorder) arrayEventRec } } +func getPluginLogs(logPlugin tasklog.Plugin, nCtx interfaces.NodeExecutionContext, index int, retryAttempt uint32) ([]*idlcore.TaskLog, error) { + subNodeSpec := nCtx.Node().GetArrayNode().GetSubNodeSpec() + + // retrieve taskTemplate from subNode + taskID := subNodeSpec.GetTaskID() + executableTask, err := nCtx.ExecutionContext().GetTask(*taskID) + if err != nil { + return nil, err + } + + taskTemplate := executableTask.CoreTask() + + // build TaskExecutionID + taskExecutionIdentifier := &idlcore.TaskExecutionIdentifier{ + TaskId: taskTemplate.GetId(), // use taskID 
from subNodeSpec + RetryAttempt: nCtx.CurrentAttempt(), + NodeExecutionId: nCtx.NodeExecutionMetadata().GetNodeExecutionID(), // use node metadata from ArrayNode + } + + nodeID := nCtx.NodeID() + if nCtx.ExecutionContext().GetEventVersion() != v1alpha1.EventVersion0 { + var err error + nodeID, err = common.GenerateUniqueID(nCtx.ExecutionContext().GetParentInfo(), nCtx.NodeID()) + if err != nil { + return nil, err + } + } + + length := task.IDMaxLength + if l := pod.DefaultPodPlugin.GetProperties().GeneratedNameMaxLength; l != nil { + length = *l + } + + uniqueID, err := encoding.FixedLengthUniqueIDForParts(length, []string{nCtx.NodeExecutionMetadata().GetOwnerID().Name, nodeID, strconv.Itoa(int(nCtx.CurrentAttempt()))}) + if err != nil { + return nil, err + } + + taskExecID := &taskExecutionID{ + generatedName: uniqueID, + id: taskExecutionIdentifier, + nodeID: nodeID, + } + + // compute podName and containerName + stCtx := mapplugin.NewSubTaskExecutionID(taskExecID, index, uint64(retryAttempt)) + + podName := stCtx.GetGeneratedName() + containerName := stCtx.GetGeneratedName() + + // initialize map plugin specific LogTemplateVars + extraLogTemplateVars := []tasklog.TemplateVar{ + { + Regex: mapplugin.LogTemplateRegexes.ExecutionIndex, + Value: strconv.FormatUint(uint64(index), 10), + }, + { + Regex: mapplugin.LogTemplateRegexes.RetryAttempt, + Value: strconv.FormatUint(uint64(retryAttempt), 10), + }, + } + + logs, err := logPlugin.GetTaskLogs( + tasklog.Input{ + PodName: podName, + Namespace: nCtx.NodeExecutionMetadata().GetNamespace(), + ContainerName: containerName, + TaskExecutionID: taskExecID, + ExtraTemplateVars: extraLogTemplateVars, + TaskTemplate: taskTemplate, + }, + ) + if err != nil { + return nil, err + } + + return logs.TaskLogs, nil +} + func sendEvents(ctx context.Context, nCtx interfaces.NodeExecutionContext, index int, retryAttempt uint32, nodePhase idlcore.NodeExecution_Phase, taskPhase idlcore.TaskExecution_Phase, eventRecorder 
interfaces.EventRecorder, eventConfig *config.EventConfig) error { diff --git a/flytepropeller/pkg/controller/nodes/array/event_recorder_test.go b/flytepropeller/pkg/controller/nodes/array/event_recorder_test.go index 9d0f6faeb5..64fbff7666 100644 --- a/flytepropeller/pkg/controller/nodes/array/event_recorder_test.go +++ b/flytepropeller/pkg/controller/nodes/array/event_recorder_test.go @@ -2,9 +2,19 @@ package array import ( "context" + "testing" + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/types" + + idlcore "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/event" + "github.com/flyteorg/flyte/flyteplugins/go/tasks/logs" + "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/tasklog" + "github.com/flyteorg/flyte/flytepropeller/pkg/apis/flyteworkflow/v1alpha1" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/config" + execmocks "github.com/flyteorg/flyte/flytepropeller/pkg/controller/executors/mocks" + "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/interfaces/mocks" ) type bufferedEventRecorder struct { @@ -25,3 +35,72 @@ func (b *bufferedEventRecorder) RecordNodeEvent(ctx context.Context, nodeExecuti func newBufferedEventRecorder() *bufferedEventRecorder { return &bufferedEventRecorder{} } + +func TestGetPluginLogs(t *testing.T) { + // initialize log plugin + logConfig := &logs.LogConfig{ + Templates: []tasklog.TemplateLogPlugin{ + tasklog.TemplateLogPlugin{ + Name: "foo", + DisplayName: "bar", + TemplateURIs: []tasklog.TemplateURI{ + "/console/projects/{{.executionProject}}/domains/{{.executionDomain}}/executions/{{.executionName}}/nodeId/{{.nodeID}}/taskId/{{.taskID}}/attempt/{{.taskRetryAttempt}}/mappedIndex/{{.subtaskExecutionIndex}}/mappedAttempt/{{.subtaskRetryAttempt}}/view/logs?duration=all", + }, + }, + }, + } + + mapLogPlugin, err := logs.InitializeLogPlugins(logConfig) + assert.Nil(t, err) + + // create NodeExecutionContext + nCtx 
:= &mocks.NodeExecutionContext{} + nCtx.OnCurrentAttempt().Return(uint32(0)) + + executionContext := &execmocks.ExecutionContext{} + executionContext.OnGetEventVersion().Return(1) + executionContext.OnGetParentInfo().Return(nil) + executionContext.OnGetTaskMatch(taskRef).Return( + &v1alpha1.TaskSpec{ + TaskTemplate: &idlcore.TaskTemplate{ + Id: &idlcore.Identifier{ + ResourceType: idlcore.ResourceType_TASK, + Project: "task_project", + Domain: "task_domain", + Name: "task_name", + Version: "task_version", + }, + }, + }, + nil, + ) + nCtx.OnExecutionContext().Return(executionContext) + + nCtx.OnNode().Return(&arrayNodeSpec) + + nodeExecutionMetadata := &mocks.NodeExecutionMetadata{} + nodeExecutionMetadata.OnGetNamespace().Return("node_namespace") + nodeExecutionMetadata.OnGetNodeExecutionID().Return(&idlcore.NodeExecutionIdentifier{ + NodeId: "node_id", + ExecutionId: &idlcore.WorkflowExecutionIdentifier{ + Project: "node_project", + Domain: "node_domain", + Name: "node_name", + }, + }) + nodeExecutionMetadata.OnGetOwnerID().Return(types.NamespacedName{ + Namespace: "wf_namespace", + Name: "wf_name", + }) + nCtx.OnNodeExecutionMetadata().Return(nodeExecutionMetadata) + + nCtx.OnNodeID().Return("foo") + + // call `getPluginLogs` + logs, err := getPluginLogs(mapLogPlugin, nCtx, 1, 0) + assert.Nil(t, err) + + assert.Equal(t, len(logConfig.Templates), len(logs)) + assert.Equal(t, "bar", logs[0].Name) + assert.Equal(t, "/console/projects/node_project/domains/node_domain/executions/node_name/nodeId/foo/taskId/task_name/attempt/0/mappedIndex/1/mappedAttempt/0/view/logs?duration=all", logs[0].Uri) +} diff --git a/flytepropeller/pkg/controller/nodes/array/handler.go b/flytepropeller/pkg/controller/nodes/array/handler.go index 834a016cb2..7495c77e16 100644 --- a/flytepropeller/pkg/controller/nodes/array/handler.go +++ b/flytepropeller/pkg/controller/nodes/array/handler.go @@ -364,6 +364,7 @@ func (a *arrayNodeHandler) Handle(ctx context.Context, nCtx interfaces.NodeExecu } } 
} + if err := eventRecorder.process(ctx, nCtx, index, subNodeStatus.GetAttempts()); err != nil { return handler.UnknownTransition, err } diff --git a/flytepropeller/pkg/controller/nodes/attr_path_resolver.go b/flytepropeller/pkg/controller/nodes/attr_path_resolver.go index 192fa1956c..3b4e46ce50 100644 --- a/flytepropeller/pkg/controller/nodes/attr_path_resolver.go +++ b/flytepropeller/pkg/controller/nodes/attr_path_resolver.go @@ -46,8 +46,7 @@ func resolveAttrPathInPromise(ctx context.Context, datastore *storage.DataStore, } currVal = currVal.GetCollection().GetLiterals()[attr.GetIntValue()] index++ - // scalar is always the leaf, so we can break here - case *core.Literal_Scalar: + default: break } } @@ -107,9 +106,7 @@ func resolveAttrPathInPbStruct(nodeID string, st *structpb.Struct, bindAttrPath } // resolveAttrPathInBinary resolves the binary idl object (e.g. dataclass, pydantic basemodel) with attribute path -func resolveAttrPathInBinary(nodeID string, binaryIDL *core.Binary, bindAttrPath []*core.PromiseAttribute) (*core. - Literal, - error) { +func resolveAttrPathInBinary(nodeID string, binaryIDL *core.Binary, bindAttrPath []*core.PromiseAttribute) (*core.Literal, error) { binaryBytes := binaryIDL.GetValue() serializationFormat := binaryIDL.GetTag() @@ -165,6 +162,28 @@ func resolveAttrPathInBinary(nodeID string, binaryIDL *core.Binary, bindAttrPath } } + // In arrayNodeHandler, the resolved value should be a literal collection. + // If the current value is already a collection, convert it to a literal collection. + // This conversion does not affect how Flytekit processes the resolved value. 
+ if collection, ok := currVal.([]any); ok { + literals := make([]*core.Literal, len(collection)) + for i, v := range collection { + resolvedBinaryBytes, err := msgpack.Marshal(v) + if err != nil { + return nil, err + } + literals[i] = constructResolvedBinary(resolvedBinaryBytes, serializationFormat) + } + + return &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: literals, + }, + }, + }, nil + } + // Marshal the current value to MessagePack bytes resolvedBinaryBytes, err := msgpack.Marshal(currVal) if err != nil { diff --git a/flytepropeller/pkg/controller/nodes/attr_path_resolver_test.go b/flytepropeller/pkg/controller/nodes/attr_path_resolver_test.go index 1467fc0ea4..e8e28ac08f 100644 --- a/flytepropeller/pkg/controller/nodes/attr_path_resolver_test.go +++ b/flytepropeller/pkg/controller/nodes/attr_path_resolver_test.go @@ -10,13 +10,14 @@ import ( "github.com/stretchr/testify/assert" "google.golang.org/protobuf/types/known/structpb" + "github.com/flyteorg/flyte/flyteidl/clients/go/coreutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/errors" ) -// FlyteFile and FlyteDirectory represented as map[interface{}]interface{} -type FlyteFile map[interface{}]interface{} -type FlyteDirectory map[interface{}]interface{} +// FlyteFile and FlyteDirectory represented as map[any]any +type FlyteFile map[any]any +type FlyteDirectory map[any]any // InnerDC struct (equivalent to InnerDC dataclass in Python) type InnerDC struct { @@ -73,7 +74,7 @@ func NewScalarLiteral(value string) *core.Literal { } } -func NewStructFromMap(m map[string]interface{}) *structpb.Struct { +func NewStructFromMap(m map[string]any) *structpb.Struct { st, _ := structpb.NewStruct(m) return st } @@ -135,7 +136,7 @@ func TestResolveAttrPathInStruct(t *testing.T) { Value: &core.Literal_Scalar{ Scalar: &core.Scalar{ Value: &core.Scalar_Generic{ - Generic: 
NewStructFromMap(map[string]interface{}{"foo": "bar"}), + Generic: NewStructFromMap(map[string]any{"foo": "bar"}), }, }, }, @@ -157,8 +158,8 @@ func TestResolveAttrPathInStruct(t *testing.T) { Scalar: &core.Scalar{ Value: &core.Scalar_Generic{ Generic: NewStructFromMap( - map[string]interface{}{ - "foo": []interface{}{"bar1", "bar2"}, + map[string]any{ + "foo": []any{"bar1", "bar2"}, }, ), }, @@ -187,8 +188,8 @@ func TestResolveAttrPathInStruct(t *testing.T) { Scalar: &core.Scalar{ Value: &core.Scalar_Generic{ Generic: NewStructFromMap( - map[string]interface{}{ - "foo": []interface{}{[]interface{}{"bar1", "bar2"}}, + map[string]any{ + "foo": []any{[]any{"bar1", "bar2"}}, }, ), }, @@ -236,7 +237,7 @@ func TestResolveAttrPathInStruct(t *testing.T) { Value: &core.Literal_Scalar{ Scalar: &core.Scalar{ Value: &core.Scalar_Generic{ - Generic: NewStructFromMap(map[string]interface{}{"bar": "car"}), + Generic: NewStructFromMap(map[string]any{"bar": "car"}), }, }, }, @@ -276,9 +277,9 @@ func TestResolveAttrPathInStruct(t *testing.T) { Scalar: &core.Scalar{ Value: &core.Scalar_Generic{ Generic: NewStructFromMap( - map[string]interface{}{ - "foo": map[string]interface{}{ - "bar": map[string]interface{}{ + map[string]any{ + "foo": map[string]any{ + "bar": map[string]any{ "baz": 42, }, }, @@ -306,7 +307,7 @@ func TestResolveAttrPathInStruct(t *testing.T) { Scalar: &core.Scalar{ Value: &core.Scalar_Generic{ Generic: NewStructFromMap( - map[string]interface{}{ + map[string]any{ "baz": 42, }, ), @@ -365,7 +366,7 @@ func TestResolveAttrPathInStruct(t *testing.T) { Value: &core.Literal_Scalar{ Scalar: &core.Scalar{ Value: &core.Scalar_Generic{ - Generic: NewStructFromMap(map[string]interface{}{"foo": "bar"}), + Generic: NewStructFromMap(map[string]any{"foo": "bar"}), }, }, }, @@ -387,8 +388,8 @@ func TestResolveAttrPathInStruct(t *testing.T) { Scalar: &core.Scalar{ Value: &core.Scalar_Generic{ Generic: NewStructFromMap( - map[string]interface{}{ - "foo": []interface{}{"bar1", 
"bar2"}, + map[string]any{ + "foo": []any{"bar1", "bar2"}, }, ), }, @@ -495,11 +496,35 @@ func createNestedDC() DC { func TestResolveAttrPathInBinary(t *testing.T) { // Helper function to convert a map to msgpack bytes and then to BinaryIDL - toMsgpackBytes := func(m interface{}) []byte { + toMsgpackBytes := func(m any) []byte { msgpackBytes, err := msgpack.Marshal(m) assert.NoError(t, err) return msgpackBytes } + toLiteralCollectionWithMsgpackBytes := func(collection []any) *core.Literal { + literals := make([]*core.Literal, len(collection)) + for i, v := range collection { + resolvedBinaryBytes, _ := msgpack.Marshal(v) + literals[i] = constructResolvedBinary(resolvedBinaryBytes, coreutils.MESSAGEPACK) + } + return &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: literals, + }, + }, + } + } + fromLiteralCollectionWithMsgpackBytes := func(lv *core.Literal) []any { + literals := lv.GetCollection().GetLiterals() + collection := make([]any, len(literals)) + for i, l := range literals { + var v any + _ = msgpack.Unmarshal(l.GetScalar().GetBinary().Value, &v) + collection[i] = v + } + return collection + } flyteFile := FlyteFile{ "path": "s3://my-s3-bucket/example.txt", @@ -630,18 +655,7 @@ func TestResolveAttrPathInBinary(t *testing.T) { }, }, }, - expected: &core.Literal{ - Value: &core.Literal_Scalar{ - Scalar: &core.Scalar{ - Value: &core.Scalar_Binary{ - Binary: &core.Binary{ - Value: toMsgpackBytes([]int{0, 1, 2, -1, -2}), - Tag: "msgpack", - }, - }, - }, - }, - }, + expected: toLiteralCollectionWithMsgpackBytes([]any{0, 1, 2, -1, -2}), hasError: false, }, { @@ -653,18 +667,7 @@ func TestResolveAttrPathInBinary(t *testing.T) { }, }, }, - expected: &core.Literal{ - Value: &core.Literal_Scalar{ - Scalar: &core.Scalar{ - Value: &core.Scalar_Binary{ - Binary: &core.Binary{ - Value: toMsgpackBytes([]FlyteFile{{"path": "s3://my-s3-bucket/example.txt"}}), - Tag: "msgpack", - }, - }, - }, - }, - }, + expected: 
toLiteralCollectionWithMsgpackBytes([]any{flyteFile}), hasError: false, }, { @@ -676,18 +679,7 @@ func TestResolveAttrPathInBinary(t *testing.T) { }, }, }, - expected: &core.Literal{ - Value: &core.Literal_Scalar{ - Scalar: &core.Scalar{ - Value: &core.Scalar_Binary{ - Binary: &core.Binary{ - Value: toMsgpackBytes([][]int{{0}, {1}, {-1}}), - Tag: "msgpack", - }, - }, - }, - }, - }, + expected: toLiteralCollectionWithMsgpackBytes([]any{[]int{0}, []int{1}, []int{-1}}), hasError: false, }, { @@ -699,18 +691,8 @@ func TestResolveAttrPathInBinary(t *testing.T) { }, }, }, - expected: &core.Literal{ - Value: &core.Literal_Scalar{ - Scalar: &core.Scalar{ - Value: &core.Scalar_Binary{ - Binary: &core.Binary{ - Value: toMsgpackBytes([]map[int]bool{{0: false}, {1: true}, {-1: true}}), - Tag: "msgpack", - }, - }, - }, - }, - }, + expected: toLiteralCollectionWithMsgpackBytes([]any{map[int]bool{0: false}, map[int]bool{1: true}, + map[int]bool{-1: true}}), hasError: false, }, { @@ -1037,18 +1019,7 @@ func TestResolveAttrPathInBinary(t *testing.T) { }, }, }, - expected: &core.Literal{ - Value: &core.Literal_Scalar{ - Scalar: &core.Scalar{ - Value: &core.Scalar_Binary{ - Binary: &core.Binary{ - Value: toMsgpackBytes([]int{0, 1, 2, -1, -2}), - Tag: "msgpack", - }, - }, - }, - }, - }, + expected: toLiteralCollectionWithMsgpackBytes([]any{0, 1, 2, -1, -2}), hasError: false, }, { @@ -1065,18 +1036,7 @@ func TestResolveAttrPathInBinary(t *testing.T) { }, }, }, - expected: &core.Literal{ - Value: &core.Literal_Scalar{ - Scalar: &core.Scalar{ - Value: &core.Scalar_Binary{ - Binary: &core.Binary{ - Value: toMsgpackBytes([]FlyteFile{flyteFile}), - Tag: "msgpack", - }, - }, - }, - }, - }, + expected: toLiteralCollectionWithMsgpackBytes([]any{flyteFile}), hasError: false, }, { @@ -1093,18 +1053,7 @@ func TestResolveAttrPathInBinary(t *testing.T) { }, }, }, - expected: &core.Literal{ - Value: &core.Literal_Scalar{ - Scalar: &core.Scalar{ - Value: &core.Scalar_Binary{ - Binary: &core.Binary{ - 
Value: toMsgpackBytes([][]int{{0}, {1}, {-1}}), - Tag: "msgpack", - }, - }, - }, - }, - }, + expected: toLiteralCollectionWithMsgpackBytes([]any{[]int{0}, []int{1}, []int{-1}}), hasError: false, }, { @@ -1126,18 +1075,7 @@ func TestResolveAttrPathInBinary(t *testing.T) { }, }, }, - expected: &core.Literal{ - Value: &core.Literal_Scalar{ - Scalar: &core.Scalar{ - Value: &core.Scalar_Binary{ - Binary: &core.Binary{ - Value: toMsgpackBytes([]int{0}), - Tag: "msgpack", - }, - }, - }, - }, - }, + expected: toLiteralCollectionWithMsgpackBytes([]any{0}), hasError: false, }, { @@ -1192,18 +1130,11 @@ func TestResolveAttrPathInBinary(t *testing.T) { }, }, }, - expected: &core.Literal{ - Value: &core.Literal_Scalar{ - Scalar: &core.Scalar{ - Value: &core.Scalar_Binary{ - Binary: &core.Binary{ - Value: toMsgpackBytes([]map[int]bool{{0: false}, {1: true}, {-1: true}}), - Tag: "msgpack", - }, - }, - }, - }, - }, + expected: toLiteralCollectionWithMsgpackBytes([]any{ + map[int]bool{0: false}, + map[int]bool{1: true}, + map[int]bool{-1: true}, + }), hasError: false, }, { @@ -1422,10 +1353,10 @@ func TestResolveAttrPathInBinary(t *testing.T) { Scalar: &core.Scalar{ Value: &core.Scalar_Binary{ Binary: &core.Binary{ - Value: toMsgpackBytes(map[string]interface{}{ - "foo": map[string]interface{}{ + Value: toMsgpackBytes(map[string]any{ + "foo": map[string]any{ "bar": int64(42), - "baz": map[string]interface{}{ + "baz": map[string]any{ "qux": 3.14, "quux": "str", }, @@ -1465,8 +1396,8 @@ func TestResolveAttrPathInBinary(t *testing.T) { Scalar: &core.Scalar{ Value: &core.Scalar_Binary{ Binary: &core.Binary{ - Value: toMsgpackBytes(map[string]interface{}{ - "foo": []interface{}{int64(42), 3.14, "str"}, + Value: toMsgpackBytes(map[string]any{ + "foo": []any{int64(42), 3.14, "str"}, }), Tag: "msgpack", }, @@ -1499,13 +1430,13 @@ func TestResolveAttrPathInBinary(t *testing.T) { assert.Error(t, err, i) assert.ErrorContains(t, err, errors.PromiseAttributeResolveError, i) } else { - var 
expectedValue, actualValue interface{} + var expectedValue, actualValue any - // Helper function to unmarshal a Binary Literal into an interface{} - unmarshalBinaryLiteral := func(literal *core.Literal) (interface{}, error) { + // Helper function to unmarshal a Binary Literal into an any + unmarshalBinaryLiteral := func(literal *core.Literal) (any, error) { if scalar, ok := literal.Value.(*core.Literal_Scalar); ok { if binary, ok := scalar.Scalar.Value.(*core.Scalar_Binary); ok { - var value interface{} + var value any err := msgpack.Unmarshal(binary.Binary.Value, &value) return value, err } @@ -1513,16 +1444,22 @@ func TestResolveAttrPathInBinary(t *testing.T) { return nil, fmt.Errorf("literal is not a Binary Scalar") } - // Unmarshal the expected value - expectedValue, err := unmarshalBinaryLiteral(arg.expected) - if err != nil { - t.Fatalf("Failed to unmarshal expected value in test case %d: %v", i, err) + if arg.expected.GetCollection() != nil { + expectedValue = fromLiteralCollectionWithMsgpackBytes(arg.expected) + } else { + expectedValue, err = unmarshalBinaryLiteral(arg.expected) + if err != nil { + t.Fatalf("Failed to unmarshal expected value in test case %d: %v", i, err) + } } - // Unmarshal the resolved value - actualValue, err = unmarshalBinaryLiteral(resolved) - if err != nil { - t.Fatalf("Failed to unmarshal resolved value in test case %d: %v", i, err) + if resolved.GetCollection() != nil { + actualValue = fromLiteralCollectionWithMsgpackBytes(resolved) + } else { + actualValue, err = unmarshalBinaryLiteral(resolved) + if err != nil { + t.Fatalf("Failed to unmarshal resolved value in test case %d: %v", i, err) + } } // Deeply compare the expected and actual values, ignoring map ordering diff --git a/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow_test.go b/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow_test.go index 389ea0439b..3cb27dd65f 100644 --- a/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow_test.go +++ 
b/flytepropeller/pkg/controller/nodes/dynamic/dynamic_workflow_test.go @@ -499,6 +499,11 @@ func Test_dynamicNodeHandler_buildContextualDynamicWorkflow_withLaunchPlans(t *t int64(1501), storage.Options{}, mock.MatchedBy(func(rdr *bytes.Reader) bool { return true })).Return(errors.New("foo")) + composedPBStore.OnWriteProtobufMatch( + mock.MatchedBy(func(ctx context.Context) bool { return true }), + storage.DataReference("s3://my-s3-bucket/foo/bar/dynamic_compiled.pb"), + storage.Options{}, + mock.MatchedBy(func(pb *core.CompiledWorkflowClosure) bool { return true })).Return(nil) referenceConstructor := storageMocks.ReferenceConstructor{} referenceConstructor.On("ConstructReference", mock.MatchedBy(func(ctx context.Context) bool { return true }), storage.DataReference("output-dir"), "futures.pb").Return( diff --git a/flytepropeller/pkg/controller/nodes/executor.go b/flytepropeller/pkg/controller/nodes/executor.go index 2c3103e4ad..b25ad64fb6 100644 --- a/flytepropeller/pkg/controller/nodes/executor.go +++ b/flytepropeller/pkg/controller/nodes/executor.go @@ -1248,10 +1248,17 @@ func (c *nodeExecutor) handleQueuedOrRunningNode(ctx context.Context, nCtx inter targetEntity := common.GetTargetEntity(ctx, nCtx) - nev, err := ToNodeExecutionEvent(nCtx.NodeExecutionMetadata().GetNodeExecutionID(), - p, nCtx.InputReader().GetInputPath().String(), nCtx.NodeStatus(), nCtx.ExecutionContext().GetEventVersion(), - nCtx.ExecutionContext().GetParentInfo(), nCtx.Node(), c.clusterID, nCtx.NodeStateReader().GetDynamicNodeState().Phase, - c.eventConfig, targetEntity) + nev, err := ToNodeExecutionEvent( + nCtx.NodeExecutionMetadata().GetNodeExecutionID(), + p, + nCtx.InputReader().GetInputPath().String(), + nCtx.NodeStatus(), + nCtx.ExecutionContext().GetEventVersion(), + nCtx.ExecutionContext().GetParentInfo(), nCtx.Node(), + c.clusterID, + nCtx.NodeStateReader().GetDynamicNodeState().Phase, + c.eventConfig, + targetEntity) if err != nil { return interfaces.NodeStatusUndefined, 
errors.Wrapf(errors.IllegalStateError, nCtx.NodeID(), err, "could not convert phase info to event") } diff --git a/flytepropeller/pkg/controller/nodes/executor_test.go b/flytepropeller/pkg/controller/nodes/executor_test.go index 7fc4c05992..35ab105623 100644 --- a/flytepropeller/pkg/controller/nodes/executor_test.go +++ b/flytepropeller/pkg/controller/nodes/executor_test.go @@ -1723,6 +1723,7 @@ func TestNodeExecutor_FinalizeHandler(t *testing.T) { assert.NoError(t, exec.FinalizeHandler(ctx, nil, nil, nl, n)) }) } + func TestNodeExecutionEventStartNode(t *testing.T) { execID := &core.WorkflowExecutionIdentifier{ Name: "e1", @@ -1763,9 +1764,11 @@ func TestNodeExecutionEventStartNode(t *testing.T) { ns.OnGetParentTaskID().Return(tID) ns.OnGetOutputDirMatch(mock.Anything).Return("dummy://dummyOutUrl") ns.OnGetDynamicNodeStatus().Return(&v1alpha1.DynamicNodeStatus{}) + ev, err := ToNodeExecutionEvent(nID, p, "reference", ns, v1alpha1.EventVersion0, parentInfo, n, testClusterID, v1alpha1.DynamicNodePhaseNone, &config.EventConfig{ RawOutputPolicy: config.RawOutputPolicyReference, }, subWfID) + assert.NoError(t, err) assert.Equal(t, "start-node", ev.Id.NodeId) assert.Equal(t, execID, ev.Id.ExecutionId) @@ -1778,6 +1781,7 @@ func TestNodeExecutionEventStartNode(t *testing.T) { ev.OutputResult.(*event.NodeExecutionEvent_OutputUri).OutputUri) assert.Equal(t, ev.ProducerId, testClusterID) assert.Equal(t, subWfID, ev.GetTargetEntity()) + assert.Nil(t, ev.InputValue) } func TestNodeExecutionEventV0(t *testing.T) { @@ -1821,6 +1825,7 @@ func TestNodeExecutionEventV0(t *testing.T) { assert.Empty(t, ev.NodeName) assert.Empty(t, ev.RetryGroup) assert.Empty(t, ev.TargetEntity) + assert.Equal(t, "reference", ev.GetInputUri()) } func TestNodeExecutionEventV1(t *testing.T) { @@ -1859,9 +1864,11 @@ func TestNodeExecutionEventV1(t *testing.T) { ns.OnGetPhase().Return(v1alpha1.NodePhaseNotYetStarted) nl.OnGetNodeExecutionStatusMatch(mock.Anything, id).Return(ns) 
ns.OnGetParentTaskID().Return(tID) + eventOpt, err := ToNodeExecutionEvent(nID, p, "reference", ns, v1alpha1.EventVersion1, parentInfo, n, testClusterID, v1alpha1.DynamicNodePhaseNone, &config.EventConfig{ RawOutputPolicy: config.RawOutputPolicyInline, }, nil) + assert.NoError(t, err) assert.Equal(t, "np1-2-n1", eventOpt.Id.NodeId) assert.Equal(t, execID, eventOpt.Id.ExecutionId) @@ -1875,6 +1882,7 @@ func TestNodeExecutionEventV1(t *testing.T) { assert.Equal(t, "2", eventOpt.RetryGroup) assert.True(t, proto.Equal(eventOpt.GetInputData(), inputs)) assert.Empty(t, eventOpt.TargetEntity) + assert.Equal(t, inputs, eventOpt.GetInputData()) } func TestNodeExecutor_RecursiveNodeHandler_ParallelismLimit(t *testing.T) { diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/handler_test.go b/flytepropeller/pkg/controller/nodes/subworkflow/handler_test.go index bc16e648ab..ea21ce1171 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/handler_test.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/handler_test.go @@ -148,6 +148,10 @@ func TestWorkflowNodeHandler_StartNode_Launchplan(t *testing.T) { Version: "v", ResourceType: core.ResourceType_LAUNCH_PLAN, } + k8sWorkflowID := types.NamespacedName{ + Namespace: "namespace", + Name: "name", + } mockWfNode := &mocks2.ExecutableWorkflowNode{} mockWfNode.OnGetLaunchPlanRefID().Return(&v1alpha1.Identifier{ Identifier: lpID, @@ -178,6 +182,7 @@ func TestWorkflowNodeHandler_StartNode_Launchplan(t *testing.T) { }), mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }), mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }), + mock.MatchedBy(func(o string) bool { return o == k8sWorkflowID.String() }), ).Return(nil) nCtx := createNodeContext(v1alpha1.WorkflowNodePhaseUndefined, mockNode, mockNodeStatus) @@ -203,6 +208,7 @@ func TestWorkflowNodeHandler_StartNode_Launchplan(t *testing.T) { }), mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }), mock.MatchedBy(func(o 
*core.LiteralMap) bool { return o.Literals == nil }), + mock.MatchedBy(func(o string) bool { return o == k8sWorkflowID.String() }), ).Return(nil) nCtx := createNodeContextV1(v1alpha1.WorkflowNodePhaseUndefined, mockNode, mockNodeStatus) diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan.go index dd09c5d5d7..60802a6486 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan.go @@ -102,7 +102,8 @@ func (l *launchPlanHandler) StartLaunchPlan(ctx context.Context, nCtx interfaces } } } - err = l.launchPlan.Launch(ctx, launchCtx, childID, nCtx.Node().GetWorkflowNode().GetLaunchPlanRefID().Identifier, nodeInputs) + err = l.launchPlan.Launch(ctx, launchCtx, childID, nCtx.Node().GetWorkflowNode().GetLaunchPlanRefID().Identifier, + nodeInputs, nCtx.NodeExecutionMetadata().GetOwnerID().String()) if err != nil { if launchplan.IsAlreadyExists(err) { logger.Infof(ctx, "Execution already exists [%s].", childID.Name) diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin.go index fbe0a8c1a6..1ce0568bf6 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin.go @@ -3,7 +3,6 @@ package launchplan import ( "context" "fmt" - "time" "github.com/golang/protobuf/ptypes/wrappers" "golang.org/x/time/rate" @@ -15,6 +14,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" evtErr "github.com/flyteorg/flyte/flytepropeller/events/errors" + "github.com/flyteorg/flyte/flytepropeller/pkg/apis/flyteworkflow/v1alpha1" "github.com/flyteorg/flyte/flytepropeller/pkg/compiler/transformers/k8s" "github.com/flyteorg/flyte/flytestdlib/cache" stdErr 
"github.com/flyteorg/flyte/flytestdlib/errors" @@ -33,9 +33,10 @@ func IsWorkflowTerminated(p core.WorkflowExecution_Phase) bool { // Executor for Launchplans that executes on a remote FlyteAdmin service (if configured) type adminLaunchPlanExecutor struct { - adminClient service.AdminServiceClient - cache cache.AutoRefresh - store *storage.DataStore + adminClient service.AdminServiceClient + cache cache.AutoRefresh + store *storage.DataStore + enqueueWorkflow v1alpha1.EnqueueWorkflow } type executionCacheItem struct { @@ -43,6 +44,7 @@ type executionCacheItem struct { ExecutionClosure *admin.ExecutionClosure SyncError error ExecutionOutputs *core.LiteralMap + ParentWorkflowID v1alpha1.WorkflowID } func (e executionCacheItem) IsTerminal() bool { @@ -79,8 +81,9 @@ func (a *adminLaunchPlanExecutor) handleLaunchError(ctx context.Context, isRecov } } -func (a *adminLaunchPlanExecutor) Launch(ctx context.Context, launchCtx LaunchContext, - executionID *core.WorkflowExecutionIdentifier, launchPlanRef *core.Identifier, inputs *core.LiteralMap) error { +func (a *adminLaunchPlanExecutor) Launch(ctx context.Context, launchCtx LaunchContext, executionID *core.WorkflowExecutionIdentifier, + launchPlanRef *core.Identifier, inputs *core.LiteralMap, parentWorkflowID v1alpha1.WorkflowID) error { + var err error if launchCtx.RecoveryExecution != nil { _, err = a.adminClient.RecoverExecution(ctx, &admin.ExecutionRecoverRequest{ @@ -156,7 +159,7 @@ func (a *adminLaunchPlanExecutor) Launch(ctx context.Context, launchCtx LaunchCo } } - _, err = a.cache.GetOrCreate(executionID.String(), executionCacheItem{WorkflowExecutionIdentifier: *executionID}) + _, err = a.cache.GetOrCreate(executionID.String(), executionCacheItem{WorkflowExecutionIdentifier: *executionID, ParentWorkflowID: parentWorkflowID}) if err != nil { logger.Infof(ctx, "Failed to add ExecID [%v] to auto refresh cache", executionID) } @@ -263,6 +266,7 @@ func (a *adminLaunchPlanExecutor) syncItem(ctx context.Context, batch 
cache.Batc Item: executionCacheItem{ WorkflowExecutionIdentifier: exec.WorkflowExecutionIdentifier, SyncError: err, + ParentWorkflowID: exec.ParentWorkflowID, }, Action: cache.Update, }) @@ -293,6 +297,7 @@ func (a *adminLaunchPlanExecutor) syncItem(ctx context.Context, batch cache.Batc Item: executionCacheItem{ WorkflowExecutionIdentifier: exec.WorkflowExecutionIdentifier, SyncError: err, + ParentWorkflowID: exec.ParentWorkflowID, }, Action: cache.Update, }) @@ -312,23 +317,34 @@ func (a *adminLaunchPlanExecutor) syncItem(ctx context.Context, batch cache.Batc WorkflowExecutionIdentifier: exec.WorkflowExecutionIdentifier, ExecutionClosure: res.Closure, ExecutionOutputs: outputs, + ParentWorkflowID: exec.ParentWorkflowID, }, Action: cache.Update, }) } + // wait until all responses have been processed to enqueue parent workflows. if we do it + // prematurely, there is a chance the parent workflow evaluates before the cache is updated. + for _, itemSyncResponse := range resp { + exec := itemSyncResponse.Item.(executionCacheItem) + if exec.ExecutionClosure != nil && IsWorkflowTerminated(exec.ExecutionClosure.Phase) { + a.enqueueWorkflow(exec.ParentWorkflowID) + } + } + return resp, nil } func NewAdminLaunchPlanExecutor(_ context.Context, client service.AdminServiceClient, - syncPeriod time.Duration, cfg *AdminConfig, scope promutils.Scope, store *storage.DataStore) (FlyteAdmin, error) { + cfg *AdminConfig, scope promutils.Scope, store *storage.DataStore, enqueueWorkflow v1alpha1.EnqueueWorkflow) (FlyteAdmin, error) { exec := &adminLaunchPlanExecutor{ - adminClient: client, - store: store, + adminClient: client, + store: store, + enqueueWorkflow: enqueueWorkflow, } rateLimiter := &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(cfg.TPS), cfg.Burst)} - c, err := cache.NewAutoRefreshCache("admin-launcher", exec.syncItem, rateLimiter, syncPeriod, cfg.Workers, cfg.MaxCacheSize, scope) + c, err := cache.NewAutoRefreshCache("admin-launcher", exec.syncItem, 
rateLimiter, cfg.CacheResyncDuration.Duration, cfg.Workers, cfg.MaxCacheSize, scope) if err != nil { return nil, err } diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin_test.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin_test.go index 2a442e3262..ead1312e17 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin_test.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/admin_test.go @@ -18,6 +18,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flytestdlib/cache" mocks2 "github.com/flyteorg/flyte/flytestdlib/cache/mocks" + "github.com/flyteorg/flyte/flytestdlib/config" "github.com/flyteorg/flyte/flytestdlib/contextutils" "github.com/flyteorg/flyte/flytestdlib/promutils" "github.com/flyteorg/flyte/flytestdlib/promutils/labeled" @@ -27,6 +28,9 @@ import ( func TestAdminLaunchPlanExecutor_GetStatus(t *testing.T) { ctx := context.TODO() + adminConfig := defaultAdminConfig + adminConfig.CacheResyncDuration = config.Duration{Duration: time.Millisecond} + id := &core.WorkflowExecutionIdentifier{ Name: "n", Domain: "d", @@ -39,7 +43,7 @@ func TestAdminLaunchPlanExecutor_GetStatus(t *testing.T) { t.Run("happy", func(t *testing.T) { mockClient := &mocks.AdminServiceClient{} - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Millisecond, defaultAdminConfig, promutils.NewTestScope(), memStore) + exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) assert.NoError(t, err) mockClient.On("GetExecution", ctx, @@ -66,7 +70,7 @@ func TestAdminLaunchPlanExecutor_GetStatus(t *testing.T) { mock.MatchedBy(func(o *admin.WorkflowExecutionGetRequest) bool { return true }), ).Return(nil, status.Error(codes.NotFound, "")) - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Millisecond, defaultAdminConfig, promutils.NewTestScope(), memStore) + exec, err := 
NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) assert.NoError(t, err) assert.NoError(t, exec.Initialize(ctx)) @@ -85,6 +89,7 @@ func TestAdminLaunchPlanExecutor_GetStatus(t *testing.T) { id, &core.Identifier{}, nil, + "", ) assert.NoError(t, err) @@ -112,7 +117,7 @@ func TestAdminLaunchPlanExecutor_GetStatus(t *testing.T) { mock.MatchedBy(func(o *admin.WorkflowExecutionGetRequest) bool { return true }), ).Return(nil, status.Error(codes.Canceled, "")) - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Millisecond, defaultAdminConfig, promutils.NewTestScope(), memStore) + exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) assert.NoError(t, err) assert.NoError(t, exec.Initialize(ctx)) @@ -131,6 +136,7 @@ func TestAdminLaunchPlanExecutor_GetStatus(t *testing.T) { id, &core.Identifier{}, nil, + "", ) assert.NoError(t, err) @@ -147,6 +153,8 @@ func TestAdminLaunchPlanExecutor_GetStatus(t *testing.T) { func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { ctx := context.TODO() + adminConfig := defaultAdminConfig + adminConfig.CacheResyncDuration = config.Duration{Duration: time.Second} id := &core.WorkflowExecutionIdentifier{ Name: "n", Domain: "d", @@ -158,7 +166,7 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { t.Run("happy", func(t *testing.T) { mockClient := &mocks.AdminServiceClient{} - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Second, defaultAdminConfig, promutils.NewTestScope(), memStore) + exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) mockClient.On("CreateExecution", ctx, mock.MatchedBy(func(o *admin.ExecutionCreateRequest) bool { @@ -186,6 +194,7 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { id, &core.Identifier{}, nil, + "", ) assert.NoError(t, err) // Ensure we haven't mutated the state of the 
parent workflow. @@ -203,7 +212,7 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { Name: "orig", }, } - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Second, defaultAdminConfig, promutils.NewTestScope(), memStore) + exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) mockClient.On("RecoverExecution", ctx, mock.MatchedBy(func(o *admin.ExecutionRecoverRequest) bool { @@ -224,6 +233,7 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { id, &core.Identifier{}, nil, + "", ) assert.NoError(t, err) }) @@ -239,7 +249,7 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { Name: "orig", }, } - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Second, defaultAdminConfig, promutils.NewTestScope(), memStore) + exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) assert.NoError(t, err) recoveryErr := status.Error(codes.NotFound, "foo") @@ -273,6 +283,7 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { id, &core.Identifier{}, nil, + "", ) assert.NoError(t, err) assert.True(t, createCalled) @@ -281,7 +292,7 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { t.Run("notFound", func(t *testing.T) { mockClient := &mocks.AdminServiceClient{} - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Second, defaultAdminConfig, promutils.NewTestScope(), memStore) + exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) mockClient.On("CreateExecution", ctx, mock.MatchedBy(func(o *admin.ExecutionCreateRequest) bool { return true }), @@ -301,6 +312,7 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { id, &core.Identifier{}, nil, + "", ) assert.Error(t, err) assert.True(t, IsAlreadyExists(err)) @@ -309,7 +321,7 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { t.Run("other", func(t *testing.T) { 
mockClient := &mocks.AdminServiceClient{} - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Second, defaultAdminConfig, promutils.NewTestScope(), memStore) + exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) mockClient.On("CreateExecution", ctx, mock.MatchedBy(func(o *admin.ExecutionCreateRequest) bool { return true }), @@ -329,6 +341,7 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { id, &core.Identifier{}, nil, + "", ) assert.Error(t, err) assert.False(t, IsAlreadyExists(err)) @@ -337,6 +350,8 @@ func TestAdminLaunchPlanExecutor_Launch(t *testing.T) { func TestAdminLaunchPlanExecutor_Kill(t *testing.T) { ctx := context.TODO() + adminConfig := defaultAdminConfig + adminConfig.CacheResyncDuration = config.Duration{Duration: time.Second} id := &core.WorkflowExecutionIdentifier{ Name: "n", Domain: "d", @@ -349,7 +364,7 @@ func TestAdminLaunchPlanExecutor_Kill(t *testing.T) { t.Run("happy", func(t *testing.T) { mockClient := &mocks.AdminServiceClient{} - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Second, defaultAdminConfig, promutils.NewTestScope(), memStore) + exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) mockClient.On("TerminateExecution", ctx, mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.Id == id && o.Cause == reason }), @@ -362,7 +377,7 @@ func TestAdminLaunchPlanExecutor_Kill(t *testing.T) { t.Run("notFound", func(t *testing.T) { mockClient := &mocks.AdminServiceClient{} - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Second, defaultAdminConfig, promutils.NewTestScope(), memStore) + exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) mockClient.On("TerminateExecution", ctx, mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.Id == id && 
o.Cause == reason }), @@ -375,7 +390,7 @@ func TestAdminLaunchPlanExecutor_Kill(t *testing.T) { t.Run("other", func(t *testing.T) { mockClient := &mocks.AdminServiceClient{} - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Second, defaultAdminConfig, promutils.NewTestScope(), memStore) + exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) mockClient.On("TerminateExecution", ctx, mock.MatchedBy(func(o *admin.ExecutionTerminateRequest) bool { return o.Id == id && o.Cause == reason }), @@ -389,6 +404,8 @@ func TestAdminLaunchPlanExecutor_Kill(t *testing.T) { func TestNewAdminLaunchPlanExecutor_GetLaunchPlan(t *testing.T) { ctx := context.TODO() + adminConfig := defaultAdminConfig + adminConfig.CacheResyncDuration = config.Duration{Duration: time.Second} id := &core.Identifier{ ResourceType: core.ResourceType_LAUNCH_PLAN, Name: "n", @@ -401,7 +418,7 @@ func TestNewAdminLaunchPlanExecutor_GetLaunchPlan(t *testing.T) { t.Run("launch plan found", func(t *testing.T) { mockClient := &mocks.AdminServiceClient{} - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Second, defaultAdminConfig, promutils.NewTestScope(), memStore) + exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) assert.NoError(t, err) mockClient.OnGetLaunchPlanMatch( ctx, @@ -414,7 +431,7 @@ func TestNewAdminLaunchPlanExecutor_GetLaunchPlan(t *testing.T) { t.Run("launch plan not found", func(t *testing.T) { mockClient := &mocks.AdminServiceClient{} - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Second, defaultAdminConfig, promutils.NewTestScope(), memStore) + exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), memStore, func(string) {}) assert.NoError(t, err) mockClient.OnGetLaunchPlanMatch( ctx, @@ -443,6 +460,9 @@ type test struct { func TestAdminLaunchPlanExecutorScenarios(t 
*testing.T) { ctx := context.TODO() + adminConfig := defaultAdminConfig + adminConfig.CacheResyncDuration = config.Duration{Duration: time.Millisecond} + mockExecutionRespWithOutputs := &admin.Execution{ Closure: &admin.ExecutionClosure{ Phase: core.WorkflowExecution_SUCCEEDED, @@ -554,7 +574,7 @@ func TestAdminLaunchPlanExecutorScenarios(t *testing.T) { ComposedProtobufStore: pbStore, ReferenceConstructor: &storageMocks.ReferenceConstructor{}, } - exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, time.Millisecond, defaultAdminConfig, promutils.NewTestScope(), storageClient) + exec, err := NewAdminLaunchPlanExecutor(ctx, mockClient, adminConfig, promutils.NewTestScope(), storageClient, func(string) {}) assert.NoError(t, err) iwMock := &mocks2.ItemWrapper{} diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/adminconfig.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/adminconfig.go index ffb14e3182..0c83f803af 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/adminconfig.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/adminconfig.go @@ -1,7 +1,10 @@ package launchplan import ( + "time" + ctrlConfig "github.com/flyteorg/flyte/flytepropeller/pkg/controller/config" + "github.com/flyteorg/flyte/flytestdlib/config" ) //go:generate pflags AdminConfig --default-var defaultAdminConfig @@ -12,6 +15,9 @@ var ( Burst: 10, MaxCacheSize: 10000, Workers: 10, + CacheResyncDuration: config.Duration{ + Duration: 30 * time.Second, + }, } adminConfigSection = ctrlConfig.MustRegisterSubSection("admin-launcher", defaultAdminConfig) @@ -31,6 +37,9 @@ type AdminConfig struct { MaxCacheSize int `json:"cacheSize" pflag:",Maximum cache in terms of number of items stored."` Workers int `json:"workers" pflag:",Number of parallel workers to work on the queue."` + + // CacheResyncDuration defines the interval that the admin launcher should refresh the launchplan cache. 
+ CacheResyncDuration config.Duration `json:"cache-resync-duration" pflag:",Frequency of re-syncing launchplans within the auto refresh cache."` } func GetAdminConfig() *AdminConfig { diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/adminconfig_flags.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/adminconfig_flags.go index 3bb535e179..a0f36edb11 100755 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/adminconfig_flags.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/adminconfig_flags.go @@ -54,5 +54,6 @@ func (cfg AdminConfig) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "burst"), defaultAdminConfig.Burst, "Maximum burst for throttle") cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "cacheSize"), defaultAdminConfig.MaxCacheSize, "Maximum cache in terms of number of items stored.") cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "workers"), defaultAdminConfig.Workers, "Number of parallel workers to work on the queue.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "cache-resync-duration"), defaultAdminConfig.CacheResyncDuration.String(), "Frequency of re-syncing launchplans within the auto refresh cache.") return cmdFlags } diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/adminconfig_flags_test.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/adminconfig_flags_test.go index bbff474eb1..7e4f8f4a67 100755 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/adminconfig_flags_test.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/adminconfig_flags_test.go @@ -155,4 +155,18 @@ func TestAdminConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_cache-resync-duration", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := defaultAdminConfig.CacheResyncDuration.String() + + cmdFlags.Set("cache-resync-duration", testValue) + if vString, err := 
cmdFlags.GetString("cache-resync-duration"); err == nil { + testDecodeJson_AdminConfig(t, fmt.Sprintf("%v", vString), &actual.CacheResyncDuration) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) } diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/launchplan.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/launchplan.go index f2262ce7b2..344a9144ea 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/launchplan.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/launchplan.go @@ -5,6 +5,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flyte/flytepropeller/pkg/apis/flyteworkflow/v1alpha1" ) //go:generate mockery -all -case=underscore @@ -36,7 +37,8 @@ type LaunchContext struct { // Executor interface to be implemented by the remote system that can allow workflow launching capabilities type Executor interface { // Launch start an execution of a launchplan - Launch(ctx context.Context, launchCtx LaunchContext, executionID *core.WorkflowExecutionIdentifier, launchPlanRef *core.Identifier, inputs *core.LiteralMap) error + Launch(ctx context.Context, launchCtx LaunchContext, executionID *core.WorkflowExecutionIdentifier, + launchPlanRef *core.Identifier, inputs *core.LiteralMap, parentWorkflowID v1alpha1.WorkflowID) error // GetStatus retrieves status of a LaunchPlan execution GetStatus(ctx context.Context, executionID *core.WorkflowExecutionIdentifier) (*admin.ExecutionClosure, *core.LiteralMap, error) diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/mocks/executor.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/mocks/executor.go index aa3a1bdb25..7286c9b3b7 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/mocks/executor.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/mocks/executor.go @@ -141,8 
+141,8 @@ func (_m Executor_Launch) Return(_a0 error) *Executor_Launch { return &Executor_Launch{Call: _m.Call.Return(_a0)} } -func (_m *Executor) OnLaunch(ctx context.Context, launchCtx launchplan.LaunchContext, executionID *core.WorkflowExecutionIdentifier, launchPlanRef *core.Identifier, inputs *core.LiteralMap) *Executor_Launch { - c_call := _m.On("Launch", ctx, launchCtx, executionID, launchPlanRef, inputs) +func (_m *Executor) OnLaunch(ctx context.Context, launchCtx launchplan.LaunchContext, executionID *core.WorkflowExecutionIdentifier, launchPlanRef *core.Identifier, inputs *core.LiteralMap, parentWorkflowID string) *Executor_Launch { + c_call := _m.On("Launch", ctx, launchCtx, executionID, launchPlanRef, inputs, parentWorkflowID) return &Executor_Launch{Call: c_call} } @@ -151,13 +151,13 @@ func (_m *Executor) OnLaunchMatch(matchers ...interface{}) *Executor_Launch { return &Executor_Launch{Call: c_call} } -// Launch provides a mock function with given fields: ctx, launchCtx, executionID, launchPlanRef, inputs -func (_m *Executor) Launch(ctx context.Context, launchCtx launchplan.LaunchContext, executionID *core.WorkflowExecutionIdentifier, launchPlanRef *core.Identifier, inputs *core.LiteralMap) error { - ret := _m.Called(ctx, launchCtx, executionID, launchPlanRef, inputs) +// Launch provides a mock function with given fields: ctx, launchCtx, executionID, launchPlanRef, inputs, parentWorkflowID +func (_m *Executor) Launch(ctx context.Context, launchCtx launchplan.LaunchContext, executionID *core.WorkflowExecutionIdentifier, launchPlanRef *core.Identifier, inputs *core.LiteralMap, parentWorkflowID string) error { + ret := _m.Called(ctx, launchCtx, executionID, launchPlanRef, inputs, parentWorkflowID) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, launchplan.LaunchContext, *core.WorkflowExecutionIdentifier, *core.Identifier, *core.LiteralMap) error); ok { - r0 = rf(ctx, launchCtx, executionID, launchPlanRef, inputs) + if rf, ok := 
ret.Get(0).(func(context.Context, launchplan.LaunchContext, *core.WorkflowExecutionIdentifier, *core.Identifier, *core.LiteralMap, string) error); ok { + r0 = rf(ctx, launchCtx, executionID, launchPlanRef, inputs, parentWorkflowID) } else { r0 = ret.Error(0) } diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/mocks/flyte_admin.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/mocks/flyte_admin.go index 3fa881ecfe..ddb887e61c 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/mocks/flyte_admin.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/mocks/flyte_admin.go @@ -182,8 +182,8 @@ func (_m FlyteAdmin_Launch) Return(_a0 error) *FlyteAdmin_Launch { return &FlyteAdmin_Launch{Call: _m.Call.Return(_a0)} } -func (_m *FlyteAdmin) OnLaunch(ctx context.Context, launchCtx launchplan.LaunchContext, executionID *core.WorkflowExecutionIdentifier, launchPlanRef *core.Identifier, inputs *core.LiteralMap) *FlyteAdmin_Launch { - c_call := _m.On("Launch", ctx, launchCtx, executionID, launchPlanRef, inputs) +func (_m *FlyteAdmin) OnLaunch(ctx context.Context, launchCtx launchplan.LaunchContext, executionID *core.WorkflowExecutionIdentifier, launchPlanRef *core.Identifier, inputs *core.LiteralMap, parentWorkflowID string) *FlyteAdmin_Launch { + c_call := _m.On("Launch", ctx, launchCtx, executionID, launchPlanRef, inputs, parentWorkflowID) return &FlyteAdmin_Launch{Call: c_call} } @@ -192,13 +192,13 @@ func (_m *FlyteAdmin) OnLaunchMatch(matchers ...interface{}) *FlyteAdmin_Launch return &FlyteAdmin_Launch{Call: c_call} } -// Launch provides a mock function with given fields: ctx, launchCtx, executionID, launchPlanRef, inputs -func (_m *FlyteAdmin) Launch(ctx context.Context, launchCtx launchplan.LaunchContext, executionID *core.WorkflowExecutionIdentifier, launchPlanRef *core.Identifier, inputs *core.LiteralMap) error { - ret := _m.Called(ctx, launchCtx, executionID, launchPlanRef, inputs) +// Launch provides 
a mock function with given fields: ctx, launchCtx, executionID, launchPlanRef, inputs, parentWorkflowID +func (_m *FlyteAdmin) Launch(ctx context.Context, launchCtx launchplan.LaunchContext, executionID *core.WorkflowExecutionIdentifier, launchPlanRef *core.Identifier, inputs *core.LiteralMap, parentWorkflowID string) error { + ret := _m.Called(ctx, launchCtx, executionID, launchPlanRef, inputs, parentWorkflowID) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, launchplan.LaunchContext, *core.WorkflowExecutionIdentifier, *core.Identifier, *core.LiteralMap) error); ok { - r0 = rf(ctx, launchCtx, executionID, launchPlanRef, inputs) + if rf, ok := ret.Get(0).(func(context.Context, launchplan.LaunchContext, *core.WorkflowExecutionIdentifier, *core.Identifier, *core.LiteralMap, string) error); ok { + r0 = rf(ctx, launchCtx, executionID, launchPlanRef, inputs, parentWorkflowID) } else { r0 = ret.Error(0) } diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop.go index 666d3b1797..3f7444788d 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flyte/flytepropeller/pkg/apis/flyteworkflow/v1alpha1" "github.com/flyteorg/flyte/flytestdlib/errors" "github.com/flyteorg/flyte/flytestdlib/logger" ) @@ -15,7 +16,9 @@ type failFastWorkflowLauncher struct { Reader } -func (failFastWorkflowLauncher) Launch(ctx context.Context, launchCtx LaunchContext, executionID *core.WorkflowExecutionIdentifier, launchPlanRef *core.Identifier, inputs *core.LiteralMap) error { +func (failFastWorkflowLauncher) Launch(ctx context.Context, launchCtx LaunchContext, executionID *core.WorkflowExecutionIdentifier, + launchPlanRef 
*core.Identifier, inputs *core.LiteralMap, parentWorkflowID v1alpha1.WorkflowID) error { + logger.Infof(ctx, "Fail: Launch Workflow requested with ExecID [%s], LaunchPlan [%s]", executionID.Name, fmt.Sprintf("%s:%s:%s", launchPlanRef.Project, launchPlanRef.Domain, launchPlanRef.Name)) return errors.Wrapf(RemoteErrorUser, fmt.Errorf("badly configured system"), "please enable admin workflow launch to use launchplans") } diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop_test.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop_test.go index bf26ee0d60..c875fd3720 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop_test.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan/noop_test.go @@ -37,7 +37,9 @@ func TestFailFastWorkflowLauncher(t *testing.T) { Domain: "d", Name: "n", }, &core.Identifier{}, - nil) + nil, + "", + ) assert.Error(t, err) }) diff --git a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan_test.go b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan_test.go index 2e042d72a4..68b5383b78 100644 --- a/flytepropeller/pkg/controller/nodes/subworkflow/launchplan_test.go +++ b/flytepropeller/pkg/controller/nodes/subworkflow/launchplan_test.go @@ -52,6 +52,10 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) { Version: "v", ResourceType: core.ResourceType_LAUNCH_PLAN, } + k8sWorkflowID := types.NamespacedName{ + Namespace: "namespace", + Name: "name", + } mockWfNode := &mocks2.ExecutableWorkflowNode{} mockWfNode.On("GetLaunchPlanRefID").Return(&v1alpha1.Identifier{ Identifier: lpID, @@ -81,6 +85,7 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) { }), mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }), mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }), + mock.MatchedBy(func(o string) bool { return o == k8sWorkflowID.String() }), ).Return(nil) wfStatus := &mocks2.MutableWorkflowNodeStatus{} @@ -110,6 
+115,7 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) { }), mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }), mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }), + mock.MatchedBy(func(o string) bool { return o == k8sWorkflowID.String() }), ).Return(errors.Wrapf(launchplan.RemoteErrorAlreadyExists, fmt.Errorf("blah"), "failed")) nCtx := createNodeContext(v1alpha1.WorkflowNodePhaseUndefined, mockNode, mockNodeStatus) @@ -136,6 +142,7 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) { }), mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }), mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }), + mock.MatchedBy(func(o string) bool { return o == k8sWorkflowID.String() }), ).Return(errors.Wrapf(launchplan.RemoteErrorSystem, fmt.Errorf("blah"), "failed")) nCtx := createNodeContext(v1alpha1.WorkflowNodePhaseExecuting, mockNode, mockNodeStatus) @@ -162,6 +169,7 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) { }), mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o }), mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }), + mock.MatchedBy(func(o string) bool { return o == k8sWorkflowID.String() }), ).Return(errors.Wrapf(launchplan.RemoteErrorUser, fmt.Errorf("blah"), "failed")) nCtx := createNodeContext(v1alpha1.WorkflowNodePhaseExecuting, mockNode, mockNodeStatus) @@ -187,7 +195,7 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) { }, }, RecoveryExecution: recoveredExecID, - }, mock.Anything, mock.Anything, mock.Anything).Return(nil) + }, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil) recoveryClient := recoveryMocks.Client{} recoveryClient.On("RecoverNodeExecution", mock.Anything, recoveredExecID, mock.Anything).Return(&admin.NodeExecution{ @@ -216,6 +224,7 @@ func TestSubWorkflowHandler_StartLaunchPlan(t *testing.T) { }), mock.MatchedBy(func(o *core.Identifier) bool { return lpID == o 
}), mock.MatchedBy(func(o *core.LiteralMap) bool { return o.Literals == nil }), + mock.MatchedBy(func(o string) bool { return o == k8sWorkflowID.String() }), ).Return(nil) wfStatus := &mocks2.MutableWorkflowNodeStatus{} diff --git a/flytepropeller/pkg/controller/nodes/task/future_file_reader.go b/flytepropeller/pkg/controller/nodes/task/future_file_reader.go index fa23986812..c535ed5896 100644 --- a/flytepropeller/pkg/controller/nodes/task/future_file_reader.go +++ b/flytepropeller/pkg/controller/nodes/task/future_file_reader.go @@ -3,6 +3,8 @@ package task import ( "context" + "golang.org/x/sync/errgroup" + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flytepropeller/pkg/apis/flyteworkflow/v1alpha1" "github.com/flyteorg/flyte/flytepropeller/pkg/utils" @@ -58,11 +60,14 @@ func (f FutureFileReader) CacheExists(ctx context.Context) (bool, error) { } func (f FutureFileReader) Cache(ctx context.Context, wf *v1alpha1.FlyteWorkflow, workflowClosure *core.CompiledWorkflowClosure) error { - err := f.RemoteFileWorkflowStore.PutFlyteWorkflowCRD(ctx, wf, f.flyteWfCRDCacheLoc) - if err != nil { - return err - } - return f.RemoteFileWorkflowStore.PutCompiledFlyteWorkflow(ctx, workflowClosure, f.flyteWfClosureCacheLoc) + group, ctx := errgroup.WithContext(ctx) + group.Go(func() error { + return f.RemoteFileWorkflowStore.PutFlyteWorkflowCRD(ctx, wf, f.flyteWfCRDCacheLoc) + }) + group.Go(func() error { + return f.RemoteFileWorkflowStore.PutCompiledFlyteWorkflow(ctx, workflowClosure, f.flyteWfClosureCacheLoc) + }) + return group.Wait() } type CacheContents struct { @@ -71,12 +76,18 @@ type CacheContents struct { } func (f FutureFileReader) RetrieveCache(ctx context.Context) (CacheContents, error) { - workflowCRD, err := f.RemoteFileWorkflowStore.GetWorkflowCRD(ctx, f.flyteWfCRDCacheLoc) - if err != nil { - return CacheContents{}, err - } - compiledWorkflow, err := f.RemoteFileWorkflowStore.GetCompiledWorkflow(ctx, f.flyteWfClosureCacheLoc) 
- if err != nil { + group, ctx := errgroup.WithContext(ctx) + var workflowCRD *v1alpha1.FlyteWorkflow + group.Go(func() (err error) { + workflowCRD, err = f.RemoteFileWorkflowStore.GetWorkflowCRD(ctx, f.flyteWfCRDCacheLoc) + return + }) + var compiledWorkflow *core.CompiledWorkflowClosure + group.Go(func() (err error) { + compiledWorkflow, err = f.RemoteFileWorkflowStore.GetCompiledWorkflow(ctx, f.flyteWfClosureCacheLoc) + return + }) + if err := group.Wait(); err != nil { return CacheContents{}, err } return CacheContents{ diff --git a/flytepropeller/pkg/controller/nodes/task/k8s/event_watcher.go b/flytepropeller/pkg/controller/nodes/task/k8s/event_watcher.go index e53de83e10..13ebbc4cc5 100644 --- a/flytepropeller/pkg/controller/nodes/task/k8s/event_watcher.go +++ b/flytepropeller/pkg/controller/nodes/task/k8s/event_watcher.go @@ -72,7 +72,19 @@ func (e *eventWatcher) OnUpdate(_, newObj interface{}) { } func (e *eventWatcher) OnDelete(obj interface{}) { - event := obj.(*eventsv1.Event) + event, casted := obj.(*eventsv1.Event) + if !casted { + unknown, casted := obj.(cache.DeletedFinalStateUnknown) + if !casted { + logger.Warnf(context.Background(), "Unknown object type [%T] in OnDelete", obj) + } else { + logger.Warnf(context.Background(), "Deleted object of unknown key [%v] type [%T] in OnDelete", + unknown.Key, unknown.Obj) + } + + return + } + objectNsName := types.NamespacedName{Namespace: event.Regarding.Namespace, Name: event.Regarding.Name} eventNsName := types.NamespacedName{Namespace: event.Namespace, Name: event.Name} v, _ := e.objectCache.LoadOrStore(objectNsName, &objectEvents{}) diff --git a/flytepropeller/pkg/controller/nodes/task/k8s/event_watcher_test.go b/flytepropeller/pkg/controller/nodes/task/k8s/event_watcher_test.go index d3ffbcc5b9..37e4ba11ff 100644 --- a/flytepropeller/pkg/controller/nodes/task/k8s/event_watcher_test.go +++ b/flytepropeller/pkg/controller/nodes/task/k8s/event_watcher_test.go @@ -9,6 +9,7 @@ import ( eventsv1 
"k8s.io/api/events/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" ) func TestEventWatcher_OnAdd(t *testing.T) { @@ -143,6 +144,25 @@ func TestEventWatcher_OnDelete(t *testing.T) { v, _ := ew.objectCache.Load(types.NamespacedName{Namespace: "ns3", Name: "name3"}) assert.Nil(t, v) }) + + t.Run("bad object type", func(t *testing.T) { + ew.OnDelete(cache.DeletedFinalStateUnknown{ + Key: "key", + Obj: &eventsv1.Event{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "eventns3", + Name: "eventname3", + }, + Regarding: corev1.ObjectReference{ + Namespace: "ns3", + Name: "name3", + }, + }, + }) + + v, _ := ew.objectCache.Load(types.NamespacedName{Namespace: "ns3", Name: "name3"}) + assert.Nil(t, v) + }) } func TestEventWatcher_List(t *testing.T) { diff --git a/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go b/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go index 7496fb0d8a..431824dad2 100644 --- a/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go +++ b/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go @@ -5,6 +5,7 @@ import ( "fmt" "time" + "github.com/prometheus/client_golang/prometheus" "golang.org/x/time/rate" v1 "k8s.io/api/core/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" @@ -64,6 +65,7 @@ type PluginMetrics struct { GetCacheHit labeled.StopWatch GetAPILatency labeled.StopWatch ResourceDeleted labeled.Counter + TaskPodErrors *prometheus.CounterVec } func newPluginMetrics(s promutils.Scope) PluginMetrics { @@ -77,6 +79,8 @@ func newPluginMetrics(s promutils.Scope) PluginMetrics { time.Millisecond, s), ResourceDeleted: labeled.NewCounter("pods_deleted", "Counts how many times CheckTaskStatus is"+ " called with a deleted resource.", s), + TaskPodErrors: s.MustNewCounterVec("task_pod_errors", "Counts how many times task pods failed in given phase with given code", + "phase", "error_code"), } } @@ -355,14 +359,19 @@ func (e PluginManager) 
Handle(ctx context.Context, tCtx pluginsCore.TaskExecutio return transition, err } + phaseInfo := transition.Info() + if phaseInfo.Err() != nil { + e.metrics.TaskPodErrors.WithLabelValues(phaseInfo.Phase().String(), phaseInfo.Err().GetCode()).Inc() + } + // Add events since last update - version := transition.Info().Version() + version := phaseInfo.Version() lastEventUpdate := pluginState.LastEventUpdate if e.eventWatcher != nil && o != nil { nsName := k8stypes.NamespacedName{Namespace: o.GetNamespace(), Name: o.GetName()} recentEvents := e.eventWatcher.List(nsName, lastEventUpdate) if len(recentEvents) > 0 { - taskInfo := transition.Info().Info() + taskInfo := phaseInfo.Info() taskInfo.AdditionalReasons = make([]pluginsCore.ReasonInfo, 0, len(recentEvents)) for _, event := range recentEvents { taskInfo.AdditionalReasons = append(taskInfo.AdditionalReasons, @@ -378,9 +387,9 @@ func (e PluginManager) Handle(ctx context.Context, tCtx pluginsCore.TaskExecutio newPluginState := PluginState{ Phase: pluginPhase, K8sPluginState: k8s.PluginState{ - Phase: transition.Info().Phase(), + Phase: phaseInfo.Phase(), PhaseVersion: version, - Reason: transition.Info().Reason(), + Reason: phaseInfo.Reason(), }, LastEventUpdate: lastEventUpdate, } diff --git a/flytepropeller/pkg/controller/nodes/transformers.go b/flytepropeller/pkg/controller/nodes/transformers.go index c9f7d5fc76..a252d17344 100644 --- a/flytepropeller/pkg/controller/nodes/transformers.go +++ b/flytepropeller/pkg/controller/nodes/transformers.go @@ -76,7 +76,8 @@ func ToNodeExecEventPhase(p handler.EPhase) core.NodeExecution_Phase { } } -func ToNodeExecutionEvent(nodeExecID *core.NodeExecutionIdentifier, +func ToNodeExecutionEvent( + nodeExecID *core.NodeExecutionIdentifier, info handler.PhaseInfo, inputPath string, status v1alpha1.ExecutableNodeStatus, @@ -109,9 +110,11 @@ func ToNodeExecutionEvent(nodeExecID *core.NodeExecutionIdentifier, dynamicChain = true } + eInfo := info.GetInfo() var nev 
*event.NodeExecutionEvent - // Start node is special case where the Inputs and Outputs are the same and hence here we copy the Output file + // Start node is special case where the Outputs are the same and hence here we copy the Output file // into the OutputResult and in admin we copy it over into input as well. + // Start node doesn't have inputs. if nodeExecID.NodeId == v1alpha1.StartNodeID { outputsFile := v1alpha1.GetOutputsFile(status.GetOutputDir()) nev = &event.NodeExecutionEvent{ @@ -139,6 +142,17 @@ func ToNodeExecutionEvent(nodeExecID *core.NodeExecutionIdentifier, TargetEntity: targetEntity, IsInDynamicChain: dynamicChain, } + if eventConfig.RawOutputPolicy == config.RawOutputPolicyInline { + if eInfo != nil { + nev.InputValue = &event.NodeExecutionEvent_InputData{ + InputData: eInfo.Inputs, + } + } + } else { + nev.InputValue = &event.NodeExecutionEvent_InputUri{ + InputUri: inputPath, + } + } } if eventVersion == v1alpha1.EventVersion0 && status.GetParentTaskID() != nil { @@ -163,7 +177,6 @@ func ToNodeExecutionEvent(nodeExecID *core.NodeExecutionIdentifier, nev.NodeName = node.GetName() } - eInfo := info.GetInfo() if eInfo != nil { if eInfo.WorkflowNodeInfo != nil { v := ToNodeExecWorkflowNodeMetadata(eInfo.WorkflowNodeInfo) @@ -201,17 +214,6 @@ func ToNodeExecutionEvent(nodeExecID *core.NodeExecutionIdentifier, nev.IsParent = true } } - if eventConfig.RawOutputPolicy == config.RawOutputPolicyInline { - if eInfo != nil { - nev.InputValue = &event.NodeExecutionEvent_InputData{ - InputData: eInfo.Inputs, - } - } - } else { - nev.InputValue = &event.NodeExecutionEvent_InputUri{ - InputUri: inputPath, - } - } return nev, nil } diff --git a/flytepropeller/pkg/controller/workflow/executor_test.go b/flytepropeller/pkg/controller/workflow/executor_test.go index 1a804d1e4b..2be7238dbb 100644 --- a/flytepropeller/pkg/controller/workflow/executor_test.go +++ b/flytepropeller/pkg/controller/workflow/executor_test.go @@ -40,6 +40,7 @@ import ( nodemocks 
"github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/interfaces/mocks" recoveryMocks "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/recovery/mocks" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/subworkflow/launchplan" + taskconfig "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/task/config" "github.com/flyteorg/flyte/flytepropeller/pkg/controller/nodes/task/fakeplugins" wfErrors "github.com/flyteorg/flyte/flytepropeller/pkg/controller/workflow/errors" execStats "github.com/flyteorg/flyte/flytepropeller/pkg/controller/workflowstore" @@ -228,6 +229,15 @@ func createTaskExecutorErrorInCheck(t assert.TestingT) pluginCore.PluginEntry { func TestWorkflowExecutor_HandleFlyteWorkflow_Error(t *testing.T) { ctx := context.Background() scope := testScope.NewSubScope("12") + + taskConfig := taskconfig.GetConfig() + taskConfig.TaskPlugins.DefaultForTaskTypes = map[string]string{ + "python-task": "pod", + "container": "pod", + "raw-container": "pod", + "sidecar": "pod", + } + store := createInmemoryDataStore(t, scope.NewSubScope("data_store")) recorder := StdOutEventRecorder() _, err := events.ConstructEventSink(ctx, &events.Config{Type: events.EventSinkLog}, scope.NewSubScope("event_sink")) diff --git a/flytestdlib/database/config.go b/flytestdlib/database/config.go index 16ca0e5708..f55eecda8f 100644 --- a/flytestdlib/database/config.go +++ b/flytestdlib/database/config.go @@ -19,12 +19,13 @@ var defaultConfig = &DbConfig{ ConnMaxLifeTime: config.Duration{Duration: time.Hour}, Postgres: PostgresConfig{ // These values are suitable for local sandbox development - Host: "localhost", - Port: 30001, - DbName: postgresStr, - User: postgresStr, - Password: postgresStr, - ExtraOptions: "sslmode=disable", + Host: "localhost", + ReadReplicaHost: "localhost", + Port: 30001, + DbName: postgresStr, + User: postgresStr, + Password: postgresStr, + ExtraOptions: "sslmode=disable", }, } var configSection = 
config.MustRegisterSection(database, defaultConfig) @@ -64,10 +65,11 @@ type SQLiteConfig struct { // PostgresConfig includes specific config options for opening a connection to a postgres database. type PostgresConfig struct { - Host string `json:"host" pflag:",The host name of the database server"` - Port int `json:"port" pflag:",The port name of the database server"` - DbName string `json:"dbname" pflag:",The database name"` - User string `json:"username" pflag:",The database user who is connecting to the server."` + Host string `json:"host" pflag:",The host name of the database server"` + ReadReplicaHost string `json:"readReplicaHost" pflag:",The host name of the read replica database server"` + Port int `json:"port" pflag:",The port name of the database server"` + DbName string `json:"dbname" pflag:",The database name"` + User string `json:"username" pflag:",The database user who is connecting to the server."` // Either Password or PasswordPath must be set. Password string `json:"password" pflag:",The database password."` PasswordPath string `json:"passwordPath" pflag:",Points to the file containing the database password."` diff --git a/flytestdlib/database/db.go b/flytestdlib/database/db.go index 8046c7ead4..a964518567 100644 --- a/flytestdlib/database/db.go +++ b/flytestdlib/database/db.go @@ -65,6 +65,31 @@ func GetDB(ctx context.Context, dbConfig *DbConfig, logConfig *logger.Config) ( return gormDb, setupDbConnectionPool(ctx, gormDb, dbConfig) } +// GetReadOnlyDB uses the dbConfig to create gorm DB object for the read replica passed via the config +func GetReadOnlyDB(ctx context.Context, dbConfig *DbConfig, logConfig *logger.Config) (*gorm.DB, error) { + if dbConfig == nil { + panic("Cannot initialize database repository from empty db config") + } + + if dbConfig.Postgres.IsEmpty() || dbConfig.Postgres.ReadReplicaHost == "" { + return nil, fmt.Errorf("read replica host not provided in db config") + } + + gormConfig := &gorm.Config{ + Logger: 
GetGormLogger(ctx, logConfig), + DisableForeignKeyConstraintWhenMigrating: false, + } + + var gormDb *gorm.DB + var err error + gormDb, err = CreatePostgresReadOnlyDbConnection(ctx, gormConfig, dbConfig.Postgres) + if err != nil { + return nil, err + } + + return gormDb, nil +} + func setupDbConnectionPool(ctx context.Context, gormDb *gorm.DB, dbConfig *DbConfig) error { genericDb, err := gormDb.DB() if err != nil { diff --git a/flytestdlib/database/dbconfig_flags.go b/flytestdlib/database/dbconfig_flags.go index c925094cc2..9fa96f9fa8 100755 --- a/flytestdlib/database/dbconfig_flags.go +++ b/flytestdlib/database/dbconfig_flags.go @@ -55,6 +55,7 @@ func (cfg DbConfig) GetPFlagSet(prefix string) *pflag.FlagSet { cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "maxOpenConnections"), defaultConfig.MaxOpenConnections, "maxOpenConnections sets the maximum number of open connections to the database.") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "connMaxLifeTime"), defaultConfig.ConnMaxLifeTime.String(), "sets the maximum amount of time a connection may be reused") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "postgres.host"), defaultConfig.Postgres.Host, "The host name of the database server") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "postgres.readReplicaHost"), defaultConfig.Postgres.ReadReplicaHost, "The host name of the read replica database server") cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "postgres.port"), defaultConfig.Postgres.Port, "The port name of the database server") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "postgres.dbname"), defaultConfig.Postgres.DbName, "The database name") cmdFlags.String(fmt.Sprintf("%v%v", prefix, "postgres.username"), defaultConfig.Postgres.User, "The database user who is connecting to the server.") diff --git a/flytestdlib/database/dbconfig_flags_test.go b/flytestdlib/database/dbconfig_flags_test.go index 2f0a5d53eb..fd49e69fd8 100755 --- a/flytestdlib/database/dbconfig_flags_test.go +++ b/flytestdlib/database/dbconfig_flags_test.go 
@@ -169,6 +169,20 @@ func TestDbConfig_SetFlags(t *testing.T) { } }) }) + t.Run("Test_postgres.readReplicaHost", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("postgres.readReplicaHost", testValue) + if vString, err := cmdFlags.GetString("postgres.readReplicaHost"); err == nil { + testDecodeJson_DbConfig(t, fmt.Sprintf("%v", vString), &actual.Postgres.ReadReplicaHost) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) t.Run("Test_postgres.port", func(t *testing.T) { t.Run("Override", func(t *testing.T) { diff --git a/flytestdlib/database/postgres.go b/flytestdlib/database/postgres.go index ec43330af1..7e73226465 100644 --- a/flytestdlib/database/postgres.go +++ b/flytestdlib/database/postgres.go @@ -54,6 +54,18 @@ func getPostgresDsn(ctx context.Context, pgConfig PostgresConfig) string { pgConfig.Host, pgConfig.Port, pgConfig.DbName, pgConfig.User, password, pgConfig.ExtraOptions) } +// Produces the DSN (data source name) for the read replica for opening a postgres db connection. +func getPostgresReadDsn(ctx context.Context, pgConfig PostgresConfig) string { + password := resolvePassword(ctx, pgConfig.Password, pgConfig.PasswordPath) + if len(password) == 0 { + // The password-less case is included for development environments. 
+ return fmt.Sprintf("host=%s port=%d dbname=%s user=%s sslmode=disable", + pgConfig.ReadReplicaHost, pgConfig.Port, pgConfig.DbName, pgConfig.User) + } + return fmt.Sprintf("host=%s port=%d dbname=%s user=%s password=%s %s", + pgConfig.ReadReplicaHost, pgConfig.Port, pgConfig.DbName, pgConfig.User, password, pgConfig.ExtraOptions) +} + // CreatePostgresDbIfNotExists creates DB if it doesn't exist for the passed in config func CreatePostgresDbIfNotExists(ctx context.Context, gormConfig *gorm.Config, pgConfig PostgresConfig) (*gorm.DB, error) { dialector := postgres.Open(getPostgresDsn(ctx, pgConfig)) @@ -94,6 +106,12 @@ func CreatePostgresDbIfNotExists(ctx context.Context, gormConfig *gorm.Config, p return gorm.Open(dialector, gormConfig) } +// CreatePostgresDbConnection creates DB connection and returns the gorm.DB object and error +func CreatePostgresReadOnlyDbConnection(ctx context.Context, gormConfig *gorm.Config, pgConfig PostgresConfig) (*gorm.DB, error) { + dialector := postgres.Open(getPostgresReadDsn(ctx, pgConfig)) + return gorm.Open(dialector, gormConfig) +} + func IsPgErrorWithCode(err error, code string) bool { // Newer versions of the gorm postgres driver seem to use // "github.com/jackc/pgx/v5/pgconn" diff --git a/flytestdlib/database/postgres_test.go b/flytestdlib/database/postgres_test.go index 311b05c351..eb22c6b3aa 100644 --- a/flytestdlib/database/postgres_test.go +++ b/flytestdlib/database/postgres_test.go @@ -66,6 +66,47 @@ func TestGetPostgresDsn(t *testing.T) { }) } +func TestGetPostgresReadDsn(t *testing.T) { + pgConfig := PostgresConfig{ + Host: "localhost", + ReadReplicaHost: "readReplicaHost", + Port: 5432, + DbName: "postgres", + User: "postgres", + ExtraOptions: "sslmode=disable", + } + t.Run("no password", func(t *testing.T) { + dsn := getPostgresReadDsn(context.TODO(), pgConfig) + assert.Equal(t, "host=readReplicaHost port=5432 dbname=postgres user=postgres sslmode=disable", dsn) + }) + t.Run("with password", func(t *testing.T) { + 
pgConfig.Password = "passw" + dsn := getPostgresReadDsn(context.TODO(), pgConfig) + assert.Equal(t, "host=readReplicaHost port=5432 dbname=postgres user=postgres password=passw sslmode=disable", dsn) + + }) + t.Run("with password, no extra", func(t *testing.T) { + pgConfig.Password = "passwo" + pgConfig.ExtraOptions = "" + dsn := getPostgresReadDsn(context.TODO(), pgConfig) + assert.Equal(t, "host=readReplicaHost port=5432 dbname=postgres user=postgres password=passwo ", dsn) + }) + t.Run("with password path", func(t *testing.T) { + password := "1234abc" + tmpFile, err := ioutil.TempFile("", "prefix") + if err != nil { + t.Errorf("Couldn't open temp file: %v", err) + } + defer tmpFile.Close() + if _, err = tmpFile.WriteString(password); err != nil { + t.Errorf("Couldn't write to temp file: %v", err) + } + pgConfig.PasswordPath = tmpFile.Name() + dsn := getPostgresReadDsn(context.TODO(), pgConfig) + assert.Equal(t, "host=readReplicaHost port=5432 dbname=postgres user=postgres password=1234abc ", dsn) + }) +} + type wrappedError struct { err error } diff --git a/flytestdlib/go.mod b/flytestdlib/go.mod index 98443e36ad..91a9588d9d 100644 --- a/flytestdlib/go.mod +++ b/flytestdlib/go.mod @@ -21,7 +21,8 @@ require ( github.com/magiconair/properties v1.8.6 github.com/mitchellh/mapstructure v1.5.0 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/client_golang v1.19.1-0.20240620110541-bccd68204bf4 + github.com/prometheus/common v0.53.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 @@ -37,7 +38,7 @@ require ( golang.org/x/time v0.5.0 golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d google.golang.org/grpc v1.62.1 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.34.1 gorm.io/driver/postgres v1.5.3 gorm.io/driver/sqlite v1.5.4 gorm.io/gorm v1.25.4 @@ -63,7 +64,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 
v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect @@ -98,12 +99,12 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.8 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect github.com/mattn/go-sqlite3 v1.14.17 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect @@ -112,9 +113,8 @@ require ( github.com/pelletier/go-toml/v2 v2.0.0-beta.8 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cast v1.4.1 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect @@ -130,7 +130,7 @@ require ( golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect golang.org/x/mod v0.17.0 // indirect golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.22.0 // 
indirect golang.org/x/term v0.22.0 // indirect diff --git a/flytestdlib/go.sum b/flytestdlib/go.sum index aa97777715..5af566903d 100644 --- a/flytestdlib/go.sum +++ b/flytestdlib/go.sum @@ -73,8 +73,8 @@ github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -280,6 +280,8 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -303,8 +305,6 @@ github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -332,15 +332,15 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.19.1-0.20240620110541-bccd68204bf4 h1:GCiMVi+gRj5QaXuw8Gkz71k8US0ilrLJmoG/mp5+8dI= +github.com/prometheus/client_golang v1.19.1-0.20240620110541-bccd68204bf4/go.mod h1:JJCmTHsrwjUPYl5HyuWSzf8ZNGQzncCeuj37Rby0GzI= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= 
-github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= @@ -513,8 +513,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod 
h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -753,8 +753,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/flytestdlib/promutils/client.go b/flytestdlib/promutils/client.go new file mode 100644 index 0000000000..79a14cfe52 --- /dev/null +++ b/flytestdlib/promutils/client.go @@ -0,0 +1,80 @@ +package promutils + +import ( + "context" + "net/url" + "time" + + "github.com/prometheus/client_golang/prometheus" + "k8s.io/client-go/tools/metrics" +) + +func init() { + requestMetrics := newRequestMetricsProvider() + rateLimiterMetrics := newRateLimiterMetricsAdapter() + metrics.Register(metrics.RegisterOpts{ + RequestLatency: &requestMetrics, + RequestResult: &requestMetrics, + 
RateLimiterLatency: &rateLimiterMetrics, + }) +} + +var latencyBuckets = []float64{.0005, .001, .005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + +type requestMetricsProvider struct { + requestLatency *prometheus.HistogramVec + requestResult *prometheus.CounterVec +} + +func (r *requestMetricsProvider) Observe(ctx context.Context, verb string, _ url.URL, latency time.Duration) { + r.requestLatency.WithLabelValues(verb).Observe(latency.Seconds()) +} + +func (r *requestMetricsProvider) Increment(ctx context.Context, code string, method string, _ string) { + r.requestResult.WithLabelValues(code, method).Inc() +} + +func newRequestMetricsProvider() requestMetricsProvider { + requestLatency := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "k8s_client_request_latency", + Help: "Kubernetes client request latency in seconds", + Buckets: latencyBuckets, + }, + []string{"verb"}) + prometheus.MustRegister(requestLatency) + requestResult := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "k8s_client_request_total", + Help: "Kubernetes client request total", + }, + []string{"code", "method"}, + ) + prometheus.MustRegister(requestResult) + return requestMetricsProvider{ + requestLatency, + requestResult, + } +} + +type rateLimiterMetricsProvider struct { + rateLimiterLatency *prometheus.HistogramVec +} + +func (r *rateLimiterMetricsProvider) Observe(ctx context.Context, verb string, _ url.URL, latency time.Duration) { + r.rateLimiterLatency.WithLabelValues(verb).Observe(latency.Seconds()) +} + +func newRateLimiterMetricsAdapter() rateLimiterMetricsProvider { + rateLimiterLatency := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "k8s_client_rate_limiter_latency", + Help: "Kubernetes client rate limiter latency in seconds", + Buckets: latencyBuckets, + }, + []string{"verb"}) + prometheus.MustRegister(rateLimiterLatency) + return rateLimiterMetricsProvider{ + rateLimiterLatency, + } +} diff --git 
a/flytestdlib/promutils/labeled/histogram_stopwatch.go b/flytestdlib/promutils/labeled/histogram_stopwatch.go new file mode 100644 index 0000000000..9beea73e63 --- /dev/null +++ b/flytestdlib/promutils/labeled/histogram_stopwatch.go @@ -0,0 +1,92 @@ +package labeled + +import ( + "context" + "time" + + "k8s.io/apimachinery/pkg/util/sets" + + "github.com/flyteorg/flyte/flytestdlib/contextutils" + "github.com/flyteorg/flyte/flytestdlib/promutils" +) + +type HistogramStopWatch struct { + *promutils.HistogramStopWatchVec + promutils.HistogramStopWatch + + labels []contextutils.Key +} + +// Start creates a new Instance of the HistogramStopWatch called a Timer that is closeable/stoppable. +func (c HistogramStopWatch) Start(ctx context.Context) Timer { + w, err := c.HistogramStopWatchVec.GetMetricWith(contextutils.Values(ctx, c.labels...)) + if err != nil { + panic(err.Error()) + } + + if c.HistogramStopWatch.Observer == nil { + return w.Start() + } + + return timer{ + Timers: []Timer{ + w.Start(), + c.HistogramStopWatch.Start(), + }, + } +} + +// Observe observes specified duration between the start and end time. The data point will be labeled with values from context. +// See labeled.SetMetricsKeys for information about how to configure that. +func (c HistogramStopWatch) Observe(ctx context.Context, start, end time.Time) { + w, err := c.HistogramStopWatchVec.GetMetricWith(contextutils.Values(ctx, c.labels...)) + if err != nil { + panic(err.Error()) + } + w.Observe(start, end) + + if c.HistogramStopWatch.Observer != nil { + c.HistogramStopWatch.Observe(start, end) + } +} + +// Time observes the elapsed duration since the creation of the timer. The timer is created using a StopWatch. +// The data point will be labeled with values from context. See labeled.SetMetricsKeys for information about to +// configure that. 
+func (c HistogramStopWatch) Time(ctx context.Context, f func()) { + t := c.Start(ctx) + f() + t.Stop() +} + +// NewHistogramStopWatch creates a new labeled HistogramStopWatch. Label keys must be set before instantiating a counter. See labeled.SetMetricsKeys +// for information about how to configure that. +func NewHistogramStopWatch(name, description string, scope promutils.Scope, opts ...MetricOption) HistogramStopWatch { + if len(metricKeys) == 0 { + panic(ErrNeverSet) + } + + sw := HistogramStopWatch{} + + name = promutils.SanitizeMetricName(name) + for _, opt := range opts { + if _, emitUnableMetric := opt.(EmitUnlabeledMetricOption); emitUnableMetric { + sw.HistogramStopWatch = scope.MustNewHistogramStopWatch(GetUnlabeledMetricName(name), description) + } else if additionalLabels, casted := opt.(AdditionalLabelsOption); casted { + // compute unique labels + labelSet := sets.NewString(metricStringKeys...) + labelSet.Insert(additionalLabels.Labels...) + labels := labelSet.List() + + sw.HistogramStopWatchVec = scope.MustNewHistogramStopWatchVec(name, description, labels...) + sw.labels = contextutils.MetricKeysFromStrings(labels) + } + } + + if sw.HistogramStopWatchVec == nil { + sw.HistogramStopWatchVec = scope.MustNewHistogramStopWatchVec(name, description, metricStringKeys...) 
+ sw.labels = metricKeys + } + + return sw +} diff --git a/flytestdlib/promutils/labeled/histogram_stopwatch_test.go b/flytestdlib/promutils/labeled/histogram_stopwatch_test.go new file mode 100644 index 0000000000..d4608f92c8 --- /dev/null +++ b/flytestdlib/promutils/labeled/histogram_stopwatch_test.go @@ -0,0 +1,372 @@ +package labeled + +import ( + "context" + "strconv" + "strings" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/common/expfmt" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/flyteorg/flyte/flytestdlib/contextutils" + "github.com/flyteorg/flyte/flytestdlib/promutils" +) + +func ExampleHistogramStopWatch_Start() { + ctx := context.Background() + stopWatch := NewHistogramStopWatch("test", "this is an example histogram stopwatch", promutils.NewTestScope()) + { + timer := stopWatch.Start(ctx) + defer timer.Stop() + + // An operation you want to measure the time for. 
+ time.Sleep(time.Second) + } +} + +func TestLabeledHistogramStopWatch(t *testing.T) { + UnsetMetricKeys() + assert.NotPanics(t, func() { + SetMetricKeys(contextutils.ProjectKey, contextutils.DomainKey, contextutils.WorkflowIDKey, contextutils.TaskIDKey) + }) + + t.Run("Labeled", func(t *testing.T) { + scope := promutils.NewScope("testscope_hist_stopwatch") + s := NewHistogramStopWatch("s1", "some desc", scope) + assert.NotNil(t, s) + metricName := scope.CurrentScope() + "s1" + + ctx := context.TODO() + const header = ` + # HELP testscope_hist_stopwatch:s1 some desc + # TYPE testscope_hist_stopwatch:s1 histogram` + + w := s.Start(ctx) + w.Stop() + expectedMetrics := map[string]any{ + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.005"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.01"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.025"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.05"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.1"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.25"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="1"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="2.5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="10"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="+Inf"}`: 1, + `testscope_hist_stopwatch:s1_sum{domain="",project="",task="",wf=""}`: 0.0, + `testscope_hist_stopwatch:s1_count{domain="",project="",task="",wf=""}`: 1, + } + assertMetrics(t, s.HistogramStopWatchVec, metricName, header, expectedMetrics) + + ctx 
= contextutils.WithProjectDomain(ctx, "project", "domain") + w = s.Start(ctx) + w.Stop() + + expectedMetrics = map[string]any{ + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.005"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.01"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.025"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.05"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.1"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.25"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="1"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="2.5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="10"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="+Inf"}`: 1, + `testscope_hist_stopwatch:s1_sum{domain="",project="",task="",wf=""}`: 0.0, + `testscope_hist_stopwatch:s1_count{domain="",project="",task="",wf=""}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.005"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.01"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.025"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.05"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.1"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.25"}`: 1, + 
`testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="1"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="2.5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="10"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="+Inf"}`: 1, + `testscope_hist_stopwatch:s1_sum{domain="domain",project="project",task="",wf=""}`: 0.0, + `testscope_hist_stopwatch:s1_count{domain="domain",project="project",task="",wf=""}`: 1, + } + assertMetrics(t, s.HistogramStopWatchVec, metricName, header, expectedMetrics) + + now := time.Now() + s.Observe(ctx, now, now.Add(time.Minute)) + + expectedMetrics = map[string]any{ + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.005"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.01"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.025"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.05"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.1"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.25"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="1"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="2.5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="10"}`: 1, + 
`testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="+Inf"}`: 1, + `testscope_hist_stopwatch:s1_sum{domain="",project="",task="",wf=""}`: 0.0, + `testscope_hist_stopwatch:s1_count{domain="",project="",task="",wf=""}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.005"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.01"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.025"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.05"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.1"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.25"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="1"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="2.5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="10"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="+Inf"}`: 2, + `testscope_hist_stopwatch:s1_sum{domain="domain",project="project",task="",wf=""}`: 60.0, + `testscope_hist_stopwatch:s1_count{domain="domain",project="project",task="",wf=""}`: 2, + } + assertMetrics(t, s.HistogramStopWatchVec, metricName, header, expectedMetrics) + + s.Time(ctx, func() { + // Do nothing + }) + + expectedMetrics = map[string]any{ + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.005"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.01"}`: 1, + 
`testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.025"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.05"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.1"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.25"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="0.5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="1"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="2.5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="5"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="10"}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="",project="",task="",wf="",le="+Inf"}`: 1, + `testscope_hist_stopwatch:s1_sum{domain="",project="",task="",wf=""}`: 0.0, + `testscope_hist_stopwatch:s1_count{domain="",project="",task="",wf=""}`: 1, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.005"}`: 2, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.01"}`: 2, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.025"}`: 2, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.05"}`: 2, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.1"}`: 2, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.25"}`: 2, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="0.5"}`: 2, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="1"}`: 2, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="2.5"}`: 2, + 
`testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="5"}`: 2, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="10"}`: 2, + `testscope_hist_stopwatch:s1_bucket{domain="domain",project="project",task="",wf="",le="+Inf"}`: 3, + `testscope_hist_stopwatch:s1_sum{domain="domain",project="project",task="",wf=""}`: 60.0, + `testscope_hist_stopwatch:s1_count{domain="domain",project="project",task="",wf=""}`: 3, + } + assertMetrics(t, s.HistogramStopWatchVec, metricName, header, expectedMetrics) + }) + + t.Run("Unlabeled", func(t *testing.T) { + scope := promutils.NewScope("testscope_hist_stopwatch") + s := NewHistogramStopWatch("s2", "some desc", scope, EmitUnlabeledMetric) + assert.NotNil(t, s) + + ctx := context.TODO() + const header = ` + # HELP testscope_hist_stopwatch:s2_unlabeled some desc + # TYPE testscope_hist_stopwatch:s2_unlabeled histogram` + + w := s.Start(ctx) + w.Stop() + expectedMetrics := map[string]any{ + `testscope_hist_stopwatch:s2_unlabeled_bucket{le="0.005"}`: 1, + `testscope_hist_stopwatch:s2_unlabeled_bucket{le="0.01"}`: 1, + `testscope_hist_stopwatch:s2_unlabeled_bucket{le="0.025"}`: 1, + `testscope_hist_stopwatch:s2_unlabeled_bucket{le="0.05"}`: 1, + `testscope_hist_stopwatch:s2_unlabeled_bucket{le="0.1"}`: 1, + `testscope_hist_stopwatch:s2_unlabeled_bucket{le="0.25"}`: 1, + `testscope_hist_stopwatch:s2_unlabeled_bucket{le="0.5"}`: 1, + `testscope_hist_stopwatch:s2_unlabeled_bucket{le="1"}`: 1, + `testscope_hist_stopwatch:s2_unlabeled_bucket{le="2.5"}`: 1, + `testscope_hist_stopwatch:s2_unlabeled_bucket{le="5"}`: 1, + `testscope_hist_stopwatch:s2_unlabeled_bucket{le="10"}`: 1, + `testscope_hist_stopwatch:s2_unlabeled_bucket{le="+Inf"}`: 1, + `testscope_hist_stopwatch:s2_unlabeled_sum`: 0.0, + `testscope_hist_stopwatch:s2_unlabeled_count`: 1, + } + assertMetrics(t, s.HistogramStopWatch.Observer.(prometheus.Histogram), "testscope_hist_stopwatch:s2_unlabeled", header, 
expectedMetrics) + }) + + t.Run("AdditionalLabels", func(t *testing.T) { + scope := promutils.NewScope("testscope_hist_stopwatch") + opts := AdditionalLabelsOption{Labels: []string{contextutils.ProjectKey.String(), contextutils.ExecIDKey.String()}} + s := NewHistogramStopWatch("s3", "some desc", scope, opts) + assert.NotNil(t, s) + metricName := scope.CurrentScope() + "s3" + + ctx := context.TODO() + const header = ` + # HELP testscope_hist_stopwatch:s3 some desc + # TYPE testscope_hist_stopwatch:s3 histogram` + + w := s.Start(ctx) + w.Stop() + expectedMetrics := map[string]any{ + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.005"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.01"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.025"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.05"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.1"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.25"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="1"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="2.5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="10"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="+Inf"}`: 1, + `testscope_hist_stopwatch:s3_sum{domain="",exec_id="",project="",task="",wf=""}`: 0.0, + `testscope_hist_stopwatch:s3_count{domain="",exec_id="",project="",task="",wf=""}`: 1, + } + assertMetrics(t, 
s.HistogramStopWatchVec, metricName, header, expectedMetrics) + + ctx = contextutils.WithProjectDomain(ctx, "project", "domain") + w = s.Start(ctx) + w.Stop() + expectedMetrics = map[string]any{ + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.005"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.01"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.025"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.05"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.1"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.25"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="1"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="2.5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="10"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="+Inf"}`: 1, + `testscope_hist_stopwatch:s3_sum{domain="",exec_id="",project="",task="",wf=""}`: 0.0, + `testscope_hist_stopwatch:s3_count{domain="",exec_id="",project="",task="",wf=""}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.005"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.01"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.025"}`: 1, + 
`testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.05"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.1"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.25"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="1"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="2.5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="10"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="+Inf"}`: 1, + `testscope_hist_stopwatch:s3_sum{domain="domain",exec_id="",project="project",task="",wf=""}`: 0.0, + `testscope_hist_stopwatch:s3_count{domain="domain",exec_id="",project="project",task="",wf=""}`: 1, + } + assertMetrics(t, s.HistogramStopWatchVec, metricName, header, expectedMetrics) + + ctx = contextutils.WithExecutionID(ctx, "exec_id") + w = s.Start(ctx) + w.Stop() + expectedMetrics = map[string]any{ + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.005"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.01"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.025"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.05"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.1"}`: 1, + 
`testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.25"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="0.5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="1"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="2.5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="10"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="",exec_id="",project="",task="",wf="",le="+Inf"}`: 1, + `testscope_hist_stopwatch:s3_sum{domain="",exec_id="",project="",task="",wf=""}`: 0.0, + `testscope_hist_stopwatch:s3_count{domain="",exec_id="",project="",task="",wf=""}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.005"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.01"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.025"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.05"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.1"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.25"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="0.5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="1"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="2.5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="5"}`: 1, + 
`testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="10"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="",project="project",task="",wf="",le="+Inf"}`: 1, + `testscope_hist_stopwatch:s3_sum{domain="domain",exec_id="",project="project",task="",wf=""}`: 0.0, + `testscope_hist_stopwatch:s3_count{domain="domain",exec_id="",project="project",task="",wf=""}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="exec_id",project="project",task="",wf="",le="0.005"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="exec_id",project="project",task="",wf="",le="0.01"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="exec_id",project="project",task="",wf="",le="0.025"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="exec_id",project="project",task="",wf="",le="0.05"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="exec_id",project="project",task="",wf="",le="0.1"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="exec_id",project="project",task="",wf="",le="0.25"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="exec_id",project="project",task="",wf="",le="0.5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="exec_id",project="project",task="",wf="",le="1"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="exec_id",project="project",task="",wf="",le="2.5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="exec_id",project="project",task="",wf="",le="5"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="exec_id",project="project",task="",wf="",le="10"}`: 1, + `testscope_hist_stopwatch:s3_bucket{domain="domain",exec_id="exec_id",project="project",task="",wf="",le="+Inf"}`: 1, + `testscope_hist_stopwatch:s3_sum{domain="domain",exec_id="exec_id",project="project",task="",wf=""}`: 0.0, + 
`testscope_hist_stopwatch:s3_count{domain="domain",exec_id="exec_id",project="project",task="",wf=""}`: 1, + } + assertMetrics(t, s.HistogramStopWatchVec, metricName, header, expectedMetrics) + }) +} + +func assertMetrics(t *testing.T, c prometheus.Collector, metricName, expectedHeader string, expectedMetrics map[string]any) { + t.Helper() + metricBytes, err := testutil.CollectAndFormat(c, expfmt.TypeTextPlain, metricName) + require.NoError(t, err) + require.NotEmptyf(t, metricBytes, "empty `%q` metric", metricName) + + actual := strings.Split(strings.TrimSpace(string(metricBytes)), "\n") + n := len(actual) + + expected := strings.Split(strings.TrimSpace(expectedHeader), "\n") + require.Len(t, expected, 2, "wrong number of expected header lines") + + for i := 0; i < n; i++ { + line := actual[i] + + if strings.HasPrefix(line, "#") { + if i != 0 && i != 1 { + require.Failf(t, "wrong format", "comment line %q on wrong place", line) + } + assert.Equal(t, strings.TrimSpace(expected[i]), actual[i]) + continue + } + + lineSplt := strings.Split(line, " ") + if len(lineSplt) != 2 { + require.Failf(t, "metric line has wrong format", "metric %s has line %q with wrong format", metricName, line) + } + + key := lineSplt[0] + expectedValue, ok := expectedMetrics[key] + require.Truef(t, ok, "missing expected %q metric", key) + + switch expectedValue.(type) { + case int, int8, int16, int32, int64: + actualValue, err := strconv.Atoi(lineSplt[1]) + require.NoError(t, err) + assert.Equal(t, expectedValue, actualValue) + case float32, float64: + actualValue, err := strconv.ParseFloat(lineSplt[1], 64) + require.NoError(t, err) + assert.InDeltaf(t, expectedValue, actualValue, 0.001, "metric %q has wrong value", key) + assert.Greaterf(t, actualValue, expectedValue, "actual value of %q should be slightly greater than expected", key) + default: + require.Fail(t, "unsupported expected value type") + } + } +} diff --git a/flytestdlib/promutils/labeled/stopwatch_test.go 
b/flytestdlib/promutils/labeled/stopwatch_test.go index 640ea94222..4d94caabf7 100644 --- a/flytestdlib/promutils/labeled/stopwatch_test.go +++ b/flytestdlib/promutils/labeled/stopwatch_test.go @@ -2,11 +2,10 @@ package labeled import ( "context" - "strings" "testing" "time" - "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/prometheus/client_golang/prometheus" "github.com/stretchr/testify/assert" "github.com/flyteorg/flyte/flytestdlib/contextutils" @@ -35,77 +34,73 @@ func TestLabeledStopWatch(t *testing.T) { scope := promutils.NewScope("testscope_stopwatch") s := NewStopWatch("s1", "some desc", time.Minute, scope) assert.NotNil(t, s) + metricName := scope.CurrentScope() + "s1_m" ctx := context.TODO() const header = ` # HELP testscope_stopwatch:s1_m some desc - # TYPE testscope_stopwatch:s1_m summary - ` + # TYPE testscope_stopwatch:s1_m summary` w := s.Start(ctx) w.Stop() - var expected = ` - testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.5"} 0 - testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.9"} 0 - testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.99"} 0 - testscope_stopwatch:s1_m_sum{domain="",project="",task="",wf=""} 0 - testscope_stopwatch:s1_m_count{domain="",project="",task="",wf=""} 1 - ` - err := testutil.CollectAndCompare(s.StopWatchVec, strings.NewReader(header+expected)) - assert.NoError(t, err) + expectedMetrics := map[string]any{ + `testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.5"}`: 0.0, + `testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.9"}`: 0.0, + `testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.99"}`: 0.0, + `testscope_stopwatch:s1_m_sum{domain="",project="",task="",wf=""}`: 0.0, + `testscope_stopwatch:s1_m_count{domain="",project="",task="",wf=""}`: 1, + } + assertMetrics(t, s.StopWatchVec, metricName, header, expectedMetrics) ctx = contextutils.WithProjectDomain(ctx, 
"project", "domain") w = s.Start(ctx) w.Stop() - expected = ` - testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.5"} 0 - testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.9"} 0 - testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.99"} 0 - testscope_stopwatch:s1_m_sum{domain="",project="",task="",wf=""} 0 - testscope_stopwatch:s1_m_count{domain="",project="",task="",wf=""} 1 - testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.5"} 0 - testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.9"} 0 - testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.99"} 0 - testscope_stopwatch:s1_m_sum{domain="domain",project="project",task="",wf=""} 0 - testscope_stopwatch:s1_m_count{domain="domain",project="project",task="",wf=""} 1 - ` - err = testutil.CollectAndCompare(s.StopWatchVec, strings.NewReader(header+expected)) - assert.NoError(t, err) + expectedMetrics = map[string]any{ + `testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.5"}`: 0.0, + `testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.9"}`: 0.0, + `testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.99"}`: 0.0, + `testscope_stopwatch:s1_m_sum{domain="",project="",task="",wf=""}`: 0.0, + `testscope_stopwatch:s1_m_count{domain="",project="",task="",wf=""}`: 1, + `testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.5"}`: 0.0, + `testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.9"}`: 0.0, + `testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.99"}`: 0.0, + `testscope_stopwatch:s1_m_sum{domain="domain",project="project",task="",wf=""}`: 0.0, + `testscope_stopwatch:s1_m_count{domain="domain",project="project",task="",wf=""}`: 1, + } + assertMetrics(t, s.StopWatchVec, metricName, header, expectedMetrics) 
now := time.Now() s.Observe(ctx, now, now.Add(time.Minute)) - expected = ` - testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.5"} 0 - testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.9"} 0 - testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.99"} 0 - testscope_stopwatch:s1_m_sum{domain="",project="",task="",wf=""} 0 - testscope_stopwatch:s1_m_count{domain="",project="",task="",wf=""} 1 - testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.5"} 0 - testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.9"} 1 - testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.99"} 1 - testscope_stopwatch:s1_m_sum{domain="domain",project="project",task="",wf=""} 1 - testscope_stopwatch:s1_m_count{domain="domain",project="project",task="",wf=""} 2 - ` - err = testutil.CollectAndCompare(s.StopWatchVec, strings.NewReader(header+expected)) - assert.NoError(t, err) + expectedMetrics = map[string]any{ + `testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.5"}`: 0.0, + `testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.9"}`: 0.0, + `testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.99"}`: 0.0, + `testscope_stopwatch:s1_m_sum{domain="",project="",task="",wf=""}`: 0.0, + `testscope_stopwatch:s1_m_count{domain="",project="",task="",wf=""}`: 1, + `testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.5"}`: 0.0, + `testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.9"}`: 1, + `testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.99"}`: 1, + `testscope_stopwatch:s1_m_sum{domain="domain",project="project",task="",wf=""}`: 1.0, + `testscope_stopwatch:s1_m_count{domain="domain",project="project",task="",wf=""}`: 2, + } + assertMetrics(t, s.StopWatchVec, metricName, header, 
expectedMetrics) s.Time(ctx, func() { // Do nothing }) - expected = ` - testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.5"} 0 - testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.9"} 0 - testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.99"} 0 - testscope_stopwatch:s1_m_sum{domain="",project="",task="",wf=""} 0 - testscope_stopwatch:s1_m_count{domain="",project="",task="",wf=""} 1 - testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.5"} 0 - testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.9"} 1 - testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.99"} 1 - testscope_stopwatch:s1_m_sum{domain="domain",project="project",task="",wf=""} 1 - testscope_stopwatch:s1_m_count{domain="domain",project="project",task="",wf=""} 3 - ` - err = testutil.CollectAndCompare(s.StopWatchVec, strings.NewReader(header+expected)) - assert.NoError(t, err) + expectedMetrics = map[string]any{ + `testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.5"}`: 0.0, + `testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.9"}`: 0.0, + `testscope_stopwatch:s1_m{domain="",project="",task="",wf="",quantile="0.99"}`: 0.0, + `testscope_stopwatch:s1_m_sum{domain="",project="",task="",wf=""}`: 0.0, + `testscope_stopwatch:s1_m_count{domain="",project="",task="",wf=""}`: 1, + `testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.5"}`: 0.0, + `testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.9"}`: 1, + `testscope_stopwatch:s1_m{domain="domain",project="project",task="",wf="",quantile="0.99"}`: 1, + `testscope_stopwatch:s1_m_sum{domain="domain",project="project",task="",wf=""}`: 1.0, + `testscope_stopwatch:s1_m_count{domain="domain",project="project",task="",wf=""}`: 3, + } + assertMetrics(t, s.StopWatchVec, metricName, header, 
expectedMetrics) }) t.Run("Unlabeled", func(t *testing.T) { @@ -114,23 +109,21 @@ func TestLabeledStopWatch(t *testing.T) { assert.NotNil(t, s) ctx := context.TODO() - /*const header = ` - # HELP testscope_stopwatch:s2_m some desc - # TYPE testscope_stopwatch:s2_m summary - `*/ + const header = ` + # HELP testscope_stopwatch:s2_unlabeled_m some desc + # TYPE testscope_stopwatch:s2_unlabeled_m summary + ` w := s.Start(ctx) w.Stop() - // promutils.StopWatch does not implement prometheus.Collector - /*var expected = ` - testscope_stopwatch:s2_m{quantile="0.5"} 0 - testscope_stopwatch:s2_m{quantile="0.9"} 0 - testscope_stopwatch:s2_m{quantile="0.99"} 0 - testscope_stopwatch:s2_m_sum 0 - testscope_stopwatch:s2_m_count 1 - ` - err := testutil.CollectAndCompare(s.StopWatch, strings.NewReader(header+expected)) - assert.NoError(t, err)*/ + expectedMetrics := map[string]any{ + `testscope_stopwatch:s2_unlabeled_m{quantile="0.5"}`: 0.0, + `testscope_stopwatch:s2_unlabeled_m{quantile="0.9"}`: 0.0, + `testscope_stopwatch:s2_unlabeled_m{quantile="0.99"}`: 0.0, + `testscope_stopwatch:s2_unlabeled_m_sum`: 0.0, + `testscope_stopwatch:s2_unlabeled_m_count`: 1, + } + assertMetrics(t, s.StopWatch.Observer.(prometheus.Summary), "testscope_stopwatch:s2_unlabeled_m", header, expectedMetrics) }) t.Run("AdditionalLabels", func(t *testing.T) { @@ -138,6 +131,7 @@ func TestLabeledStopWatch(t *testing.T) { opts := AdditionalLabelsOption{Labels: []string{contextutils.ProjectKey.String(), contextutils.ExecIDKey.String()}} s := NewStopWatch("s3", "some desc", time.Minute, scope, opts) assert.NotNil(t, s) + metricName := scope.CurrentScope() + "s3_m" ctx := context.TODO() const header = ` @@ -147,55 +141,52 @@ func TestLabeledStopWatch(t *testing.T) { w := s.Start(ctx) w.Stop() - var expected = ` - testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.5"} 0 - testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.9"} 0 - 
testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.99"} 0 - testscope_stopwatch:s3_m_sum{domain="",exec_id="",project="",task="",wf=""} 0 - testscope_stopwatch:s3_m_count{domain="",exec_id="",project="",task="",wf=""} 1 - ` - err := testutil.CollectAndCompare(s.StopWatchVec, strings.NewReader(header+expected)) - assert.NoError(t, err) + expectedMetrics := map[string]any{ + `testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.5"}`: 0.0, + `testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.9"}`: 0.0, + `testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.99"}`: 0.0, + `testscope_stopwatch:s3_m_sum{domain="",exec_id="",project="",task="",wf=""}`: 0.0, + `testscope_stopwatch:s3_m_count{domain="",exec_id="",project="",task="",wf=""}`: 1, + } + assertMetrics(t, s.StopWatchVec, metricName, header, expectedMetrics) ctx = contextutils.WithProjectDomain(ctx, "project", "domain") w = s.Start(ctx) w.Stop() - expected = ` - testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.5"} 0 - testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.9"} 0 - testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.99"} 0 - testscope_stopwatch:s3_m_sum{domain="",exec_id="",project="",task="",wf=""} 0 - testscope_stopwatch:s3_m_count{domain="",exec_id="",project="",task="",wf=""} 1 - testscope_stopwatch:s3_m{domain="domain",exec_id="",project="project",task="",wf="",quantile="0.5"} 0 - testscope_stopwatch:s3_m{domain="domain",exec_id="",project="project",task="",wf="",quantile="0.9"} 0 - testscope_stopwatch:s3_m{domain="domain",exec_id="",project="project",task="",wf="",quantile="0.99"} 0 - testscope_stopwatch:s3_m_sum{domain="domain",exec_id="",project="project",task="",wf=""} 0 - testscope_stopwatch:s3_m_count{domain="domain",exec_id="",project="project",task="",wf=""} 1 - ` - err = 
testutil.CollectAndCompare(s.StopWatchVec, strings.NewReader(header+expected)) - assert.NoError(t, err) + expectedMetrics = map[string]any{ + `testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.5"}`: 0.0, + `testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.9"}`: 0.0, + `testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.99"}`: 0.0, + `testscope_stopwatch:s3_m_sum{domain="",exec_id="",project="",task="",wf=""}`: 0.0, + `testscope_stopwatch:s3_m_count{domain="",exec_id="",project="",task="",wf=""}`: 1, + `testscope_stopwatch:s3_m{domain="domain",exec_id="",project="project",task="",wf="",quantile="0.5"}`: 0.0, + `testscope_stopwatch:s3_m{domain="domain",exec_id="",project="project",task="",wf="",quantile="0.9"}`: 0.0, + `testscope_stopwatch:s3_m{domain="domain",exec_id="",project="project",task="",wf="",quantile="0.99"}`: 0.0, + `testscope_stopwatch:s3_m_sum{domain="domain",exec_id="",project="project",task="",wf=""}`: 0.0, + `testscope_stopwatch:s3_m_count{domain="domain",exec_id="",project="project",task="",wf=""}`: 1, + } + assertMetrics(t, s.StopWatchVec, metricName, header, expectedMetrics) ctx = contextutils.WithExecutionID(ctx, "exec_id") w = s.Start(ctx) w.Stop() - expected = ` - testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.5"} 0 - testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.9"} 0 - testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.99"} 0 - testscope_stopwatch:s3_m_sum{domain="",exec_id="",project="",task="",wf=""} 0 - testscope_stopwatch:s3_m_count{domain="",exec_id="",project="",task="",wf=""} 1 - testscope_stopwatch:s3_m{domain="domain",exec_id="",project="project",task="",wf="",quantile="0.5"} 0 - testscope_stopwatch:s3_m{domain="domain",exec_id="",project="project",task="",wf="",quantile="0.9"} 0 - 
testscope_stopwatch:s3_m{domain="domain",exec_id="",project="project",task="",wf="",quantile="0.99"} 0 - testscope_stopwatch:s3_m_sum{domain="domain",exec_id="",project="project",task="",wf=""} 0 - testscope_stopwatch:s3_m_count{domain="domain",exec_id="",project="project",task="",wf=""} 1 - testscope_stopwatch:s3_m{domain="domain",exec_id="exec_id",project="project",task="",wf="",quantile="0.5"} 0 - testscope_stopwatch:s3_m{domain="domain",exec_id="exec_id",project="project",task="",wf="",quantile="0.9"} 0 - testscope_stopwatch:s3_m{domain="domain",exec_id="exec_id",project="project",task="",wf="",quantile="0.99"} 0 - testscope_stopwatch:s3_m_sum{domain="domain",exec_id="exec_id",project="project",task="",wf=""} 0 - testscope_stopwatch:s3_m_count{domain="domain",exec_id="exec_id",project="project",task="",wf=""} 1 - ` - err = testutil.CollectAndCompare(s.StopWatchVec, strings.NewReader(header+expected)) - assert.NoError(t, err) + expectedMetrics = map[string]any{ + `testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.5"}`: 0.0, + `testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.9"}`: 0.0, + `testscope_stopwatch:s3_m{domain="",exec_id="",project="",task="",wf="",quantile="0.99"}`: 0.0, + `testscope_stopwatch:s3_m_sum{domain="",exec_id="",project="",task="",wf=""}`: 0.0, + `testscope_stopwatch:s3_m_count{domain="",exec_id="",project="",task="",wf=""}`: 1, + `testscope_stopwatch:s3_m{domain="domain",exec_id="",project="project",task="",wf="",quantile="0.5"}`: 0.0, + `testscope_stopwatch:s3_m{domain="domain",exec_id="",project="project",task="",wf="",quantile="0.9"}`: 0.0, + `testscope_stopwatch:s3_m{domain="domain",exec_id="",project="project",task="",wf="",quantile="0.99"}`: 0.0, + `testscope_stopwatch:s3_m_sum{domain="domain",exec_id="",project="project",task="",wf=""}`: 0.0, + `testscope_stopwatch:s3_m_count{domain="domain",exec_id="",project="project",task="",wf=""}`: 1, + 
`testscope_stopwatch:s3_m{domain="domain",exec_id="exec_id",project="project",task="",wf="",quantile="0.5"}`: 0.0, + `testscope_stopwatch:s3_m{domain="domain",exec_id="exec_id",project="project",task="",wf="",quantile="0.9"}`: 0.0, + `testscope_stopwatch:s3_m{domain="domain",exec_id="exec_id",project="project",task="",wf="",quantile="0.99"}`: 0.0, + `testscope_stopwatch:s3_m_sum{domain="domain",exec_id="exec_id",project="project",task="",wf=""}`: 0.0, + `testscope_stopwatch:s3_m_count{domain="domain",exec_id="exec_id",project="project",task="",wf=""}`: 1, + } + assertMetrics(t, s.StopWatchVec, metricName, header, expectedMetrics) }) } diff --git a/flytestdlib/promutils/scope.go b/flytestdlib/promutils/scope.go index 64d1abaeba..1e3d027901 100644 --- a/flytestdlib/promutils/scope.go +++ b/flytestdlib/promutils/scope.go @@ -86,6 +86,42 @@ func (s StopWatchVec) GetMetricWith(labels prometheus.Labels) (StopWatch, error) }, nil } +// HistogramStopWatch implements a stopwatch style interface that works with prometheus histogram +// NOTE: Do not create a HistogramStopWatch object by hand, use a Scope to get a new instance of the StopWatch object +type HistogramStopWatch struct { + StopWatch +} + +// HistogramStopWatchVec implements a stopwatch style interface that works with prometheus histogram +// NOTE: Do not create a HistogramStopWatchVec object by hand, use a Scope to get a new instance of the StopWatch object +type HistogramStopWatchVec struct { + *prometheus.HistogramVec + outputScale time.Duration +} + +// Gets a concrete StopWatch instance that can be used to start a timer and record observations. 
+func (h HistogramStopWatchVec) WithLabelValues(values ...string) HistogramStopWatch { + return HistogramStopWatch{ + StopWatch: StopWatch{ + Observer: h.HistogramVec.WithLabelValues(values...), + outputScale: h.outputScale, + }, + } +} + +func (h HistogramStopWatchVec) GetMetricWith(labels prometheus.Labels) (HistogramStopWatch, error) { + hVec, err := h.HistogramVec.GetMetricWith(labels) + if err != nil { + return HistogramStopWatch{}, err + } + return HistogramStopWatch{ + StopWatch{ + Observer: hVec, + outputScale: h.outputScale, + }, + }, nil +} + // Timer is a stoppable instance of a StopWatch or a Timer // A Timer can only be stopped. On stopping it will output the elapsed duration to prometheus type Timer struct { @@ -102,7 +138,7 @@ func (s Timer) Stop() float64 { s.timer.Observe(0) return 0 } - scaled := float64(observed / outputScaleDuration) + scaled := float64(observed) / float64(outputScaleDuration) s.timer.Observe(scaled) return scaled } @@ -114,6 +150,12 @@ type SummaryOptions struct { Objectives map[float64]float64 } +// A HistogramOptions represent buckets to specify for a histogram vector when creating a new prometheus histogram +type HistogramOptions struct { + // Buckets is a list of pre-determined buckets for the histogram + Buckets []float64 +} + // A Scope represents a prefix in Prometheus. It is nestable, thus every metric that is published does not need to // provide a prefix, but just the name of the metric. As long as the Scope is used to create a new instance of the metric // The prefix (or scope) is automatically set. @@ -154,6 +196,12 @@ type Scope interface { NewHistogramVec(name, description string, labelNames ...string) (*prometheus.HistogramVec, error) MustNewHistogramVec(name, description string, labelNames ...string) *prometheus.HistogramVec + // NewHistogramVecWithOptions creates new prometheus.HistogramVec metric with the prefix as the CurrentScope + // with a custom set of options, such as of buckets. 
+ // Refer to https://prometheus.io/docs/concepts/metric_types/ for more information + NewHistogramVecWithOptions(name, description string, options HistogramOptions, labelNames ...string) (*prometheus.HistogramVec, error) + MustNewHistogramVecWithOptions(name, description string, options HistogramOptions, labelNames ...string) *prometheus.HistogramVec + // NewCounter creates new prometheus.Counter metric with the prefix as the CurrentScope // Refer to https://prometheus.io/docs/concepts/metric_types/ for more information // Important to note, counters are not like typical counters. These are ever increasing and cumulative. @@ -182,6 +230,20 @@ type Scope interface { NewStopWatchVec(name, description string, scale time.Duration, labelNames ...string) (*StopWatchVec, error) MustNewStopWatchVec(name, description string, scale time.Duration, labelNames ...string) *StopWatchVec + // NewHistogramStopWatch is a custom wrapper to create a HistogramStopWatch object in the current Scope. + // Unlike a StopWatch, a HistogramStopWatch can be aggregated across instances. Quantiles are computed server side. + // See https://prometheus.io/docs/practices/histograms/#quantiles for tradeoffs. + // Scale is assumed to be seconds with buckets spanning 0.005s to 10s. + NewHistogramStopWatch(name, description string) (HistogramStopWatch, error) + MustNewHistogramStopWatch(name, description string) HistogramStopWatch + + // NewHistogramStopWatchVec is a custom wrapper to create a HistogramStopWatchVec object in the current Scope. + // Unlike a StopWatchVec, a HistogramStopWatchVec can be aggregated across instances. Quantiles are computed server side. + // See https://prometheus.io/docs/practices/histograms/#quantiles for tradeoffs. + // Scale is assumed to be seconds with buckets spanning 0.005s to 10s. 
+ NewHistogramStopWatchVec(name, description string, labelNames ...string) (*HistogramStopWatchVec, error) + MustNewHistogramStopWatchVec(name, description string, labelNames ...string) *HistogramStopWatchVec + // NewSubScope creates a new subScope in case nesting is desired for metrics. This is generally useful in creating // Scoped and SubScoped metrics NewSubScope(name string) Scope @@ -311,6 +373,25 @@ func (m metricsScope) MustNewHistogramVec(name, description string, labelNames . return h } +func (m metricsScope) NewHistogramVecWithOptions(name, description string, options HistogramOptions, labelNames ...string) (*prometheus.HistogramVec, error) { + h := prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Name: m.NewScopedMetricName(name), + Help: description, + Buckets: options.Buckets, + }, + labelNames, + ) + + return h, prometheus.Register(h) +} + +func (m metricsScope) MustNewHistogramVecWithOptions(name, description string, options HistogramOptions, labelNames ...string) *prometheus.HistogramVec { + h, err := m.NewHistogramVecWithOptions(name, description, options, labelNames...) 
+ panicIfError(err) + return h +} + func (m metricsScope) NewCounter(name, description string) (prometheus.Counter, error) { c := prometheus.NewCounter( prometheus.CounterOpts{ @@ -388,6 +469,44 @@ func (m metricsScope) MustNewStopWatchVec(name, description string, scale time.D return s } +func (m metricsScope) NewHistogramStopWatch(name, description string) (HistogramStopWatch, error) { + h, err := m.NewHistogram(name, description) + if err != nil { + return HistogramStopWatch{}, err + } + + return HistogramStopWatch{ + StopWatch: StopWatch{ + Observer: h, + outputScale: time.Second, + }, + }, nil +} + +func (m metricsScope) MustNewHistogramStopWatch(name, description string) HistogramStopWatch { + s, err := m.NewHistogramStopWatch(name, description) + panicIfError(err) + return s +} + +func (m metricsScope) NewHistogramStopWatchVec(name, description string, labelNames ...string) (*HistogramStopWatchVec, error) { + h, err := m.NewHistogramVec(name, description, labelNames...) + if err != nil { + return &HistogramStopWatchVec{}, err + } + + return &HistogramStopWatchVec{ + HistogramVec: h, + outputScale: time.Second, + }, nil +} + +func (m metricsScope) MustNewHistogramStopWatchVec(name, description string, labelNames ...string) *HistogramStopWatchVec { + h, err := m.NewHistogramStopWatchVec(name, description, labelNames...) 
+ panicIfError(err) + return h +} + func (m metricsScope) CurrentScope() string { return m.scope } diff --git a/flytestdlib/promutils/scope_test.go b/flytestdlib/promutils/scope_test.go index 9d5a3071d8..6f5aff61bf 100644 --- a/flytestdlib/promutils/scope_test.go +++ b/flytestdlib/promutils/scope_test.go @@ -60,7 +60,7 @@ func TestMetricsScope(t *testing.T) { } t.Run("Counter", func(t *testing.T) { m := s.MustNewCounter("xc", description) - assert.Equal(t, `Desc{fqName: "test:xc", help: "some x", constLabels: {}, variableLabels: []}`, m.Desc().String()) + assert.Equal(t, `Desc{fqName: "test:xc", help: "some x", constLabels: {}, variableLabels: {}}`, m.Desc().String()) mv := s.MustNewCounterVec("xcv", description) assert.NotNil(t, mv) assert.Panics(t, func() { @@ -73,7 +73,7 @@ func TestMetricsScope(t *testing.T) { t.Run("Histogram", func(t *testing.T) { m := s.MustNewHistogram("xh", description) - assert.Equal(t, `Desc{fqName: "test:xh", help: "some x", constLabels: {}, variableLabels: []}`, m.Desc().String()) + assert.Equal(t, `Desc{fqName: "test:xh", help: "some x", constLabels: {}, variableLabels: {}}`, m.Desc().String()) mv := s.MustNewHistogramVec("xhv", description) assert.NotNil(t, mv) assert.Panics(t, func() { @@ -82,14 +82,20 @@ func TestMetricsScope(t *testing.T) { assert.Panics(t, func() { _ = s.MustNewHistogramVec("xhv", description) }) + buckets := []float64{1.0, 2.0, 3.0, 4.0, 5.0, 6.0} + mvo := s.MustNewHistogramVecWithOptions("xho", description, HistogramOptions{Buckets: buckets}) + assert.NotNil(t, mvo) + assert.Panics(t, func() { + _ = s.MustNewHistogramVecWithOptions("xho", description, HistogramOptions{Buckets: buckets}) + }) }) t.Run("Summary", func(t *testing.T) { m := s.MustNewSummary("xs", description) - assert.Equal(t, `Desc{fqName: "test:xs", help: "some x", constLabels: {}, variableLabels: []}`, m.Desc().String()) + assert.Equal(t, `Desc{fqName: "test:xs", help: "some x", constLabels: {}, variableLabels: {}}`, m.Desc().String()) mco, err 
:= s.NewSummaryWithOptions("xsco", description, SummaryOptions{Objectives: map[float64]float64{0.5: 0.05, 1.0: 0.0}}) assert.Nil(t, err) - assert.Equal(t, `Desc{fqName: "test:xsco", help: "some x", constLabels: {}, variableLabels: []}`, mco.Desc().String()) + assert.Equal(t, `Desc{fqName: "test:xsco", help: "some x", constLabels: {}, variableLabels: {}}`, mco.Desc().String()) mv := s.MustNewSummaryVec("xsv", description) assert.NotNil(t, mv) assert.Panics(t, func() { @@ -102,7 +108,7 @@ func TestMetricsScope(t *testing.T) { t.Run("Gauge", func(t *testing.T) { m := s.MustNewGauge("xg", description) - assert.Equal(t, `Desc{fqName: "test:xg", help: "some x", constLabels: {}, variableLabels: []}`, m.Desc().String()) + assert.Equal(t, `Desc{fqName: "test:xg", help: "some x", constLabels: {}, variableLabels: {}}`, m.Desc().String()) mv := s.MustNewGaugeVec("xgv", description) assert.NotNil(t, mv) assert.Panics(t, func() { @@ -117,7 +123,7 @@ func TestMetricsScope(t *testing.T) { m := s.MustNewStopWatch("xt", description, time.Second) asDesc, ok := m.Observer.(prometheus.Metric) assert.True(t, ok) - assert.Equal(t, `Desc{fqName: "test:xt_s", help: "some x", constLabels: {}, variableLabels: []}`, asDesc.Desc().String()) + assert.Equal(t, `Desc{fqName: "test:xt_s", help: "some x", constLabels: {}, variableLabels: {}}`, asDesc.Desc().String()) assert.Panics(t, func() { _ = s.MustNewStopWatch("xt", description, time.Second) }) @@ -165,3 +171,43 @@ func TestStopWatchVec_WithLabelValues(t *testing.T) { assert.NotNil(t, i.start) i.Stop() } + +func TestHistogramStopWatch_Start(t *testing.T) { + scope := NewTestScope() + stopwatch, err := scope.NewHistogramStopWatch("yt"+rand.String(3), "timer") + assert.NoError(t, err) + assert.Equal(t, time.Second, stopwatch.outputScale) + timer := stopwatch.Start() + assert.Equal(t, time.Second, timer.outputScale) + assert.NotNil(t, timer.start) +} + +func TestHistogramStopWatch_Observe(t *testing.T) { + scope := NewTestScope() + stopwatch, err 
:= scope.NewHistogramStopWatch("yt"+rand.String(3), "timer") + assert.NoError(t, err) + assert.Equal(t, time.Second, stopwatch.outputScale) + stopwatch.Observe(time.Now(), time.Now().Add(time.Second)) +} + +func TestHistogramStopWatch_Time(t *testing.T) { + scope := NewTestScope() + stopwatch, err := scope.NewHistogramStopWatch("yt"+rand.String(3), "timer") + assert.NoError(t, err) + assert.Equal(t, time.Second, stopwatch.outputScale) + stopwatch.Time(func() { + }) +} + +func TestHistogramStopWatchVec_WithLabelValues(t *testing.T) { + scope := NewTestScope() + vec, err := scope.NewHistogramStopWatchVec("yt"+rand.String(3), "timer", "workflow", "label") + assert.NoError(t, err) + assert.Equal(t, time.Second, vec.outputScale) + stopwatch := vec.WithLabelValues("my_wf", "something") + assert.NotNil(t, stopwatch) + i := stopwatch.Start() + assert.Equal(t, time.Second, i.outputScale) + assert.NotNil(t, i.start) + i.Stop() +} diff --git a/flytestdlib/storage/mem_store.go b/flytestdlib/storage/mem_store.go index 94083f6646..d9da9b5b1e 100644 --- a/flytestdlib/storage/mem_store.go +++ b/flytestdlib/storage/mem_store.go @@ -9,13 +9,15 @@ import ( "io" "io/ioutil" "os" + "sync" ) type rawFile = []byte type InMemoryStore struct { copyImpl - cache map[DataReference]rawFile + cache map[DataReference]rawFile + rwMutex sync.RWMutex } type MemoryMetadata struct { @@ -42,6 +44,9 @@ func (m MemoryMetadata) ContentMD5() string { } func (s *InMemoryStore) Head(ctx context.Context, reference DataReference) (Metadata, error) { + s.rwMutex.RLock() + defer s.rwMutex.RUnlock() + data, found := s.cache[reference] var hash [md5.Size]byte if found { @@ -59,6 +64,9 @@ func (s *InMemoryStore) List(ctx context.Context, reference DataReference, maxIt } func (s *InMemoryStore) ReadRaw(ctx context.Context, reference DataReference) (io.ReadCloser, error) { + s.rwMutex.RLock() + defer s.rwMutex.RUnlock() + if raw, found := s.cache[reference]; found { return ioutil.NopCloser(bytes.NewReader(raw)), nil 
} @@ -68,6 +76,9 @@ func (s *InMemoryStore) ReadRaw(ctx context.Context, reference DataReference) (i // Delete removes the referenced data from the cache map. func (s *InMemoryStore) Delete(ctx context.Context, reference DataReference) error { + s.rwMutex.Lock() + defer s.rwMutex.Unlock() + if _, found := s.cache[reference]; !found { return os.ErrNotExist } @@ -79,6 +90,8 @@ func (s *InMemoryStore) Delete(ctx context.Context, reference DataReference) err func (s *InMemoryStore) WriteRaw(ctx context.Context, reference DataReference, size int64, opts Options, raw io.Reader) ( err error) { + s.rwMutex.Lock() + defer s.rwMutex.Unlock() rawBytes, err := ioutil.ReadAll(raw) if err != nil { @@ -90,6 +103,9 @@ func (s *InMemoryStore) WriteRaw(ctx context.Context, reference DataReference, s } func (s *InMemoryStore) Clear(ctx context.Context) error { + s.rwMutex.Lock() + defer s.rwMutex.Unlock() + s.cache = map[DataReference]rawFile{} return nil } diff --git a/flytestdlib/storage/stow_store.go b/flytestdlib/storage/stow_store.go index c1950c10de..e86a199bda 100644 --- a/flytestdlib/storage/stow_store.go +++ b/flytestdlib/storage/stow_store.go @@ -89,20 +89,25 @@ type stowMetrics struct { BadReference labeled.Counter BadContainer labeled.Counter - HeadFailure labeled.Counter - HeadLatency labeled.StopWatch + HeadFailure labeled.Counter + HeadLatency labeled.StopWatch + HeadLatencyHist labeled.HistogramStopWatch - ListFailure labeled.Counter - ListLatency labeled.StopWatch + ListFailure labeled.Counter + ListLatency labeled.StopWatch + ListLatencyHist labeled.HistogramStopWatch - ReadFailure labeled.Counter - ReadOpenLatency labeled.StopWatch + ReadFailure labeled.Counter + ReadOpenLatency labeled.StopWatch + ReadOpenLatencyHist labeled.HistogramStopWatch - WriteFailure labeled.Counter - WriteLatency labeled.StopWatch + WriteFailure labeled.Counter + WriteLatency labeled.StopWatch + WriteLatencyHist labeled.HistogramStopWatch - DeleteFailure labeled.Counter - DeleteLatency 
labeled.StopWatch + DeleteFailure labeled.Counter + DeleteLatency labeled.StopWatch + DeleteLatencyHist labeled.HistogramStopWatch } // StowMetadata that will be returned @@ -220,8 +225,12 @@ func (s *StowStore) Head(ctx context.Context, reference DataReference) (Metadata return nil, err } - t := s.metrics.HeadLatency.Start(ctx) + t1 := s.metrics.HeadLatency.Start(ctx) + t2 := s.metrics.HeadLatencyHist.Start(ctx) item, err := container.Item(k) + t1.Stop() + t2.Stop() + if err == nil { if _, err = item.Metadata(); err != nil { // Err will be caught below @@ -232,7 +241,6 @@ func (s *StowStore) Head(ctx context.Context, reference DataReference) (Metadata } else if metadata, err := item.Metadata(); err != nil { // Err will be caught below } else { - t.Stop() contentMD5, ok := metadata[strings.ToLower(FlyteContentMD5)].(string) if !ok { logger.Infof(ctx, "Failed to cast contentMD5 [%v] to string", contentMD5) @@ -266,7 +274,8 @@ func (s *StowStore) List(ctx context.Context, reference DataReference, maxItems return nil, NewCursorAtEnd(), err } - t := s.metrics.ListLatency.Start(ctx) + t1 := s.metrics.ListLatency.Start(ctx) + t2 := s.metrics.ListLatencyHist.Start(ctx) var stowCursor string if cursor.cursorState == AtStartCursorState { stowCursor = stow.CursorStart @@ -276,6 +285,9 @@ func (s *StowStore) List(ctx context.Context, reference DataReference, maxItems stowCursor = cursor.customPosition } items, stowCursor, err := container.Items(key, stowCursor, maxItems) + t1.Stop() + t2.Stop() + if err == nil { results := make([]DataReference, len(items)) for index, item := range items { @@ -286,7 +298,6 @@ func (s *StowStore) List(ctx context.Context, reference DataReference, maxItems } else { cursor = NewCursorFromCustomPosition(stowCursor) } - t.Stop() return results, cursor, nil } @@ -306,13 +317,16 @@ func (s *StowStore) ReadRaw(ctx context.Context, reference DataReference) (io.Re return nil, err } - t := s.metrics.ReadOpenLatency.Start(ctx) + t1 := 
s.metrics.ReadOpenLatency.Start(ctx) + t2 := s.metrics.ReadOpenLatencyHist.Start(ctx) item, err := container.Item(k) + t1.Stop() + t2.Stop() + if err != nil { incFailureCounterForError(ctx, s.metrics.ReadFailure, err) return nil, err } - t.Stop() sizeBytes, err := item.Size() if err != nil { @@ -340,8 +354,12 @@ func (s *StowStore) WriteRaw(ctx context.Context, reference DataReference, size return err } - t := s.metrics.WriteLatency.Start(ctx) + t1 := s.metrics.WriteLatency.Start(ctx) + t2 := s.metrics.WriteLatencyHist.Start(ctx) _, err = container.Put(k, raw, size, opts.Metadata) + t1.Stop() + t2.Stop() + if err != nil { // If this error is due to the bucket not existing, first attempt to create it and retry the getContainer call. if IsNotFound(err) || awsBucketIsNotFound(err) { @@ -356,8 +374,6 @@ func (s *StowStore) WriteRaw(ctx context.Context, reference DataReference, size } } - t.Stop() - return nil } @@ -374,8 +390,8 @@ func (s *StowStore) Delete(ctx context.Context, reference DataReference) error { return err } - t := s.metrics.DeleteLatency.Start(ctx) - defer t.Stop() + defer s.metrics.DeleteLatency.Start(ctx).Stop() + defer s.metrics.DeleteLatencyHist.Start(ctx).Stop() if err := container.RemoveItem(k); err != nil { incFailureCounterForError(ctx, s.metrics.DeleteFailure, err) @@ -474,20 +490,25 @@ func newStowMetrics(scope promutils.Scope) *stowMetrics { BadReference: labeled.NewCounter("bad_key", "Indicates the provided storage reference/key is incorrectly formatted", scope, labeled.EmitUnlabeledMetric), BadContainer: labeled.NewCounter("bad_container", "Indicates request for a container that has not been initialized", scope, labeled.EmitUnlabeledMetric), - HeadFailure: labeled.NewCounter("head_failure", "Indicates failure in HEAD for a given reference", scope, labeled.EmitUnlabeledMetric), - HeadLatency: labeled.NewStopWatch("head", "Indicates time to fetch metadata using the Head API", time.Millisecond, scope, labeled.EmitUnlabeledMetric), + 
HeadFailure: labeled.NewCounter("head_failure", "Indicates failure in HEAD for a given reference", scope, labeled.EmitUnlabeledMetric), + HeadLatency: labeled.NewStopWatch("head", "Indicates time to fetch metadata using the Head API", time.Millisecond, scope, labeled.EmitUnlabeledMetric), + HeadLatencyHist: labeled.NewHistogramStopWatch("head", "Indicates time to fetch metadata using the Head API", scope, labeled.EmitUnlabeledMetric), - ListFailure: labeled.NewCounter("list_failure", "Indicates failure in item listing for a given reference", scope, labeled.EmitUnlabeledMetric), - ListLatency: labeled.NewStopWatch("list", "Indicates time to fetch item listing using the List API", time.Millisecond, scope, labeled.EmitUnlabeledMetric), + ListFailure: labeled.NewCounter("list_failure", "Indicates failure in item listing for a given reference", scope, labeled.EmitUnlabeledMetric), + ListLatency: labeled.NewStopWatch("list", "Indicates time to fetch item listing using the List API", time.Millisecond, scope, labeled.EmitUnlabeledMetric), + ListLatencyHist: labeled.NewHistogramStopWatch("list", "Indicates time to fetch item listing using the List API", scope, labeled.EmitUnlabeledMetric), - ReadFailure: labeled.NewCounter("read_failure", "Indicates failure in GET for a given reference", scope, labeled.EmitUnlabeledMetric, failureTypeOption), - ReadOpenLatency: labeled.NewStopWatch("read_open", "Indicates time to first byte when reading", time.Millisecond, scope, labeled.EmitUnlabeledMetric), + ReadFailure: labeled.NewCounter("read_failure", "Indicates failure in GET for a given reference", scope, labeled.EmitUnlabeledMetric, failureTypeOption), + ReadOpenLatency: labeled.NewStopWatch("read_open", "Indicates time to first byte when reading", time.Millisecond, scope, labeled.EmitUnlabeledMetric), + ReadOpenLatencyHist: labeled.NewHistogramStopWatch("read_open", "Indicates time to first byte when reading", scope, labeled.EmitUnlabeledMetric), - WriteFailure: 
labeled.NewCounter("write_failure", "Indicates failure in storing/PUT for a given reference", scope, labeled.EmitUnlabeledMetric, failureTypeOption), - WriteLatency: labeled.NewStopWatch("write", "Time to write an object irrespective of size", time.Millisecond, scope, labeled.EmitUnlabeledMetric), + WriteFailure: labeled.NewCounter("write_failure", "Indicates failure in storing/PUT for a given reference", scope, labeled.EmitUnlabeledMetric, failureTypeOption), + WriteLatency: labeled.NewStopWatch("write", "Time to write an object irrespective of size", time.Millisecond, scope, labeled.EmitUnlabeledMetric), + WriteLatencyHist: labeled.NewHistogramStopWatch("write", "Time to write an object irrespective of size", scope, labeled.EmitUnlabeledMetric), - DeleteFailure: labeled.NewCounter("delete_failure", "Indicates failure in removing/DELETE for a given reference", scope, labeled.EmitUnlabeledMetric, failureTypeOption), - DeleteLatency: labeled.NewStopWatch("delete", "Time to delete an object irrespective of size", time.Millisecond, scope, labeled.EmitUnlabeledMetric), + DeleteFailure: labeled.NewCounter("delete_failure", "Indicates failure in removing/DELETE for a given reference", scope, labeled.EmitUnlabeledMetric, failureTypeOption), + DeleteLatency: labeled.NewStopWatch("delete", "Time to delete an object irrespective of size", time.Millisecond, scope, labeled.EmitUnlabeledMetric), + DeleteLatencyHist: labeled.NewHistogramStopWatch("delete", "Time to delete an object irrespective of size", scope, labeled.EmitUnlabeledMetric), } } diff --git a/go.mod b/go.mod index 5b59c17763..776eb1abc8 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/flyteorg/flyte/flytepropeller v0.0.0-00010101000000-000000000000 github.com/flyteorg/flyte/flytestdlib v0.0.0-00010101000000-000000000000 github.com/golang/glog v1.2.0 - github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/client_golang v1.19.1 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag 
v1.0.5 golang.org/x/sync v0.7.0 @@ -48,7 +48,7 @@ require ( github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudevents/sdk-go/binding/format/protobuf/v2 v2.14.0 // indirect github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2 v2.8.0 // indirect github.com/cloudevents/sdk-go/v2 v2.15.2 // indirect @@ -58,8 +58,8 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/dgraph-io/ristretto v0.0.3 // indirect - github.com/eapache/go-resiliency v1.2.0 // indirect - github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect + github.com/eapache/go-resiliency v1.3.0 // indirect + github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect @@ -102,7 +102,7 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.1-0.20210315223345-82c243799c99 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 // indirect github.com/gtank/cryptopasta v0.0.0-20170601214702-1f550f6f2f69 // indirect - github.com/hashicorp/go-uuid v1.0.2 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/imdario/mergo v0.3.13 // indirect @@ -115,14 +115,14 @@ require ( github.com/jackc/pgservicefile v0.0.0-20231201235250-de7065d80cb9 // indirect github.com/jackc/pgx/v5 v5.5.5 // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect - github.com/jcmturner/gofork v1.0.0 // indirect + github.com/jcmturner/gofork v1.7.6 // indirect github.com/jinzhu/inflection v1.0.0 // 
indirect github.com/jinzhu/now v1.1.5 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kelseyhightower/envconfig v1.4.0 // indirect - github.com/klauspost/compress v1.9.8 // indirect + github.com/klauspost/compress v1.17.8 // indirect github.com/kubeflow/common v0.4.3 // indirect github.com/kubeflow/training-operator v1.5.0-rc.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect @@ -138,7 +138,6 @@ require ( github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-sqlite3 v2.0.3+incompatible // indirect github.com/mattn/goveralls v0.0.6 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -158,12 +157,13 @@ require ( github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.53.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect github.com/ray-project/kuberay/ray-operator v1.1.0-rc.1 // indirect - github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/robfig/cron/v3 v3.0.0 // indirect + github.com/samber/lo v1.47.0 // indirect github.com/sendgrid/rest v2.6.9+incompatible // indirect github.com/sendgrid/sendgrid-go v3.10.0+incompatible // indirect github.com/shamaton/msgpack/v2 v2.2.2 // indirect @@ -198,7 +198,7 @@ require ( golang.org/x/crypto v0.25.0 
// indirect golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 // indirect golang.org/x/net v0.27.0 // indirect - golang.org/x/oauth2 v0.16.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/sys v0.22.0 // indirect golang.org/x/term v0.22.0 // indirect golang.org/x/text v0.16.0 // indirect @@ -211,7 +211,7 @@ require ( google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect google.golang.org/grpc v1.62.1 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.4 // indirect gopkg.in/jcmturner/aescts.v1 v1.0.1 // indirect diff --git a/go.sum b/go.sum index cdb119abe5..59ffb4358b 100644 --- a/go.sum +++ b/go.sum @@ -156,8 +156,8 @@ github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927 h1:SKI1/fuSdodxmNNyVBR8d7X/HuLnRpvvFO0AgyQk764= github.com/cheekybits/is v0.0.0-20150225183255-68e9c0620927/go.mod h1:h/aW8ynjgkuj+NQRlZcDbAbM1ORAbXjXX77sX7T289U= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -229,10 +229,12 @@ github.com/dustin/go-humanize 
v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb github.com/dustin/go-humanize v0.0.0-20180713052910-9f541cc9db5d/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= +github.com/eapache/go-resiliency v1.3.0 h1:RRL0nge+cWGlxXbUzJ7yMcq6w2XBEr19dCN6HECGaT0= +github.com/eapache/go-resiliency v1.3.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6 h1:8yY/I9ndfrgrXUbOGObLHKBR4Fl3nZXwM2c7OYTT8hM= +github.com/eapache/go-xerial-snappy v0.0.0-20230111030713-bf00bc1b83b6/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= @@ -779,8 +781,9 @@ github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -849,8 +852,9 @@ github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jandelgado/gcov2lcov v1.0.4-0.20210120124023-b83752c6dc08/go.mod h1:NnSxK6TMlg1oGDBfGelGbjgorT5/L3cchlbtgFYZSss= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= -github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg= +github.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= @@ -901,8 +905,9 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= 
github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU= +github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= github.com/knadh/koanf v0.14.1-0.20201201075439-e0853799f9ec/go.mod h1:H5mEFsTeWizwFXHKtsITL5ipsLTuAMQoGuQpp+1JL9U= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= @@ -1010,8 +1015,6 @@ github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpe github.com/mattn/goveralls v0.0.6 h1:cr8Y0VMo/MnEZBjxNN/vh6G90SZ7IMb6lms1dzMoO+Y= github.com/mattn/goveralls v0.0.6/go.mod h1:h8b4ow6FxSPMQHF6o2ve3qsclnffZjYTNEKmLesRwqw= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= @@ -1144,34 +1147,35 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= -github.com/prometheus/client_golang v1.16.0 
h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= +github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/ray-project/kuberay/ray-operator v1.1.0-rc.1 h1:skD8MXnQMO3QGUeTKt09VOXvuch/gJh8+6q3OLm0kAQ= github.com/ray-project/kuberay/ray-operator v1.1.0-rc.1/go.mod h1:ZqyKKvMP5nKDldQoKmur+Wcx7wVlV9Q98phFqHzr+KY= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod 
h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rhnvrm/simples3 v0.5.0/go.mod h1:Y+3vYm2V7Y4VijFoJHHTrja6OgPrJ2cBti8dPGkC3sA= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= @@ -1192,6 +1196,8 @@ github.com/rubenv/sql-migrate v0.0.0-20190212093014-1007f53448d7/go.mod h1:WS0rl github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc= +github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/santhosh-tekuri/jsonschema/v2 v2.1.0/go.mod h1:yzJzKUGV4RbWqWIBBP4wSOBqavX5saE02yirLS0OTyg= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= @@ -1579,8 +1585,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1954,8 +1960,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/DataDog/dd-trace-go.v1 v1.22.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= gopkg.in/DataDog/dd-trace-go.v1 v1.27.0/go.mod h1:Sp1lku8WJMvNV0kjDI4Ni/T7J/U3BO5ct5kEaoVU8+I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= diff --git a/monodocs-environment.lock.yaml b/monodocs-environment.lock.yaml index 6c4556a8f5..432354dd9e 100644 --- a/monodocs-environment.lock.yaml +++ b/monodocs-environment.lock.yaml @@ -13,8 +13,8 @@ version: 1 metadata: content_hash: - linux-64: 88915c74ac07d46f51bc09ba5ddabb66109646121fc129c33e63f7d99a1498b2 - osx-arm64: c9116621440baaedcc778afa476679092be2ff09e432dab49449b2f0d0d3ba35 + linux-64: d9c5e8f9f72be6dc0890439823013f20b75a91695591122a46b87515d44730e9 + osx-arm64: a1778e6712d7038b4d3cd7283d55df371201a5356fca0a436882341a1c159b7b channels: - url: conda-forge used_env_vars: [] @@ -308,7 +308,7 @@ package: category: main optional: false - name: anyio - version: 4.6.0 + version: 4.6.2.post1 manager: conda platform: linux-64 dependencies: @@ -317,14 +317,14 @@ package: 
python: '>=3.9' sniffio: '>=1.1' typing_extensions: '>=4.1' - url: https://conda.anaconda.org/conda-forge/noarch/anyio-4.6.0-pyhd8ed1ab_1.conda + url: https://conda.anaconda.org/conda-forge/noarch/anyio-4.6.2.post1-pyhd8ed1ab_0.conda hash: - md5: bc13891a047f50728b03595531f7f92e - sha256: d05493abca6ac1b0cb15f5d48c3117bddd73cc21e48bfcb460570cfa2ea2f909 + md5: 688697ec5e9588bdded167d19577625b + sha256: 4b54b7ce79d818e3cce54ae4d552dba51b7afac160ceecdefd04b3917a37c502 category: main optional: false - name: anyio - version: 4.6.0 + version: 4.6.2.post1 manager: conda platform: osx-arm64 dependencies: @@ -333,10 +333,10 @@ package: python: '>=3.9' sniffio: '>=1.1' typing_extensions: '>=4.1' - url: https://conda.anaconda.org/conda-forge/noarch/anyio-4.6.0-pyhd8ed1ab_1.conda + url: https://conda.anaconda.org/conda-forge/noarch/anyio-4.6.2.post1-pyhd8ed1ab_0.conda hash: - md5: bc13891a047f50728b03595531f7f92e - sha256: d05493abca6ac1b0cb15f5d48c3117bddd73cc21e48bfcb460570cfa2ea2f909 + md5: 688697ec5e9588bdded167d19577625b + sha256: 4b54b7ce79d818e3cce54ae4d552dba51b7afac160ceecdefd04b3917a37c502 category: main optional: false - name: aplus @@ -571,10 +571,10 @@ package: dependencies: python: '>=3.6' six: '>=1.6.1,<2.0' - url: https://conda.anaconda.org/conda-forge/noarch/astunparse-1.6.3-pyhd8ed1ab_0.tar.bz2 + url: https://conda.anaconda.org/conda-forge/noarch/astunparse-1.6.3-pyhd8ed1ab_2.conda hash: - md5: 000b6f68a0bfaba800ced7500c11780f - sha256: e5173d1ed038038e24c0623f0219dc587ee8663cf7efa737e7075128edbc6c60 + md5: 78d205ed5af12a89068386a6e2ca6ee2 + sha256: f2c00eb43d8f331c0987bdcfc44a1c244f438b5a088f5871a522524ab593954d category: main optional: false - name: astunparse @@ -584,10 +584,10 @@ package: dependencies: python: '>=3.6' six: '>=1.6.1,<2.0' - url: https://conda.anaconda.org/conda-forge/noarch/astunparse-1.6.3-pyhd8ed1ab_0.tar.bz2 + url: https://conda.anaconda.org/conda-forge/noarch/astunparse-1.6.3-pyhd8ed1ab_2.conda hash: - md5: 
000b6f68a0bfaba800ced7500c11780f - sha256: e5173d1ed038038e24c0623f0219dc587ee8663cf7efa737e7075128edbc6c60 + md5: 78d205ed5af12a89068386a6e2ca6ee2 + sha256: f2c00eb43d8f331c0987bdcfc44a1c244f438b5a088f5871a522524ab593954d category: main optional: false - name: async-lru @@ -1468,37 +1468,37 @@ package: category: main optional: false - name: boto3 - version: 1.35.38 + version: 1.35.40 manager: conda platform: linux-64 dependencies: - botocore: '>=1.35.38,<1.36.0' + botocore: '>=1.35.40,<1.36.0' jmespath: '>=0.7.1,<2.0.0' python: '>=3.8' s3transfer: '>=0.10.0,<0.11.0' - url: https://conda.anaconda.org/conda-forge/noarch/boto3-1.35.38-pyhd8ed1ab_0.conda + url: https://conda.anaconda.org/conda-forge/noarch/boto3-1.35.40-pyhd8ed1ab_0.conda hash: - md5: aaa069276ef2817e85732d419118213c - sha256: 7d1c85cb38a2465fa832ad9223f1731bff3a57fb244a5c2f51051e52781145b1 + md5: daf559311bbe42d4cd1fe3bf6f2ea4f2 + sha256: f5857681cb2fc77957cc8459da979b2c3b9cd30a761b9728e8ecdaede39ed949 category: main optional: false - name: boto3 - version: 1.35.38 + version: 1.35.40 manager: conda platform: osx-arm64 dependencies: - botocore: '>=1.35.38,<1.36.0' + botocore: '>=1.35.40,<1.36.0' jmespath: '>=0.7.1,<2.0.0' python: '>=3.8' s3transfer: '>=0.10.0,<0.11.0' - url: https://conda.anaconda.org/conda-forge/noarch/boto3-1.35.38-pyhd8ed1ab_0.conda + url: https://conda.anaconda.org/conda-forge/noarch/boto3-1.35.40-pyhd8ed1ab_0.conda hash: - md5: aaa069276ef2817e85732d419118213c - sha256: 7d1c85cb38a2465fa832ad9223f1731bff3a57fb244a5c2f51051e52781145b1 + md5: daf559311bbe42d4cd1fe3bf6f2ea4f2 + sha256: f5857681cb2fc77957cc8459da979b2c3b9cd30a761b9728e8ecdaede39ed949 category: main optional: false - name: botocore - version: 1.35.38 + version: 1.35.40 manager: conda platform: linux-64 dependencies: @@ -1506,14 +1506,14 @@ package: python: '>=3.8' python-dateutil: '>=2.1,<3.0.0' urllib3: '>=1.25.4,<1.27' - url: https://conda.anaconda.org/conda-forge/noarch/botocore-1.35.38-pyge38_1234567_0.conda + 
url: https://conda.anaconda.org/conda-forge/noarch/botocore-1.35.40-pyge38_1234567_0.conda hash: - md5: dfcbb2f8dbbc08722fc480bb13b1481d - sha256: 94f0a900a72381a6696a0f69857a4508ac24526d1000586b0450f32bb351e0c8 + md5: e021a01a5d84d3b13b1f5d505f4137ba + sha256: 8dcf9ad28de988b78cd34a27a77db2e7e323d2e6a9baae6ae9240e623ef6eee6 category: main optional: false - name: botocore - version: 1.35.38 + version: 1.35.40 manager: conda platform: osx-arm64 dependencies: @@ -1521,10 +1521,10 @@ package: python: '>=3.8' python-dateutil: '>=2.1,<3.0.0' urllib3: '>=1.25.4,<1.27' - url: https://conda.anaconda.org/conda-forge/noarch/botocore-1.35.38-pyge38_1234567_0.conda + url: https://conda.anaconda.org/conda-forge/noarch/botocore-1.35.40-pyge38_1234567_0.conda hash: - md5: dfcbb2f8dbbc08722fc480bb13b1481d - sha256: 94f0a900a72381a6696a0f69857a4508ac24526d1000586b0450f32bb351e0c8 + md5: e021a01a5d84d3b13b1f5d505f4137ba + sha256: 8dcf9ad28de988b78cd34a27a77db2e7e323d2e6a9baae6ae9240e623ef6eee6 category: main optional: false - name: branca @@ -1755,28 +1755,28 @@ package: category: main optional: false - name: c-ares - version: 1.34.1 + version: 1.34.2 manager: conda platform: linux-64 dependencies: __glibc: '>=2.28,<3.0.a0' libgcc: '>=13' - url: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.1-heb4867d_0.conda + url: https://conda.anaconda.org/conda-forge/linux-64/c-ares-1.34.2-heb4867d_0.conda hash: - md5: db792eada25e970c46642f624b029fd7 - sha256: d7e50b2ce3ef01dfbb11e8f50411b4be91b92c94cd10a83c843f1f2e53832e04 + md5: 2b780c0338fc0ffa678ac82c54af51fd + sha256: c2a515e623ac3e17a56027c06098fbd5ab47afefefbd386b4c21289f2ec55139 category: main optional: false - name: c-ares - version: 1.34.1 + version: 1.34.2 manager: conda platform: osx-arm64 dependencies: __osx: '>=11.0' - url: https://conda.anaconda.org/conda-forge/osx-arm64/c-ares-1.34.1-hd74edd7_0.conda + url: https://conda.anaconda.org/conda-forge/osx-arm64/c-ares-1.34.2-h7ab814d_0.conda hash: - md5: 
38c2c944d30ca320a8751b214ed5364e - sha256: 6b864a213027340fbcf42a04ca67d4f8b908a714a5c6e160e6fb6ad21af795e4 + md5: 8a8cfc11064b521bc54bd2d8591cb137 + sha256: 24d53d27397f9c2f0c168992690b5ec1bd62593fb4fc1f1e906ab91b10fd06c3 category: main optional: false - name: ca-certificates @@ -2028,14 +2028,14 @@ package: dependencies: __osx: '>=11.0' bzip2: '>=1.0.8,<2.0a0' - libcurl: '>=8.8.0,<9.0a0' + libcurl: '>=8.10.1,<9.0a0' libgfortran: 5.* libgfortran5: '>=13.2.0' libzlib: '>=1.3.1,<2.0a0' - url: https://conda.anaconda.org/conda-forge/osx-arm64/cfitsio-4.4.1-h793ed5c_0.conda + url: https://conda.anaconda.org/conda-forge/osx-arm64/cfitsio-4.4.1-hd313823_1.conda hash: - md5: c2a9a79b58d2de021ad9295f53e1f40a - sha256: cad6c9f86f98f1ac980e8229ef76a9bb8f62d167a52d29770e0548c7f9a80eb1 + md5: d87f4a6fb494463885683859648c9e3a + sha256: 1c3ca3b98086c276d0480549366a6695b7df4a7a98bf82942cb5d687bb3b1952 category: main optional: false - name: charset-normalizer @@ -2141,27 +2141,27 @@ package: category: main optional: false - name: cloudpickle - version: 3.0.0 + version: 3.1.0 manager: conda platform: linux-64 dependencies: python: '>=3.8' - url: https://conda.anaconda.org/conda-forge/noarch/cloudpickle-3.0.0-pyhd8ed1ab_0.conda + url: https://conda.anaconda.org/conda-forge/noarch/cloudpickle-3.1.0-pyhd8ed1ab_0.conda hash: - md5: 753d29fe41bb881e4b9c004f0abf973f - sha256: 0dfbc1ffa72e7a0882f486c9b1e4e9cccb68cf5c576fe53a89d076c9f1d43754 + md5: d1e8704eb346e1d4b86b5cc1a6fe99f2 + sha256: f29f75c793c3acb6df8565d77e4c3b23436e3647c9e1c562c55d1cb2ddaeaf05 category: main optional: false - name: cloudpickle - version: 3.0.0 + version: 3.1.0 manager: conda platform: osx-arm64 dependencies: python: '>=3.8' - url: https://conda.anaconda.org/conda-forge/noarch/cloudpickle-3.0.0-pyhd8ed1ab_0.conda + url: https://conda.anaconda.org/conda-forge/noarch/cloudpickle-3.1.0-pyhd8ed1ab_0.conda hash: - md5: 753d29fe41bb881e4b9c004f0abf973f - sha256: 
0dfbc1ffa72e7a0882f486c9b1e4e9cccb68cf5c576fe53a89d076c9f1d43754 + md5: d1e8704eb346e1d4b86b5cc1a6fe99f2 + sha256: f29f75c793c3acb6df8565d77e4c3b23436e3647c9e1c562c55d1cb2ddaeaf05 category: main optional: false - name: codespell @@ -3145,11 +3145,10 @@ package: libgcc: '>=13' python: '>=3.9,<3.10.0a0' python_abi: 3.9.* - pytz: '' - url: https://conda.anaconda.org/conda-forge/linux-64/fastavro-1.9.7-py39h8cd3c5a_0.conda + url: https://conda.anaconda.org/conda-forge/linux-64/fastavro-1.9.7-py39h8cd3c5a_1.conda hash: - md5: f4693e6b4702af8007542af138190dea - sha256: ba40286eea1a3cb9cf90ac5422a42677ebdc7bbc5c0ccb855332fe1de01aa180 + md5: a8247f20f35f24945dbc10a96236835a + sha256: 999b730a1093324c5a5092fe3d71d8c2a9a8a59750cf18c9acf47d7d79557e78 category: main optional: false - name: fastavro @@ -3160,11 +3159,10 @@ package: __osx: '>=11.0' python: '>=3.9,<3.10.0a0' python_abi: 3.9.* - pytz: '' - url: https://conda.anaconda.org/conda-forge/osx-arm64/fastavro-1.9.7-py39h06df861_0.conda + url: https://conda.anaconda.org/conda-forge/osx-arm64/fastavro-1.9.7-py39h57695bc_1.conda hash: - md5: 2c8dfc3c50b97673e711a8e71f302295 - sha256: f0cc939f9ed7cb42ea8b7a3ca47ecfc396682677e55ad79c414e85168253fcb2 + md5: 38d4b97b1be68c54b48be090c28b7926 + sha256: cf8ebabede71428f8528267292ad28de00c2d8e76d87895aab696d5e0dba4f22 category: main optional: false - name: filelock @@ -3663,7 +3661,7 @@ package: category: main optional: false - name: frozendict - version: 2.4.5 + version: 2.4.6 manager: conda platform: linux-64 dependencies: @@ -3671,24 +3669,24 @@ package: libgcc: '>=13' python: '>=3.9,<3.10.0a0' python_abi: 3.9.* - url: https://conda.anaconda.org/conda-forge/linux-64/frozendict-2.4.5-py39h8cd3c5a_0.conda + url: https://conda.anaconda.org/conda-forge/linux-64/frozendict-2.4.6-py39h8cd3c5a_0.conda hash: - md5: 03c3ce70f51a909342b576cd9dcfc579 - sha256: 67b60dc79f324ed7a7dfe3ea80004571c5023479cc0a67d1e1048879bae90f7f + md5: ef1900b71355f102b94a322685ae2f5f + sha256: 
23fdb3b3d4f7683734ca017d597943a61a577ac7730e215715ee414d959184f8 category: main optional: false - name: frozendict - version: 2.4.5 + version: 2.4.6 manager: conda platform: osx-arm64 dependencies: __osx: '>=11.0' python: '>=3.9,<3.10.0a0' python_abi: 3.9.* - url: https://conda.anaconda.org/conda-forge/osx-arm64/frozendict-2.4.5-py39h06df861_0.conda + url: https://conda.anaconda.org/conda-forge/osx-arm64/frozendict-2.4.6-py39h57695bc_0.conda hash: - md5: ef654fd6a0e99510327976b480e0ed33 - sha256: d91d9bfc96537eb1041eec28e5832bf2624fc614af35893e177c450aa62a0f2b + md5: fb972e193d93f4bc8919e5c8d7b6e24e + sha256: 0f64fc89baad9ce4d12728378ff762951b811465acf580ac421dafa5f4d3869f category: main optional: false - name: frozenlist @@ -4475,39 +4473,39 @@ package: category: main optional: false - name: google-cloud-bigquery-storage - version: 2.26.0 + version: 2.27.0 manager: conda platform: linux-64 dependencies: fastavro: '>=0.21.2' - google-cloud-bigquery-storage-core: 2.26.0.* + google-cloud-bigquery-storage-core: 2.27.0.* pandas: '>=0.21.1' pyarrow: '>=0.15.0' python: '>=3.8' - url: https://conda.anaconda.org/conda-forge/noarch/google-cloud-bigquery-storage-2.26.0-pyhff2d567_0.conda + url: https://conda.anaconda.org/conda-forge/noarch/google-cloud-bigquery-storage-2.27.0-pyhff2d567_0.conda hash: - md5: cb751865bb5abc4eee94b01e0b7859f7 - sha256: a19a57b10cb9c9aabc0cad742ee44ed1055e2787e82307b7ce25cc6b4663f420 + md5: 35d2f945bf888a3612b75a73ace59152 + sha256: f712295ba2c4006fd006635caba75ee940e268655754431e5265e02828194e94 category: main optional: false - name: google-cloud-bigquery-storage - version: 2.26.0 + version: 2.27.0 manager: conda platform: osx-arm64 dependencies: fastavro: '>=0.21.2' - google-cloud-bigquery-storage-core: 2.26.0.* + google-cloud-bigquery-storage-core: 2.27.0.* pandas: '>=0.21.1' pyarrow: '>=0.15.0' python: '>=3.8' - url: https://conda.anaconda.org/conda-forge/noarch/google-cloud-bigquery-storage-2.26.0-pyhff2d567_0.conda + url: 
https://conda.anaconda.org/conda-forge/noarch/google-cloud-bigquery-storage-2.27.0-pyhff2d567_0.conda hash: - md5: cb751865bb5abc4eee94b01e0b7859f7 - sha256: a19a57b10cb9c9aabc0cad742ee44ed1055e2787e82307b7ce25cc6b4663f420 + md5: 35d2f945bf888a3612b75a73ace59152 + sha256: f712295ba2c4006fd006635caba75ee940e268655754431e5265e02828194e94 category: main optional: false - name: google-cloud-bigquery-storage-core - version: 2.26.0 + version: 2.27.0 manager: conda platform: linux-64 dependencies: @@ -4516,14 +4514,14 @@ package: proto-plus: '>=1.22.0,<2.0.0dev' protobuf: '>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5' python: '>=3.8' - url: https://conda.anaconda.org/conda-forge/noarch/google-cloud-bigquery-storage-core-2.26.0-pyhff2d567_0.conda + url: https://conda.anaconda.org/conda-forge/noarch/google-cloud-bigquery-storage-core-2.27.0-pyhff2d567_0.conda hash: - md5: b5e4d73400b6c76e58c72c0e96e8436f - sha256: 165159bcc949e00913d9cf2e19a85b0a4a588379bc2dc0823d687c9ac5271924 + md5: 9ea2bb1ebc301c01ee1d04a645af6b14 + sha256: fb9269c2426aab919cd0b3bb5e45e84a3bb0347240faa5be20f36053f867eebe category: main optional: false - name: google-cloud-bigquery-storage-core - version: 2.26.0 + version: 2.27.0 manager: conda platform: osx-arm64 dependencies: @@ -4532,10 +4530,10 @@ package: proto-plus: '>=1.22.0,<2.0.0dev' protobuf: '>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5' python: '>=3.8' - url: https://conda.anaconda.org/conda-forge/noarch/google-cloud-bigquery-storage-core-2.26.0-pyhff2d567_0.conda + url: https://conda.anaconda.org/conda-forge/noarch/google-cloud-bigquery-storage-core-2.27.0-pyhff2d567_0.conda hash: - md5: b5e4d73400b6c76e58c72c0e96e8436f - sha256: 165159bcc949e00913d9cf2e19a85b0a4a588379bc2dc0823d687c9ac5271924 + md5: 9ea2bb1ebc301c01ee1d04a645af6b14 + sha256: fb9269c2426aab919cd0b3bb5e45e84a3bb0347240faa5be20f36053f867eebe category: main optional: false - name: 
google-cloud-core @@ -4737,29 +4735,29 @@ package: category: main optional: false - name: graphql-core - version: 3.2.4 + version: 3.2.5 manager: conda platform: linux-64 dependencies: python: '>=3.6' typing_extensions: '>=4,<5' - url: https://conda.anaconda.org/conda-forge/noarch/graphql-core-3.2.4-pyhd8ed1ab_0.conda + url: https://conda.anaconda.org/conda-forge/noarch/graphql-core-3.2.5-pyhd8ed1ab_0.conda hash: - md5: b41168a94d22894d56bdc69efc032a2b - sha256: f32ef1216fe0bff8d6dd86e2a4093f18ee0a6309a2057ba2d26c190111fe40b0 + md5: 415114255be0890a078eae6d0a9d0e2b + sha256: a7e6d2511aa1285bfce0261e5a42d06ac9272e8799bd63b37b84ef72f8ed6b30 category: main optional: false - name: graphql-core - version: 3.2.4 + version: 3.2.5 manager: conda platform: osx-arm64 dependencies: python: '>=3.6' typing_extensions: '>=4,<5' - url: https://conda.anaconda.org/conda-forge/noarch/graphql-core-3.2.4-pyhd8ed1ab_0.conda + url: https://conda.anaconda.org/conda-forge/noarch/graphql-core-3.2.5-pyhd8ed1ab_0.conda hash: - md5: b41168a94d22894d56bdc69efc032a2b - sha256: f32ef1216fe0bff8d6dd86e2a4093f18ee0a6309a2057ba2d26c190111fe40b0 + md5: 415114255be0890a078eae6d0a9d0e2b + sha256: a7e6d2511aa1285bfce0261e5a42d06ac9272e8799bd63b37b84ef72f8ed6b30 category: main optional: false - name: graphql-relay @@ -5171,16 +5169,17 @@ package: manager: conda platform: linux-64 dependencies: + __glibc: '>=2.17,<3.0.a0' cached-property: '' hdf5: '>=1.14.3,<1.14.4.0a0' - libgcc-ng: '>=12' + libgcc: '>=13' numpy: '>=1.19,<3' python: '>=3.9,<3.10.0a0' python_abi: 3.9.* - url: https://conda.anaconda.org/conda-forge/linux-64/h5py-3.11.0-nompi_py39h24b94d4_102.conda + url: https://conda.anaconda.org/conda-forge/linux-64/h5py-3.11.0-nompi_py39h30a5a8d_103.conda hash: - md5: ce2f5518b03b8b91a919c9e977bd88d1 - sha256: a74ccb08415f8df4ea7071b1cd85f396c62a6482aca565b2ac4f5284952c0750 + md5: 875851870752d93655c848dafab4bc0d + sha256: a1abdf04c5cd10569dc19c98d97baad2864bf42cb16290ec1c83826fb3a1c5e3 category: main 
optional: false - name: h5py @@ -5194,10 +5193,10 @@ package: numpy: '>=1.19,<3' python: '>=3.9,<3.10.0a0' python_abi: 3.9.* - url: https://conda.anaconda.org/conda-forge/osx-arm64/h5py-3.11.0-nompi_py39h534c8c8_102.conda + url: https://conda.anaconda.org/conda-forge/osx-arm64/h5py-3.11.0-nompi_py39h5dd549c_103.conda hash: - md5: f5f7233e2910737b952246627afa7393 - sha256: e05984ae47df5b70bab3a48433ae241e944a38f3b773bebf53650be3adbb6fe4 + md5: 27843e4b147c39b85dcf3744418b45d6 + sha256: 8aaff2990bcb2ef8a03d36852d1e8934a6f2a88b019190f1bcab35dd559874d9 category: main optional: false - name: harfbuzz @@ -7203,7 +7202,7 @@ package: libcxx: '>=17' libgoogle-cloud: '>=2.29.0,<2.30.0a0' libgoogle-cloud-storage: '>=2.29.0,<2.30.0a0' - libre2-11: '>=2023.9.1,<2024.0a0' + libre2-11: '>=2023.9.1' libutf8proc: '>=2.8.0,<3.0a0' libzlib: '>=1.3.1,<2.0a0' lz4-c: '>=1.9.3,<1.10.0a0' @@ -8144,10 +8143,10 @@ package: libintl: '>=0.22.5,<1.0a0' libzlib: '>=1.3.1,<2.0a0' pcre2: '>=10.44,<10.45.0a0' - url: https://conda.anaconda.org/conda-forge/osx-arm64/libglib-2.82.1-h4821c08_0.conda + url: https://conda.anaconda.org/conda-forge/osx-arm64/libglib-2.82.1-h4821c08_1.conda hash: - md5: 50e859d1781857abf820ec5423073a21 - sha256: 0ad22c14fbe77144111abb5495894e02a124773a4d2d6c2ded5c7d66aec694b4 + md5: 277cf745965bba2d70dbeec422cbff40 + sha256: 5494aefb97f3e0f7cbc10ab3573e227dcb436c77d104ecd3c29e6d7543c32eb5 category: main optional: false - name: libgoogle-cloud @@ -8235,7 +8234,7 @@ package: libabseil: '>=20240116.1,<20240117.0a0' libcxx: '>=16' libprotobuf: '>=4.25.3,<4.25.4.0a0' - libre2-11: '>=2023.9.1,<2024.0a0' + libre2-11: '>=2023.9.1' libzlib: '>=1.2.13,<2.0.0a0' openssl: '>=3.2.1,<4.0a0' re2: '' @@ -8629,10 +8628,10 @@ package: krb5: '>=1.21.3,<1.22.0a0' openldap: '>=2.6.8,<2.7.0a0' openssl: '>=3.3.2,<4.0a0' - url: https://conda.anaconda.org/conda-forge/osx-arm64/libpq-17.0-h7536039_2.conda + url: https://conda.anaconda.org/conda-forge/osx-arm64/libpq-17.0-h9fd3c6c_3.conda hash: - 
md5: 4ffa9cafca79387f390ce427c6e0cbb0 - sha256: 32d86f83709cc6182262960d94fc56e3cba46496be63e711fde73af1276d634e + md5: 166c7f2d33bbbf9afb5bd5ae03a06230 + sha256: e314a678eb74ecc3d0625ed7be0ae68ba188d758419c4d3c6cb37ef685a88093 category: main optional: false - name: libprotobuf @@ -9245,10 +9244,10 @@ package: platform: linux-64 dependencies: __glibc: '>=2.17,<3.0.a0' - url: https://conda.anaconda.org/conda-forge/linux-64/llvm-openmp-19.1.1-h024ca30_0.conda + url: https://conda.anaconda.org/conda-forge/linux-64/llvm-openmp-19.1.1-h024ca30_1.conda hash: - md5: f1fe1a838fecddbcee97c9d4afe24af5 - sha256: cde25f97acebbce09b70aa7f115e900676ac2f102a9afa916b5cf84cbf2a465a + md5: ea889be010d5d66a7e6dd5e1b04c70d7 + sha256: 780739b625ce1836fde67884b34abb6e193402de297d25aab81c21467210fd74 category: main optional: false - name: llvm-openmp @@ -9257,10 +9256,10 @@ package: platform: osx-arm64 dependencies: __osx: '>=11.0' - url: https://conda.anaconda.org/conda-forge/osx-arm64/llvm-openmp-19.1.1-h6cdba0f_0.conda + url: https://conda.anaconda.org/conda-forge/osx-arm64/llvm-openmp-19.1.1-hb52a8e5_1.conda hash: - md5: e509675b5f2dff8cbd7de8f9362bafac - sha256: f325a123dffba3dbf090ced4d8b05fd9f7c7151180f7bdd5952c146017a20f4c + md5: 6eab363cb011e739cf6f3bb92b763525 + sha256: bac90d68cd6a1b5f0ae21e900715d425b02a3be8f6199a5e2dbcb126d8525a6e category: main optional: false - name: locket @@ -12023,10 +12022,10 @@ package: tzcode: '' tzdata: '' zstd: '>=1.5.6,<1.6.0a0' - url: https://conda.anaconda.org/conda-forge/osx-arm64/postgresql-17.0-h821f464_2.conda + url: https://conda.anaconda.org/conda-forge/osx-arm64/postgresql-17.0-h25379d5_3.conda hash: - md5: b78230ff45c9c78bd196d121c0422eb8 - sha256: bfbc20ea47029f39f7b9721334bea6701d2df4061d8de2ccc5967626a8090e40 + md5: 0f6351dc09d5410726ed1d5c6d03e3e5 + sha256: f83dd89bbb7c76fee1a65e14ae438312598182b22274d806caded45bc4e6747c category: main optional: false - name: pre-commit @@ -12324,10 +12323,10 @@ package: libgcc: '>=13' python: 
'>=3.9,<3.10.0a0' python_abi: 3.9.* - url: https://conda.anaconda.org/conda-forge/linux-64/psutil-6.0.0-py39h8cd3c5a_1.conda + url: https://conda.anaconda.org/conda-forge/linux-64/psutil-6.0.0-py39h8cd3c5a_2.conda hash: - md5: 45a3a1bbc95b90e35af5976c3d957c9f - sha256: 6433c4aa276d673796fc69d823e4fbee0984def6a5056650e758930ba70a6569 + md5: 658a024659b412cba60eb14a394f0d54 + sha256: c08f2d667bbe80530c614f01da227c1aa33df8e4ec76274fad2c90c7c00f6aef category: main optional: false - name: psutil @@ -12338,10 +12337,10 @@ package: __osx: '>=11.0' python: '>=3.9,<3.10.0a0' python_abi: 3.9.* - url: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-6.0.0-py39h06df861_1.conda + url: https://conda.anaconda.org/conda-forge/osx-arm64/psutil-6.0.0-py39h57695bc_2.conda hash: - md5: 8808544125d0b791266d093b72f0ce8d - sha256: e76fb745fecaa12cd1d33383f459922d9c8614ae7b587b21e833351f6a46a120 + md5: 68253dcc43431a5e9277602d3240c2c2 + sha256: 0f68f4e9f24f08ee9a923a6d6c34e13a3d545251c6c00022db6ea99396975db0 category: main optional: false - name: psycopg2 @@ -12886,27 +12885,27 @@ package: category: main optional: false - name: pyparsing - version: 3.1.4 + version: 3.2.0 manager: conda platform: linux-64 dependencies: - python: '>=3.6' - url: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.1.4-pyhd8ed1ab_0.conda + python: '>=3.9' + url: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.2.0-pyhd8ed1ab_1.conda hash: - md5: 4d91352a50949d049cf9714c8563d433 - sha256: 8714a83f1aeac278b3eb33c7cb880c95c9a5924e7a5feeb9e87e7d0837afa085 + md5: 035c17fbf099f50ff60bf2eb303b0a83 + sha256: b846e3965cd106438cf0b9dc0de8d519670ac065f822a7d66862e9423e0229cb category: main optional: false - name: pyparsing - version: 3.1.4 + version: 3.2.0 manager: conda platform: osx-arm64 dependencies: - python: '>=3.6' - url: https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.1.4-pyhd8ed1ab_0.conda + python: '>=3.9' + url: 
https://conda.anaconda.org/conda-forge/noarch/pyparsing-3.2.0-pyhd8ed1ab_1.conda hash: - md5: 4d91352a50949d049cf9714c8563d433 - sha256: 8714a83f1aeac278b3eb33c7cb880c95c9a5924e7a5feeb9e87e7d0837afa085 + md5: 035c17fbf099f50ff60bf2eb303b0a83 + sha256: b846e3965cd106438cf0b9dc0de8d519670ac065f822a7d66862e9423e0229cb category: main optional: false - name: pyproj @@ -13962,13 +13961,14 @@ package: manager: conda platform: linux-64 dependencies: - libgcc-ng: '>=12' + __glibc: '>=2.17,<3.0.a0' + libgcc: '>=13' python: '>=3.9,<3.10.0a0' python_abi: 3.9.* - url: https://conda.anaconda.org/conda-forge/linux-64/ruamel.yaml.clib-0.2.8-py39hd1e30aa_0.conda + url: https://conda.anaconda.org/conda-forge/linux-64/ruamel.yaml.clib-0.2.8-py39h8cd3c5a_1.conda hash: - md5: b1961e70cfe8e1eac243faf933d1813f - sha256: 32b7b4f13493eeff0d18de85d58d7b8c2b04234ea737b8769871067189c70d69 + md5: 52b68618d0aa78366f287de1b1319a1c + sha256: 269ea8b5514b788299398765f0fbdaff941875d76796966e866528ecbf217f90 category: main optional: false - name: ruamel.yaml.clib @@ -13976,12 +13976,13 @@ package: manager: conda platform: osx-arm64 dependencies: + __osx: '>=11.0' python: '>=3.9,<3.10.0a0' python_abi: 3.9.* - url: https://conda.anaconda.org/conda-forge/osx-arm64/ruamel.yaml.clib-0.2.8-py39h17cfd9d_0.conda + url: https://conda.anaconda.org/conda-forge/osx-arm64/ruamel.yaml.clib-0.2.8-py39h57695bc_1.conda hash: - md5: b4b13ca14d2848049adc82fed7c89e64 - sha256: d128e55fb573217a9ef6189e62172b2b497d7163d7b3097cf6ff0c6bf29a6a1a + md5: 34f6d0337554e552639c2f1f99cd41ad + sha256: 3fd2ac1417604aa0a279f2c624bf6f4180d26a217087d0ede1ca005e8b627cea category: main optional: false - name: s2n @@ -15058,31 +15059,31 @@ package: category: main optional: false - name: sphinxcontrib-mermaid - version: 0.9.2 + version: 1.0.0 manager: conda platform: linux-64 dependencies: - docutils: '' python: '>=3.7' + pyyaml: '' sphinx: '' - url: 
https://conda.anaconda.org/conda-forge/noarch/sphinxcontrib-mermaid-0.9.2-pyhd8ed1ab_0.conda + url: https://conda.anaconda.org/conda-forge/noarch/sphinxcontrib-mermaid-1.0.0-pyhd8ed1ab_0.conda hash: - md5: 54a6a75e5b3989f1d925d8e5674bbbcb - sha256: bb02467bb3569406d978112f299e8d8b0832cc495b8bbd5d591858ddbe3a291d + md5: a906d0a778a54834ffd15d22bdda9ddd + sha256: 419e221a58330dececc14151c7fe5acb80d303c11879fa406638c6d1a4c3dff1 category: main optional: false - name: sphinxcontrib-mermaid - version: 0.9.2 + version: 1.0.0 manager: conda platform: osx-arm64 dependencies: - docutils: '' python: '>=3.7' + pyyaml: '' sphinx: '' - url: https://conda.anaconda.org/conda-forge/noarch/sphinxcontrib-mermaid-0.9.2-pyhd8ed1ab_0.conda + url: https://conda.anaconda.org/conda-forge/noarch/sphinxcontrib-mermaid-1.0.0-pyhd8ed1ab_0.conda hash: - md5: 54a6a75e5b3989f1d925d8e5674bbbcb - sha256: bb02467bb3569406d978112f299e8d8b0832cc495b8bbd5d591858ddbe3a291d + md5: a906d0a778a54834ffd15d22bdda9ddd + sha256: 419e221a58330dececc14151c7fe5acb80d303c11879fa406638c6d1a4c3dff1 category: main optional: false - name: sphinxcontrib-qthelp @@ -17096,7 +17097,7 @@ package: category: main optional: false - name: yarl - version: 1.14.0 + version: 1.15.2 manager: conda platform: linux-64 dependencies: @@ -17107,14 +17108,14 @@ package: propcache: '>=0.2.0' python: '>=3.9,<3.10.0a0' python_abi: 3.9.* - url: https://conda.anaconda.org/conda-forge/linux-64/yarl-1.14.0-py39h8cd3c5a_0.conda + url: https://conda.anaconda.org/conda-forge/linux-64/yarl-1.15.2-py39h8cd3c5a_0.conda hash: - md5: aefe5e8b6c61810bf76c972544eb61f0 - sha256: 3d7a979b5f0af574d73014b7249dd36815eb21f84957b002038c40f41e536772 + md5: fe2cac0e053f9155af479676f58beeb5 + sha256: da8fe71f583c052fa262f807de233d62cda2cee7fa43b65b749d253715a4ade2 category: main optional: false - name: yarl - version: 1.14.0 + version: 1.15.2 manager: conda platform: osx-arm64 dependencies: @@ -17124,10 +17125,10 @@ package: propcache: '>=0.2.0' python: 
'>=3.9,<3.10.0a0' python_abi: 3.9.* - url: https://conda.anaconda.org/conda-forge/osx-arm64/yarl-1.14.0-py39h06df861_0.conda + url: https://conda.anaconda.org/conda-forge/osx-arm64/yarl-1.15.2-py39h57695bc_0.conda hash: - md5: bcbb778cbe6ea50b5c0d0f16cb76745a - sha256: e390c4dac1d474937d2001d3ee7a0763ebab60d93dcef64edb5b115eddb162ca + md5: c426809986f74e266b6cae7c87d8a206 + sha256: 5c7f619a5a86ebb1239d162f9679d016fabb56cbd45d1ca46cd167f6f82b82bf category: main optional: false - name: zeromq @@ -17317,58 +17318,78 @@ package: sha256: 2005c8e124fda3948f2a6abb2dbebb2c936d2d821acaca6afd61932edfa9bc07 category: main optional: false +- name: aenum + version: 3.1.15 + manager: pip + platform: linux-64 + dependencies: {} + url: https://files.pythonhosted.org/packages/d0/fa/ca0c66b388624ba9dbbf35aab3a9f326bfdf5e56a7237fe8f1b600da6864/aenum-3.1.15-py3-none-any.whl + hash: + sha256: e0dfaeea4c2bd362144b87377e2c61d91958c5ed0b4daf89cb6f45ae23af6288 + category: main + optional: false +- name: aenum + version: 3.1.15 + manager: pip + platform: osx-arm64 + dependencies: {} + url: https://files.pythonhosted.org/packages/d0/fa/ca0c66b388624ba9dbbf35aab3a9f326bfdf5e56a7237fe8f1b600da6864/aenum-3.1.15-py3-none-any.whl + hash: + sha256: e0dfaeea4c2bd362144b87377e2c61d91958c5ed0b4daf89cb6f45ae23af6288 + category: main + optional: false - name: aioboto3 - version: 13.1.1 + version: 13.2.0 manager: pip platform: linux-64 dependencies: - aiobotocore: 2.13.1 + aiobotocore: 2.15.2 aiofiles: '>=23.2.1' - url: https://files.pythonhosted.org/packages/99/de/c9ebaf88400e178e4925077fe03dadfebbd5055c0d3de65e95e5cf618398/aioboto3-13.1.1-py3-none-any.whl + url: https://files.pythonhosted.org/packages/25/66/e4b2d8f3d11687f7c63b1b63e484ee879f9af637b3564026037655d83255/aioboto3-13.2.0-py3-none-any.whl hash: - sha256: 4b44a7c1317a51479b92ee57a2fea2cdef6bea2c3669870830b3f4dec6be7ca0 + sha256: fd894b8d319934dfd75285b58da35560670e57182d0148c54a3d4ee5da730c78 category: main optional: false - name: aioboto3 - 
version: 13.1.1 + version: 13.2.0 manager: pip platform: osx-arm64 dependencies: - aiobotocore: 2.13.1 + aiobotocore: 2.15.2 aiofiles: '>=23.2.1' - url: https://files.pythonhosted.org/packages/99/de/c9ebaf88400e178e4925077fe03dadfebbd5055c0d3de65e95e5cf618398/aioboto3-13.1.1-py3-none-any.whl + url: https://files.pythonhosted.org/packages/25/66/e4b2d8f3d11687f7c63b1b63e484ee879f9af637b3564026037655d83255/aioboto3-13.2.0-py3-none-any.whl hash: - sha256: 4b44a7c1317a51479b92ee57a2fea2cdef6bea2c3669870830b3f4dec6be7ca0 + sha256: fd894b8d319934dfd75285b58da35560670e57182d0148c54a3d4ee5da730c78 category: main optional: false - name: aiobotocore - version: 2.13.1 + version: 2.15.2 manager: pip platform: linux-64 dependencies: - botocore: '>=1.34.70,<1.34.132' + botocore: '>=1.35.16,<1.35.37' aiohttp: '>=3.9.2,<4.0.0' wrapt: '>=1.10.10,<2.0.0' aioitertools: '>=0.5.1,<1.0.0' - boto3: '>=1.34.70,<1.34.132' - url: https://files.pythonhosted.org/packages/30/07/42f884c1600169e4267575cdd261c75dea31782d8fd877bbea358d559416/aiobotocore-2.13.1-py3-none-any.whl + boto3: '>=1.35.16,<1.35.37' + url: https://files.pythonhosted.org/packages/a4/57/6402242dde160d9ef9903487b4277443dc3da04615f6c4d3b48564a8ab57/aiobotocore-2.15.2-py3-none-any.whl hash: - sha256: 1bef121b99841ee3cc788e4ed97c332ba32353b1f00e886d1beb3aae95520858 + sha256: d4d3128b4b558e2b4c369bfa963b022d7e87303adb82eec623cec8aa77ae578a category: main optional: false - name: aiobotocore - version: 2.13.1 + version: 2.15.2 manager: pip platform: osx-arm64 dependencies: - botocore: '>=1.34.70,<1.34.132' + botocore: '>=1.35.16,<1.35.37' aiohttp: '>=3.9.2,<4.0.0' wrapt: '>=1.10.10,<2.0.0' aioitertools: '>=0.5.1,<1.0.0' - boto3: '>=1.34.70,<1.34.132' - url: https://files.pythonhosted.org/packages/30/07/42f884c1600169e4267575cdd261c75dea31782d8fd877bbea358d559416/aiobotocore-2.13.1-py3-none-any.whl + boto3: '>=1.35.16,<1.35.37' + url: 
https://files.pythonhosted.org/packages/a4/57/6402242dde160d9ef9903487b4277443dc3da04615f6c4d3b48564a8ab57/aiobotocore-2.15.2-py3-none-any.whl hash: - sha256: 1bef121b99841ee3cc788e4ed97c332ba32353b1f00e886d1beb3aae95520858 + sha256: d4d3128b4b558e2b4c369bfa963b022d7e87303adb82eec623cec8aa77ae578a category: main optional: false - name: aiofiles @@ -17466,7 +17487,7 @@ package: category: main optional: false - name: azure-identity - version: 1.18.0 + version: 1.19.0 manager: pip platform: linux-64 dependencies: @@ -17475,13 +17496,13 @@ package: msal: '>=1.30.0' msal-extensions: '>=1.2.0' typing-extensions: '>=4.0.0' - url: https://files.pythonhosted.org/packages/b0/71/1d1bb387b6acaa5daa3e703c70dde3d54823ccd229bd6730de6e724f296e/azure_identity-1.18.0-py3-none-any.whl + url: https://files.pythonhosted.org/packages/f0/d5/3995ed12f941f4a41a273d9b1709282e825ef87ed8eab3833038fee54d59/azure_identity-1.19.0-py3-none-any.whl hash: - sha256: bccf6106245b49ff41d0c4cd7b72851c5a2ba3a32cef7589da246f5727f26f02 + sha256: e3f6558c181692d7509f09de10cca527c7dce426776454fb97df512a46527e81 category: main optional: false - name: azure-identity - version: 1.18.0 + version: 1.19.0 manager: pip platform: osx-arm64 dependencies: @@ -17490,13 +17511,13 @@ package: msal: '>=1.30.0' msal-extensions: '>=1.2.0' typing-extensions: '>=4.0.0' - url: https://files.pythonhosted.org/packages/b0/71/1d1bb387b6acaa5daa3e703c70dde3d54823ccd229bd6730de6e724f296e/azure_identity-1.18.0-py3-none-any.whl + url: https://files.pythonhosted.org/packages/f0/d5/3995ed12f941f4a41a273d9b1709282e825ef87ed8eab3833038fee54d59/azure_identity-1.19.0-py3-none-any.whl hash: - sha256: bccf6106245b49ff41d0c4cd7b72851c5a2ba3a32cef7589da246f5727f26f02 + sha256: e3f6558c181692d7509f09de10cca527c7dce426776454fb97df512a46527e81 category: main optional: false - name: azure-storage-blob - version: 12.23.0 + version: 12.23.1 manager: pip platform: linux-64 dependencies: @@ -17504,13 +17525,13 @@ package: cryptography: '>=2.1.4' 
typing-extensions: '>=4.6.0' isodate: '>=0.6.1' - url: https://files.pythonhosted.org/packages/60/02/024b71fc0af7a361cfaecbd96120615ef53787e0b4213285e18eb259d198/azure_storage_blob-12.23.0-py3-none-any.whl + url: https://files.pythonhosted.org/packages/df/bf/f19dd2261dd6193aa53375fcd58929d613e45d14bcdb778567d1fd5e2d6e/azure_storage_blob-12.23.1-py3-none-any.whl hash: - sha256: 8ac4b34624ed075eda1e38f0c6dadb601e1b199e27a09aa63edc429bf4a23329 + sha256: 1c2238aa841d1545f42714a5017c010366137a44a0605da2d45f770174bfc6b4 category: main optional: false - name: azure-storage-blob - version: 12.23.0 + version: 12.23.1 manager: pip platform: osx-arm64 dependencies: @@ -17518,9 +17539,9 @@ package: cryptography: '>=2.1.4' typing-extensions: '>=4.6.0' isodate: '>=0.6.1' - url: https://files.pythonhosted.org/packages/60/02/024b71fc0af7a361cfaecbd96120615ef53787e0b4213285e18eb259d198/azure_storage_blob-12.23.0-py3-none-any.whl + url: https://files.pythonhosted.org/packages/df/bf/f19dd2261dd6193aa53375fcd58929d613e45d14bcdb778567d1fd5e2d6e/azure_storage_blob-12.23.1-py3-none-any.whl hash: - sha256: 8ac4b34624ed075eda1e38f0c6dadb601e1b199e27a09aa63edc429bf4a23329 + sha256: 1c2238aa841d1545f42714a5017c010366137a44a0605da2d45f770174bfc6b4 category: main optional: false - name: backports.tarfile @@ -17544,55 +17565,55 @@ package: category: main optional: false - name: boto3 - version: 1.34.131 + version: 1.35.36 manager: pip platform: linux-64 dependencies: - botocore: '>=1.34.131,<1.35.0' + botocore: '>=1.35.36,<1.36.0' jmespath: '>=0.7.1,<2.0.0' s3transfer: '>=0.10.0,<0.11.0' - url: https://files.pythonhosted.org/packages/3e/ce/f5e3fdab6012f5fa4a8f5e97e86cc42549729382a98faffbc1785f85e89f/boto3-1.34.131-py3-none-any.whl + url: https://files.pythonhosted.org/packages/52/6b/8b126c2e1c07fae33185544ea974de67027afc905bd072feef9fbbd38d3d/boto3-1.35.36-py3-none-any.whl hash: - sha256: 05e388cb937e82be70bfd7eb0c84cf8011ff35cf582a593873ac21675268683b + sha256: 
33735b9449cd2ef176531ba2cb2265c904a91244440b0e161a17da9d24a1e6d1 category: main optional: false - name: boto3 - version: 1.34.131 + version: 1.35.36 manager: pip platform: osx-arm64 dependencies: - botocore: '>=1.34.131,<1.35.0' + botocore: '>=1.35.36,<1.36.0' jmespath: '>=0.7.1,<2.0.0' s3transfer: '>=0.10.0,<0.11.0' - url: https://files.pythonhosted.org/packages/3e/ce/f5e3fdab6012f5fa4a8f5e97e86cc42549729382a98faffbc1785f85e89f/boto3-1.34.131-py3-none-any.whl + url: https://files.pythonhosted.org/packages/52/6b/8b126c2e1c07fae33185544ea974de67027afc905bd072feef9fbbd38d3d/boto3-1.35.36-py3-none-any.whl hash: - sha256: 05e388cb937e82be70bfd7eb0c84cf8011ff35cf582a593873ac21675268683b + sha256: 33735b9449cd2ef176531ba2cb2265c904a91244440b0e161a17da9d24a1e6d1 category: main optional: false - name: botocore - version: 1.34.131 + version: 1.35.36 manager: pip platform: linux-64 dependencies: jmespath: '>=0.7.1,<2.0.0' python-dateutil: '>=2.1,<3.0.0' urllib3: '>=1.25.4,<1.27' - url: https://files.pythonhosted.org/packages/46/1a/01785fad12a9b1dbeffebd97cd226ea5923114057c64a610dd4eb8a28c7b/botocore-1.34.131-py3-none-any.whl + url: https://files.pythonhosted.org/packages/2a/60/056d58b606731f94fe395266c604ea9efcecc10e6857ceb9b10e6831d746/botocore-1.35.36-py3-none-any.whl hash: - sha256: 13b011d7b206ce00727dcee26548fa3b550db9046d5a0e90ac25a6e6c8fde6ef + sha256: 64241c778bf2dc863d93abab159e14024d97a926a5715056ef6411418cb9ead3 category: main optional: false - name: botocore - version: 1.34.131 + version: 1.35.36 manager: pip platform: osx-arm64 dependencies: jmespath: '>=0.7.1,<2.0.0' python-dateutil: '>=2.1,<3.0.0' urllib3: '>=1.25.4,<1.27' - url: https://files.pythonhosted.org/packages/46/1a/01785fad12a9b1dbeffebd97cd226ea5923114057c64a610dd4eb8a28c7b/botocore-1.34.131-py3-none-any.whl + url: https://files.pythonhosted.org/packages/2a/60/056d58b606731f94fe395266c604ea9efcecc10e6857ceb9b10e6831d746/botocore-1.35.36-py3-none-any.whl hash: - sha256: 
13b011d7b206ce00727dcee26548fa3b550db9046d5a0e90ac25a6e6c8fde6ef + sha256: 64241c778bf2dc863d93abab159e14024d97a926a5715056ef6411418cb9ead3 category: main optional: false - name: croniter @@ -17788,23 +17809,23 @@ package: category: main optional: false - name: duckdb - version: 1.1.0 + version: 1.1.2 manager: pip platform: linux-64 dependencies: {} - url: https://files.pythonhosted.org/packages/2a/db/a30a9f643e593c49435ab4c2d13b00687b76993d2c7ccba67fc1cfb32833/duckdb-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + url: https://files.pythonhosted.org/packages/48/9a/1029a2ec5b6755341372834675dd511c4f49e634d5ef312fa8e671c5b3f9/duckdb-1.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl hash: - sha256: aac2fcabe2d5072c252d0b3087365f431de812d8199705089fb073e4d039d19c + sha256: 7ca967c5a57b1d0cb0fd5e539ab24110e5a59dcbedd365bb2dc80533d6e44a8d category: main optional: false - name: duckdb - version: 1.1.0 + version: 1.1.2 manager: pip platform: osx-arm64 dependencies: {} - url: https://files.pythonhosted.org/packages/5d/22/16b4ebc2eaa0d552fdd9ff371647be61bdc2036eae182866203dc0a554c9/duckdb-1.1.0-cp39-cp39-macosx_12_0_arm64.whl + url: https://files.pythonhosted.org/packages/75/6a/ef6cf334680543f1d9ead39fbea8950bf4cd91c4612dd32c33ac4c82fe55/duckdb-1.1.2-cp39-cp39-macosx_12_0_arm64.whl hash: - sha256: bd11bc899cebf5ff936d1276a2dfb7b7db08aba3bcc42924afeafc2163bddb43 + sha256: f87edaf20001530e63a4f7bda13b55dc3152d7171226915f2bf34e0813c8759e category: main optional: false - name: flyteidl @@ -17834,7 +17855,7 @@ package: category: main optional: false - name: flytekit - version: 1.13.6b2 + version: 1.14.0b1 manager: pip platform: linux-64 dependencies: @@ -17861,6 +17882,7 @@ package: marshmallow-enum: '*' marshmallow-jsonschema: '>=0.12.0' mashumaro: '>=3.11' + msgpack: '>=1.1.0' protobuf: '!=4.25.0' pygments: '*' python-json-logger: '>=2.0.0' @@ -17873,13 +17895,13 @@ package: statsd: '>=3.0.0' typing-extensions: '*' urllib3: '>=1.22' - url: 
https://files.pythonhosted.org/packages/b4/4c/89d0c0675172df2a7638f82740f5372b27f671bd6d0d2c059b4e5ff0fca7/flytekit-1.13.6b2-py3-none-any.whl + url: https://files.pythonhosted.org/packages/33/01/50bacac67fad78c133fd37ba3734b1409f295fbb1730300bab65b7565108/flytekit-1.14.0b1-py3-none-any.whl hash: - sha256: 738f15f929fbd37008455d080417119cefb72151588cfb142596c376270d9ecf + sha256: bdb0299e309f15f66bcde33e7a32c193473b120132754a2bfb95baa3d04c3ab1 category: main optional: false - name: flytekit - version: 1.13.6b2 + version: 1.14.0b1 manager: pip platform: osx-arm64 dependencies: @@ -17906,6 +17928,7 @@ package: marshmallow-enum: '*' marshmallow-jsonschema: '>=0.12.0' mashumaro: '>=3.11' + msgpack: '>=1.1.0' protobuf: '!=4.25.0' pygments: '*' python-json-logger: '>=2.0.0' @@ -17918,47 +17941,35 @@ package: statsd: '>=3.0.0' typing-extensions: '*' urllib3: '>=1.22' - url: https://files.pythonhosted.org/packages/b4/4c/89d0c0675172df2a7638f82740f5372b27f671bd6d0d2c059b4e5ff0fca7/flytekit-1.13.6b2-py3-none-any.whl + url: https://files.pythonhosted.org/packages/33/01/50bacac67fad78c133fd37ba3734b1409f295fbb1730300bab65b7565108/flytekit-1.14.0b1-py3-none-any.whl hash: - sha256: 738f15f929fbd37008455d080417119cefb72151588cfb142596c376270d9ecf + sha256: bdb0299e309f15f66bcde33e7a32c193473b120132754a2bfb95baa3d04c3ab1 category: main optional: false - name: flytekitplugins-deck-standard - version: 1.13.5 + version: 1.13.8 manager: pip platform: linux-64 dependencies: flytekit: '*' - markdown: '*' - plotly: '*' - pandas: '*' - ipywidgets: '*' - pygments: '*' - ydata-profiling: '*' - url: https://files.pythonhosted.org/packages/1e/b5/c7b73fc22f66a24115122a6cf8c1e05ada5ac337de2ffd92e996d8fee294/flytekitplugins_deck_standard-1.13.5-py3-none-any.whl + url: https://files.pythonhosted.org/packages/d8/70/01c7ca9a4d9fbb6cae8fdb24d06cef9193a4077f43b0df2993203fd62d4d/flytekitplugins_deck_standard-1.13.8-py3-none-any.whl hash: - sha256: 
71587e0dc0c7790b8634bfeaa7d7c3eebb6b4a8a9c0713c444ab9237ac9bfe3b + sha256: a0369080b47ac14d0c54e441995f10f739a4ee7b3d6cd2685a3cc9911642ab15 category: main optional: false - name: flytekitplugins-deck-standard - version: 1.13.5 + version: 1.13.8 manager: pip platform: osx-arm64 dependencies: flytekit: '*' - markdown: '*' - plotly: '*' - pandas: '*' - ipywidgets: '*' - pygments: '*' - ydata-profiling: '*' - url: https://files.pythonhosted.org/packages/1e/b5/c7b73fc22f66a24115122a6cf8c1e05ada5ac337de2ffd92e996d8fee294/flytekitplugins_deck_standard-1.13.5-py3-none-any.whl + url: https://files.pythonhosted.org/packages/d8/70/01c7ca9a4d9fbb6cae8fdb24d06cef9193a4077f43b0df2993203fd62d4d/flytekitplugins_deck_standard-1.13.8-py3-none-any.whl hash: - sha256: 71587e0dc0c7790b8634bfeaa7d7c3eebb6b4a8a9c0713c444ab9237ac9bfe3b + sha256: a0369080b47ac14d0c54e441995f10f739a4ee7b3d6cd2685a3cc9911642ab15 category: main optional: false - name: flytekitplugins-kfpytorch - version: 1.13.5 + version: 1.13.8 manager: pip platform: linux-64 dependencies: @@ -17966,13 +17977,13 @@ package: flyteidl: '>=1.5.1' flytekit: '>=1.6.1' kubernetes: '*' - url: https://files.pythonhosted.org/packages/c8/d9/f1ccfc7bcbf0bb4e32590260652921f042e074fc6376162cad89f7f1d9d3/flytekitplugins_kfpytorch-1.13.5-py3-none-any.whl + url: https://files.pythonhosted.org/packages/a5/ef/1ef72a88b1f87a782ce1edc54570951c9992f0136bfcfba65d701df12235/flytekitplugins_kfpytorch-1.13.8-py3-none-any.whl hash: - sha256: c1c2d85e1f7c2ec53a29447093a27f453764d62716d3058c5be1a1315d4abfe1 + sha256: 42e5a1fa42fc5c833b0881ff76508a79359cbdbd0a0a3e50c4fc5f2ebac8bf9f category: main optional: false - name: flytekitplugins-kfpytorch - version: 1.13.5 + version: 1.13.8 manager: pip platform: osx-arm64 dependencies: @@ -17980,35 +17991,35 @@ package: flyteidl: '>=1.5.1' flytekit: '>=1.6.1' kubernetes: '*' - url: 
https://files.pythonhosted.org/packages/c8/d9/f1ccfc7bcbf0bb4e32590260652921f042e074fc6376162cad89f7f1d9d3/flytekitplugins_kfpytorch-1.13.5-py3-none-any.whl + url: https://files.pythonhosted.org/packages/a5/ef/1ef72a88b1f87a782ce1edc54570951c9992f0136bfcfba65d701df12235/flytekitplugins_kfpytorch-1.13.8-py3-none-any.whl hash: - sha256: c1c2d85e1f7c2ec53a29447093a27f453764d62716d3058c5be1a1315d4abfe1 + sha256: 42e5a1fa42fc5c833b0881ff76508a79359cbdbd0a0a3e50c4fc5f2ebac8bf9f category: main optional: false - name: flytekitplugins-sqlalchemy - version: 1.13.5 + version: 1.13.8 manager: pip platform: linux-64 dependencies: flytekit: '>=1.3.0b2,<2.0.0' sqlalchemy: '>=1.4.7' pandas: '*' - url: https://files.pythonhosted.org/packages/19/aa/f4e28c0d6a1cbba9c751aa7da12518d3a47aec5b6b19ed49dcb0678dc70e/flytekitplugins_sqlalchemy-1.13.5-py3-none-any.whl + url: https://files.pythonhosted.org/packages/3e/0e/b557a3809e678fe1c1d76237f491b2dcdcaf81bc0348dcfe05260460df5d/flytekitplugins_sqlalchemy-1.13.8-py3-none-any.whl hash: - sha256: 082e36be99903eb3e019c8e9c95844329304a503ba3cabd1a2fdb4923bb44983 + sha256: 1d79b9cafbcb93cb9d6cc9e8e0ca6e1e4a1c46a33940586e88984aa84cdbec01 category: main optional: false - name: flytekitplugins-sqlalchemy - version: 1.13.5 + version: 1.13.8 manager: pip platform: osx-arm64 dependencies: flytekit: '>=1.3.0b2,<2.0.0' sqlalchemy: '>=1.4.7' pandas: '*' - url: https://files.pythonhosted.org/packages/19/aa/f4e28c0d6a1cbba9c751aa7da12518d3a47aec5b6b19ed49dcb0678dc70e/flytekitplugins_sqlalchemy-1.13.5-py3-none-any.whl + url: https://files.pythonhosted.org/packages/3e/0e/b557a3809e678fe1c1d76237f491b2dcdcaf81bc0348dcfe05260460df5d/flytekitplugins_sqlalchemy-1.13.8-py3-none-any.whl hash: - sha256: 082e36be99903eb3e019c8e9c95844329304a503ba3cabd1a2fdb4923bb44983 + sha256: 1d79b9cafbcb93cb9d6cc9e8e0ca6e1e4a1c46a33940586e88984aa84cdbec01 category: main optional: false - name: gcsfs @@ -18045,18 +18056,6 @@ package: sha256: 
f3ab9d3bedc45da8cf40baed7c3a1e1694e8f599160d9138d78f0ef25e4a3ca1 category: main optional: false -- name: google-auth-oauthlib - version: 1.2.1 - manager: pip - platform: linux-64 - dependencies: - google-auth: '>=2.15.0' - requests-oauthlib: '>=0.7.0' - url: https://files.pythonhosted.org/packages/1a/8e/22a28dfbd218033e4eeaf3a0533b2b54852b6530da0c0fe934f0cc494b29/google_auth_oauthlib-1.2.1-py2.py3-none-any.whl - hash: - sha256: 2d58a27262d55aa1b87678c3ba7142a080098cbc2024f903c62355deb235d91f - category: main - optional: false - name: google-cloud version: 0.34.0 manager: pip @@ -18158,25 +18157,23 @@ package: category: main optional: false - name: isodate - version: 0.6.1 + version: 0.7.2 manager: pip platform: linux-64 - dependencies: - six: '*' - url: https://files.pythonhosted.org/packages/b6/85/7882d311924cbcfc70b1890780763e36ff0b140c7e51c110fc59a532f087/isodate-0.6.1-py2.py3-none-any.whl + dependencies: {} + url: https://files.pythonhosted.org/packages/15/aa/0aca39a37d3c7eb941ba736ede56d689e7be91cab5d9ca846bde3999eba6/isodate-0.7.2-py3-none-any.whl hash: - sha256: 0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96 + sha256: 28009937d8031054830160fce6d409ed342816b543597cece116d966c6d99e15 category: main optional: false - name: isodate - version: 0.6.1 + version: 0.7.2 manager: pip platform: osx-arm64 - dependencies: - six: '*' - url: https://files.pythonhosted.org/packages/b6/85/7882d311924cbcfc70b1890780763e36ff0b140c7e51c110fc59a532f087/isodate-0.6.1-py2.py3-none-any.whl + dependencies: {} + url: https://files.pythonhosted.org/packages/15/aa/0aca39a37d3c7eb941ba736ede56d689e7be91cab5d9ca846bde3999eba6/isodate-0.7.2-py3-none-any.whl hash: - sha256: 0751eece944162659049d35f4f549ed815792b38793f07cf73381c1c87cbed96 + sha256: 28009937d8031054830160fce6d409ed342816b543597cece116d966c6d99e15 category: main optional: false - name: jaraco.classes @@ -18224,25 +18221,25 @@ package: category: main optional: false - name: jaraco.functools - version: 4.0.2 + 
version: 4.1.0 manager: pip platform: linux-64 dependencies: more-itertools: '*' - url: https://files.pythonhosted.org/packages/b1/54/7623e24ffc63730c3a619101361b08860c6b7c7cfc1aef6edb66d80ed708/jaraco.functools-4.0.2-py3-none-any.whl + url: https://files.pythonhosted.org/packages/9f/4f/24b319316142c44283d7540e76c7b5a6dbd5db623abd86bb7b3491c21018/jaraco.functools-4.1.0-py3-none-any.whl hash: - sha256: c9d16a3ed4ccb5a889ad8e0b7a343401ee5b2a71cee6ed192d3f68bc351e94e3 + sha256: ad159f13428bc4acbf5541ad6dec511f91573b90fba04df61dafa2a1231cf649 category: main optional: false - name: jaraco.functools - version: 4.0.2 + version: 4.1.0 manager: pip platform: osx-arm64 dependencies: more-itertools: '*' - url: https://files.pythonhosted.org/packages/b1/54/7623e24ffc63730c3a619101361b08860c6b7c7cfc1aef6edb66d80ed708/jaraco.functools-4.0.2-py3-none-any.whl + url: https://files.pythonhosted.org/packages/9f/4f/24b319316142c44283d7540e76c7b5a6dbd5db623abd86bb7b3491c21018/jaraco.functools-4.1.0-py3-none-any.whl hash: - sha256: c9d16a3ed4ccb5a889ad8e0b7a343401ee5b2a71cee6ed192d3f68bc351e94e3 + sha256: ad159f13428bc4acbf5541ad6dec511f91573b90fba04df61dafa2a1231cf649 category: main optional: false - name: jeepney @@ -18531,6 +18528,36 @@ package: sha256: 19056886fd8fa71863fa32f0eb090267f21fb74be00f19f5c70b2e9d76c883c6 category: main optional: false +- name: perian + version: 0.2.9 + manager: pip + platform: linux-64 + dependencies: + aenum: '>=3.1.11' + pydantic: '>=2.5,<3.0' + python-dateutil: '>=2.8.2' + toml: '>=0.10.2,<0.11.0' + urllib3: '>=1.25.3' + url: https://files.pythonhosted.org/packages/0a/2c/03c2d33c51dc6ef456b9bb0976dbd518a41d966fd1c5bcc130b52759e026/perian-0.2.9-py3-none-any.whl + hash: + sha256: 51072e60cb886a7d33e29565e4bef7a0c6e465d8d090c78c5e643f8af0e97d26 + category: main + optional: false +- name: perian + version: 0.2.9 + manager: pip + platform: osx-arm64 + dependencies: + aenum: '>=3.1.11' + pydantic: '>=2.5,<3.0' + python-dateutil: '>=2.8.2' + toml: 
'>=0.10.2,<0.11.0' + urllib3: '>=1.25.3' + url: https://files.pythonhosted.org/packages/0a/2c/03c2d33c51dc6ef456b9bb0976dbd518a41d966fd1c5bcc130b52759e026/perian-0.2.9-py3-none-any.whl + hash: + sha256: 51072e60cb886a7d33e29565e4bef7a0c6e465d8d090c78c5e643f8af0e97d26 + category: main + optional: false - name: phik version: 0.12.4 manager: pip @@ -18825,28 +18852,6 @@ package: sha256: 3a7dc7acae4358af8e8dfb693e82a8477f9f2c847de5d44cf65fee75752eaca3 category: main optional: false -- name: s3transfer - version: 0.10.2 - manager: pip - platform: linux-64 - dependencies: - botocore: '>=1.33.2,<2.0a.0' - url: https://files.pythonhosted.org/packages/3c/4a/b221409913760d26cf4498b7b1741d510c82d3ad38381984a3ddc135ec66/s3transfer-0.10.2-py3-none-any.whl - hash: - sha256: eca1c20de70a39daee580aef4986996620f365c4e0fda6a86100231d62f1bf69 - category: main - optional: false -- name: s3transfer - version: 0.10.2 - manager: pip - platform: osx-arm64 - dependencies: - botocore: '>=1.33.2,<2.0a.0' - url: https://files.pythonhosted.org/packages/3c/4a/b221409913760d26cf4498b7b1741d510c82d3ad38381984a3ddc135ec66/s3transfer-0.10.2-py3-none-any.whl - hash: - sha256: eca1c20de70a39daee580aef4986996620f365c4e0fda6a86100231d62f1bf69 - category: main - optional: false - name: seaborn version: 0.13.2 manager: pip @@ -18885,60 +18890,6 @@ package: sha256: f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99 category: main optional: false -- name: sphinx - version: 7.3.7 - manager: pip - platform: linux-64 - dependencies: - sphinxcontrib-applehelp: '*' - sphinxcontrib-devhelp: '*' - sphinxcontrib-jsmath: '*' - sphinxcontrib-htmlhelp: '>=2.0.0' - sphinxcontrib-serializinghtml: '>=1.1.9' - sphinxcontrib-qthelp: '*' - jinja2: '>=3.0' - pygments: '>=2.14' - docutils: '>=0.18.1,<0.22' - snowballstemmer: '>=2.0' - babel: '>=2.9' - alabaster: '>=0.7.14,<0.8.0' - imagesize: '>=1.3' - requests: '>=2.25.0' - packaging: '>=21.0' - importlib-metadata: '>=4.8' - tomli: '>=2' - url: 
https://files.pythonhosted.org/packages/b4/fa/130c32ed94cf270e3d0b9ded16fb7b2c8fea86fa7263c29a696a30c1dde7/sphinx-7.3.7-py3-none-any.whl - hash: - sha256: 413f75440be4cacf328f580b4274ada4565fb2187d696a84970c23f77b64d8c3 - category: main - optional: false -- name: sphinx - version: 7.3.7 - manager: pip - platform: osx-arm64 - dependencies: - sphinxcontrib-applehelp: '*' - sphinxcontrib-devhelp: '*' - sphinxcontrib-jsmath: '*' - sphinxcontrib-htmlhelp: '>=2.0.0' - sphinxcontrib-serializinghtml: '>=1.1.9' - sphinxcontrib-qthelp: '*' - jinja2: '>=3.0' - pygments: '>=2.14' - docutils: '>=0.18.1,<0.22' - snowballstemmer: '>=2.0' - babel: '>=2.9' - alabaster: '>=0.7.14,<0.8.0' - imagesize: '>=1.3' - requests: '>=2.25.0' - packaging: '>=21.0' - importlib-metadata: '>=4.8' - tomli: '>=2' - url: https://files.pythonhosted.org/packages/b4/fa/130c32ed94cf270e3d0b9ded16fb7b2c8fea86fa7263c29a696a30c1dde7/sphinx-7.3.7-py3-none-any.whl - hash: - sha256: 413f75440be4cacf328f580b4274ada4565fb2187d696a84970c23f77b64d8c3 - category: main - optional: false - name: sphinx-code-include version: 1.4.0 manager: pip @@ -18966,25 +18917,25 @@ package: category: main optional: false - name: sphinx-docsearch - version: 0.0.7 + version: 0.1.0 manager: pip platform: linux-64 dependencies: - sphinx: '>=7.2,<7.4' - url: https://files.pythonhosted.org/packages/9a/0a/58b83b80e134977e41913a4041c0637313d5f83ed27f7679a376988d584c/sphinx_docsearch-0.0.7-py3-none-any.whl + sphinx: '>=7.2,<7.5' + url: https://files.pythonhosted.org/packages/2b/44/4e59ba1820e8190d1cb3bceb491d875da5f79fb3f61f5c2fe82037c3546e/sphinx_docsearch-0.1.0-py3-none-any.whl hash: - sha256: 53ee7c669e82a72156e694128b7737d6c5fc481e09ae642a6e63604a9018a8fb + sha256: 799221b0b962e3d86d0e0f084d4998c3d9227ef0eb2883d70e41d6bd08b616dd category: main optional: false - name: sphinx-docsearch - version: 0.0.7 + version: 0.1.0 manager: pip platform: osx-arm64 dependencies: - sphinx: '>=7.2,<7.4' - url: 
https://files.pythonhosted.org/packages/9a/0a/58b83b80e134977e41913a4041c0637313d5f83ed27f7679a376988d584c/sphinx_docsearch-0.0.7-py3-none-any.whl + sphinx: '>=7.2,<7.5' + url: https://files.pythonhosted.org/packages/2b/44/4e59ba1820e8190d1cb3bceb491d875da5f79fb3f61f5c2fe82037c3546e/sphinx_docsearch-0.1.0-py3-none-any.whl hash: - sha256: 53ee7c669e82a72156e694128b7737d6c5fc481e09ae642a6e63604a9018a8fb + sha256: 799221b0b962e3d86d0e0f084d4998c3d9227ef0eb2883d70e41d6bd08b616dd category: main optional: false - name: sphinx-markdown-tables @@ -19074,7 +19025,7 @@ package: category: main optional: false - name: statsmodels - version: 0.14.3 + version: 0.14.4 manager: pip platform: linux-64 dependencies: @@ -19083,13 +19034,13 @@ package: pandas: '>=1.4,<2.1.0 || >2.1.0' patsy: '>=0.5.6' packaging: '>=21.3' - url: https://files.pythonhosted.org/packages/38/d2/5c6a7f11306d25ab10dbade224f55d1a0d492c48292aed65b351b89a0abe/statsmodels-0.14.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl + url: https://files.pythonhosted.org/packages/68/8b/c640e4a243b59fc75e566ff3509ae55fb6cd4535643494be834c7d69c25d/statsmodels-0.14.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl hash: - sha256: 1ddbd07b7d05e16d1a2ea6df3d7e2255dfb3e0363b91d859623d9fc3aff32b4a + sha256: 6f43da7957e00190104c5dd0f661bfc6dfc68b87313e3f9c4dbd5e7d222e0aeb category: main optional: false - name: statsmodels - version: 0.14.3 + version: 0.14.4 manager: pip platform: osx-arm64 dependencies: @@ -19098,9 +19049,29 @@ package: pandas: '>=1.4,<2.1.0 || >2.1.0' patsy: '>=0.5.6' packaging: '>=21.3' - url: https://files.pythonhosted.org/packages/b1/4d/18f0a224157fc84dae82fe8ca1a26f892f44211356068492c58a869f1466/statsmodels-0.14.3-cp39-cp39-macosx_11_0_arm64.whl + url: https://files.pythonhosted.org/packages/dc/02/df44d1a73368fd0c0618e3169e7649303e6adb3ce96a429b617549f87165/statsmodels-0.14.4-cp39-cp39-macosx_11_0_arm64.whl + hash: + sha256: d330da34f59f1653c5193f9fe3a3a258977c880746db7f155fc33713ea858db5 
+ category: main + optional: false +- name: toml + version: 0.10.2 + manager: pip + platform: linux-64 + dependencies: {} + url: https://files.pythonhosted.org/packages/44/6f/7120676b6d73228c96e17f1f794d8ab046fc910d781c8d151120c3f1569e/toml-0.10.2-py2.py3-none-any.whl + hash: + sha256: 806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b + category: main + optional: false +- name: toml + version: 0.10.2 + manager: pip + platform: osx-arm64 + dependencies: {} + url: https://files.pythonhosted.org/packages/44/6f/7120676b6d73228c96e17f1f794d8ab046fc910d781c8d151120c3f1569e/toml-0.10.2-py2.py3-none-any.whl hash: - sha256: 97f28958e456aea788d4ffd83d7ade82d2a4a3bd5c7e8eabf791f224cddef2bf + sha256: 806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b category: main optional: false - name: typed-ast diff --git a/monodocs-environment.yaml b/monodocs-environment.yaml index 20673c5514..717a1523c2 100644 --- a/monodocs-environment.yaml +++ b/monodocs-environment.yaml @@ -78,10 +78,11 @@ dependencies: - whylabs-client # whylogs - ray==2.6.3 - duckdb - - aioboto3>=12.3.0 # aws sagemaker inference + - aioboto3>=12.3.0 # aws sagemaker inference - databricks-cli # mlflow - sphinx-docsearch - pydata_sphinx_theme + - perian # perian platforms: - linux-64 diff --git a/rfc/system/0008-community-plugins.md b/rfc/system/0008-community-plugins.md new file mode 100644 index 0000000000..0283657c87 --- /dev/null +++ b/rfc/system/0008-community-plugins.md @@ -0,0 +1,59 @@ +# Management of community-contributed plugins + +**Authors:** + +- @davidmirror-ops + + +## 1 Executive Summary + +The Flyte community as a self-governed and productive collective of individuals, values contributions. This proposal aims to discuss the process community contributors should follow to submit a new `flytekit` plugin, with special attention to mechanisms that ensure stability and maintainability of core flyte code. 
+ +## 2 Motivation + +- With the current "in-tree" approach, plugins developed by the community land in the `flytekit` repo ([example](https://github.com/flyteorg/flytekit/pull/2537)). It results in Flyte maintainers having to take care of CI test failures due to plugin code or flytekit updates incompatible with plugin code, etc. Flyte maintainers are also expected to provide support for and fix bugs in plugins integrating 3rd party libraries that they might have little knowledge of. + +- The goal is to agree on a process for contributors to follow when submitting new integrations in an "out-of-tree" way that clearly communicates that it is a community-contributed -and then- community-supported integration. + +## 3 Proposed Implementation + +- Create a `community` folder under `flytekit/plugins` and keep releasing the plugins in that folder as separate `pypi` packages. +- Configure CI to only run tests on `plugins/community` when there are changes to a respective plugin. +- Keep releasing community plugins alongside flytekit, even if there are no changes. +- Explicitly mark plugins as community maintained in the import via `import flytekitplugins.contrib.x` +- Plugin authors are responsible for maintaining their plugins. In case there are PRs to change a community plugin, the plugin maintainers review the PR and give a non-binding approval. Once a community plugin maintainer has given a non-binding approval, a `flytekit` maintainer has to give a binding approval in order for the PR to be merged. + +This proposal includes agent plugins. +### Promotion process to official plugin + +An official plugin is one that is maintained by the core Flyte team and is made part of the official `flytekit` documentation. + +- Plugin maintainers or community members can propose the promotion of a plugin to official by creating an Issue on the `flytekit` repo. +- The supermajority of the TSC must approve publicly before promoting a plugin.
+ +To consider it for promotion, a plugin must meet the following criteria: + +- Production readiness testing performed by the core Flyte team or documented by plugin users or maintainers +- Evidence of ongoing usage through GitHub issues or Slack threads +- Documented in flytekit's documentation + + + +## 4 Drawbacks + +- Potential overhead: CI configuration changes in flytekit (probably a one-time change) + +## 5 Alternatives + +- Maintain community plugins on a separate repo + - Against the monorepo initiative +- Have community packages be its own org + - Significantly higher management overhead +- `flytekit` plugins built into their own package + - Potentially heavier development process + +- Adding plugin authors as CODEOWNERS won't be considered due to a [GitHub permission model](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) limitation: + +>The people you choose as code owners must have write permissions for the repository. + +Getting write permissions in `flytekit` via contributing plugins is not part of the [current Governance model](https://github.com/flyteorg/community/blob/main/GOVERNANCE.md#community-roles-and-path-to-maintainership) for flyte.
diff --git a/script/setup_local_dev.sh b/script/setup_local_dev.sh index 9fbefeb9c1..cde45bf330 100644 --- a/script/setup_local_dev.sh +++ b/script/setup_local_dev.sh @@ -147,7 +147,7 @@ fi echo -e "\nSetting kubeconfig and kubectl context" export KUBECONFIG=$KUBECONFIG:"${K3D_KUBECONFIG_FILE_PATH}" -kubectl config set-context $K3D_CLUSTER_NAME +kubectl config set-context k3d-$K3D_CLUSTER_NAME DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )" HELM_CHART="${DIR}/../charts/flyte-deps" diff --git a/stats/flyteuser.dashboard.py b/stats/flyteuser.dashboard.py index d554532b24..763362f004 100644 --- a/stats/flyteuser.dashboard.py +++ b/stats/flyteuser.dashboard.py @@ -1,6 +1,6 @@ import typing from grafanalib.core import ( - Alert, AlertCondition, Dashboard, Graph, + Alert, AlertCondition, Dashboard, Graph,BarChart,BarGauge, GreaterThan, OP_AND, OPS_FORMAT, Row, RTYPE_SUM, SECONDS_FORMAT, SHORT_FORMAT, single_y_axis, Target, TimeRange, YAxes, YAxis, MILLISECONDS_FORMAT, Templating, Template, DataSourceInput @@ -19,11 +19,11 @@ def workflow_stats(collapse: bool) -> Row: collapse=collapse, panels=[ Graph( - title="Accepted Workflow", + title="Accepted Workflows (avg)", dataSource=DATASOURCE, targets=[ Target( - expr='sum(rate(flyte:propeller:all:workflow:accepted{project=~"$project", domain=~"$domain", wf=~"$workflow"}[5m]))', + expr='avg(flyte:propeller:all:workflow:accepted{project=~"$project", domain=~"$domain", wf=~"$workflow"})', refId='A', ), ], @@ -33,7 +33,7 @@ def workflow_stats(collapse: bool) -> Row: ), ), Graph( - title="Successful Workflow", + title="Workflow success rate", dataSource=DATASOURCE, targets=[ Target( @@ -41,13 +41,10 @@ def workflow_stats(collapse: bool) -> Row: refId='A', ), ], - yAxes=YAxes( - YAxis(format=OPS_FORMAT), - YAxis(format=SHORT_FORMAT), - ), + yAxes=single_y_axis(format=OPS_FORMAT), ), Graph( - title="Failed Workflow", + title="Workflow failure rate", dataSource=DATASOURCE, targets=[ Target( @@ -55,105 +52,85 @@ def 
workflow_stats(collapse: bool) -> Row: refId='A', ), ], - yAxes=YAxes( - YAxis(format=OPS_FORMAT), - YAxis(format=SHORT_FORMAT), - ), + yAxes=single_y_axis(format=OPS_FORMAT), ), Graph( - title="Aborted Workflow", + title="Aborted Workflows (avg)", dataSource=DATASOURCE, targets=[ Target( - expr='sum(rate(flyte:propeller:all:workflow:workflow_aborted{project=~"$project", domain=~"$domain", wf=~"$workflow"}[5m]))', + expr='avg_over_time(flyte:propeller:all:workflow:workflow_aborted{project=~"$project", domain=~"$domain", wf=~"$workflow"}[5m])', refId='A', ), ], - yAxes=YAxes( - YAxis(format=OPS_FORMAT), - YAxis(format=SHORT_FORMAT), - ), + yAxes=single_y_axis(format=SHORT_FORMAT), ), - Graph( - title="Successful workflow execution time by Quantile", + BarGauge( + title="Successful wf execution duration by quantile", dataSource=DATASOURCE, targets=[ Target( - expr='sum(flyte:propeller:all:workflow:success_duration_ms{project=~"$project", domain=~"$domain", wf=~"$workflow"}) by (quantile)', + expr='(avg(flyte:propeller:all:workflow:success_duration_ms{project=~"$project", domain=~"$domain", wf=~"$workflow"}) by(quantile))/1000', refId='A', ), ], - yAxes=single_y_axis(format=MILLISECONDS_FORMAT), + orientation='horizontal', + format=SECONDS_FORMAT, ), - Graph( - title="Failed workflow execution time by Quantile", + BarGauge( + title="Failed wf execution duration by quantile", dataSource=DATASOURCE, targets=[ Target( - expr='sum(flyte:propeller:all:workflow:failure_duration_ms{project=~"$project", domain=~"$domain", wf=~"$workflow"}) by (quantile)', + expr='(avg(flyte:propeller:all:workflow:failure_duration_ms{project=~"$project", domain=~"$domain", wf=~"$workflow"}) by(quantile))/1000', refId='A', ), ], - yAxes=single_y_axis(format=MILLISECONDS_FORMAT), - ), - Graph( - title="Node queuing latency by Quantile", - dataSource=DATASOURCE, - targets=[ - Target( - expr='sum(flyte:propeller:all:node:queueing_latency_ms{project=~"$project", domain=~"$domain", wf=~"$workflow"}) 
by (quantile)', - refId='A', - ), - ], - yAxes=single_y_axis(format=MILLISECONDS_FORMAT), + orientation='horizontal', + format=SECONDS_FORMAT, ), ]) @staticmethod def quota_stats(collapse: bool) -> Row: return Row( - title="Kubernetes Quota Usage stats", + title="Kubernetes Resource Quota Usage", collapse=collapse, panels=[ Graph( - title="CPU Limits vs usage", + title="CPU Limit vs requested by namespace", dataSource=DATASOURCE, targets=[ Target( expr='kube_resourcequota{resource="limits.cpu", namespace="$project-$domain", type="hard"}', refId='A', - legendFormat="max cpu", + legendFormat="CPU limit", ), Target( expr='kube_resourcequota{resource="limits.cpu", namespace="$project-$domain", type="used"}', refId='B', - legendFormat="used cpu", + legendFormat="CPU requested", ), ], yAxes=YAxes( - YAxis(format=OPS_FORMAT), YAxis(format=SHORT_FORMAT), ), ), Graph( - title="Mem Limits vs usage", + title="Memory limit vs requested by namespace (MiB)", dataSource=DATASOURCE, targets=[ Target( - expr='kube_resourcequota{resource="limits.memory", namespace="$project-$domain", type="hard"}', + expr='(kube_resourcequota{resource="limits.memory", namespace="$project-$domain", type="hard"})*9.5367e-7', refId='A', - legendFormat="max mem", + legendFormat="Memory limit (MiB)", ), Target( - expr='kube_resourcequota{resource="limits.memory", namespace="$project-$domain", type="used"}', + expr='(kube_resourcequota{resource="limits.memory", namespace="$project-$domain", type="used"})*9.5367e-7', refId='B', - legendFormat="used mem", + legendFormat="Memory requested (MiB)", ), ], - yAxes=YAxes( - YAxis(format=OPS_FORMAT), - YAxis(format=SHORT_FORMAT), - ), ), ]) @@ -164,48 +141,48 @@ def resource_stats(collapse: bool) -> Row: collapse=collapse, panels=[ Graph( - title="Pending tasks", + title="Pending Tasks", dataSource=DATASOURCE, targets=[ Target( - expr='sum(kube_pod_container_status_waiting * on(pod) group_left(label_execution_id, label_task_name, label_node_id, 
label_workflow_name) kube_pod_labels{label_execution_id !="",namespace=~"$project-$domain",label_workflow_name=~"$workflow"}) by (namespace, label_execution_id, label_task_name, label_node_id, label_workflow_name) > 0', + expr='sum(kube_pod_status_phase{phase="Pending"} * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_workflow_name=~"$workflow"}) by (namespace, label_task_name, label_node_id, label_workflow_name) > 0', refId='A', ), ], yAxes=single_y_axis(format=SHORT_FORMAT), ), - Graph( - title="Memory Usage Percentage", + BarChart( + title="Memory Usage per Task(%)", dataSource=DATASOURCE, targets=[ Target( - expr='(100 * max(container_memory_rss{image!=""} * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !="",namespace=~"$project-$domain",label_workflow_name=~"$workflow"} * on(pod) group_left(phase) kube_pod_status_phase{phase="Running"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name) / max(kube_pod_container_resource_limits_memory_bytes{container!=""} * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !=""} * on(pod) group_left(phase) kube_pod_status_phase{phase="Running"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name)) > 0', + expr='(100 * (max(container_memory_working_set_bytes{container!=""} * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels{namespace=~"$project-$domain",label_workflow_name=~"$workflow"} * on(pod) group_left(phase) kube_pod_status_phase{phase="Running"}) by (namespace, pod, label_task_name, label_node_id, label_workflow_name) / max(cluster:namespace:pod_memory:active:kube_pod_container_resource_limits{container!=""} * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels * 
on(pod) group_left(phase) kube_pod_status_phase{phase="Running"}) by (namespace, pod, label_task_name, label_node_id, label_workflow_name))) > 0', refId='A', ), ], - yAxes=single_y_axis(format=SHORT_FORMAT), + showValue='true', ), - Graph( - title="CPU Usage Percentage", + BarChart( + title="CPU Usage per Task(%)", dataSource=DATASOURCE, targets=[ Target( - expr='(100* sum(rate(container_cpu_usage_seconds_total{image!=""}[2m]) * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !="",namespace=~"$project-$domain",label_workflow_name=~"$workflow"} * on(pod) group_left(phase) kube_pod_status_phase{phase="Running"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name) / sum(kube_pod_container_resource_limits_cpu_cores{container!=""} * on(pod) group_left(label_execution_id, label_task_name, label_node_id, label_workflow_name) kube_pod_labels{label_execution_id !=""} * on(pod) group_left(phase) kube_pod_status_phase{phase="Running"}) by (namespace, pod, label_execution_id, label_task_name, label_node_id, label_workflow_name)) > 0', + expr='(100 * (sum(rate(container_cpu_usage_seconds_total{image!=""}[2m]) * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels{namespace=~"$project-$domain",label_workflow_name=~"$workflow"} * on(pod) group_left(phase) kube_pod_status_phase{phase="Running"}) by (namespace, pod, label_task_name, label_node_id, label_workflow_name) / sum(cluster:namespace:pod_cpu:active:kube_pod_container_resource_limits{container!=""} * on(pod) group_left(label_task_name, label_node_id, label_workflow_name) kube_pod_labels * on(pod) group_left(phase) kube_pod_status_phase{phase="Running"}) by (namespace, pod, label_task_name, label_node_id, label_workflow_name))) > 0', refId='A', ), ], - yAxes=single_y_axis(format=SHORT_FORMAT), + showValue='true', ), ]) @staticmethod def errors(collapse: bool) -> Row: return 
Row( - title="Error (System vs user)", + title="Error (System vs User)", collapse=collapse, panels=[ Graph( - title="User errors", + title="User error rate", dataSource=DATASOURCE, targets=[ Target( @@ -216,7 +193,7 @@ def errors(collapse: bool) -> Row: yAxes=single_y_axis(format=SHORT_FORMAT), ), Graph( - title="System errors", + title="System error rate", dataSource=DATASOURCE, targets=[ Target( @@ -280,7 +257,7 @@ def create_all_rows(interval: int) -> typing.List[Row]: domain_template, wf_template, ]), - description="Flyte User Dashboard. This is great to get a birds-eye and drill down view of executions in your Flyte cluster. Useful for the user.", + description="Flyte User Dashboard. It's designed to give an overview of execution status and resource consumption.", ).auto_panel_ids() if __name__ == "__main__":