From 84bcc26b118d8109ae020df5990cf283a09ddd8f Mon Sep 17 00:00:00 2001 From: Jason Parraga Date: Thu, 31 Oct 2024 09:27:42 -0700 Subject: [PATCH 01/18] Add basic SASL and TLS support for Kafka cloud events (#5814) * Add basic SASL and TLS support for Kafka cloud events Signed-off-by: Jason Parraga * Address comments Signed-off-by: Jason Parraga --------- Signed-off-by: Jason Parraga --- charts/flyte-core/README.md | 17 +++- charts/flyte-core/values.yaml | 39 ++++++++- .../manifests/complete-agent.yaml | 9 +- .../sandbox-bundled/manifests/complete.yaml | 9 +- docker/sandbox-bundled/manifests/dev.yaml | 4 +- flyteadmin/.golangci.yml | 2 + flyteadmin/pkg/async/cloudevent/factory.go | 7 +- .../interfaces/application_configuration.go | 85 +++++++++++++++++++ 8 files changed, 154 insertions(+), 18 deletions(-) diff --git a/charts/flyte-core/README.md b/charts/flyte-core/README.md index ba0d2a792d..6aed892810 100644 --- a/charts/flyte-core/README.md +++ b/charts/flyte-core/README.md @@ -55,10 +55,25 @@ helm install gateway bitnami/contour -n flyte | Key | Type | Default | Description | |-----|------|---------|-------------| -| cloud_events.aws.region | string | `"us-east-2"` | | +| cloud_events.aws | object | `{"region":"us-east-2"}` | Configuration for sending cloud events to AWS SNS | | cloud_events.enable | bool | `false` | | | cloud_events.eventsPublisher.eventTypes[0] | string | `"all"` | | | cloud_events.eventsPublisher.topicName | string | `"arn:aws:sns:us-east-2:123456:123-my-topic"` | | +| cloud_events.gcp | object | `{"region":"us-east1"}` | Configuration for sending cloud events to GCP Pub Sub | +| cloud_events.kafka | object | `{"brokers":["mybroker:443"],"saslConfig":{"enabled":false,"handshake":true,"mechanism":"PLAIN","password":"","passwordPath":"","user":"kafka"},"tlsConfig":{"certPath":"/etc/ssl/certs/kafka-client.crt","enabled":false,"keyPath":"/etc/ssl/certs/kafka-client.key"},"version":"3.7.0"}` | Configuration for sending cloud events to Kafka | +| cloud_events.kafka.brokers | list | `["mybroker:443"]` | The kafka brokers to talk to | +| cloud_events.kafka.saslConfig | object | `{"enabled":false,"handshake":true,"mechanism":"PLAIN","password":"","passwordPath":"","user":"kafka"}` | SASL based authentication | +| cloud_events.kafka.saslConfig.enabled | bool | `false` | Whether to use SASL authentication | +| cloud_events.kafka.saslConfig.handshake | bool | `true` | Whether the send the SASL handsahke first | +| cloud_events.kafka.saslConfig.mechanism | string | `"PLAIN"` | Which SASL mechanism to use. Defaults to PLAIN | +| cloud_events.kafka.saslConfig.password | string | `""` | The password for the kafka user | +| cloud_events.kafka.saslConfig.passwordPath | string | `""` | Optional mount path of file containing the kafka password. 
| +| cloud_events.kafka.saslConfig.user | string | `"kafka"` | The kafka user | +| cloud_events.kafka.tlsConfig | object | `{"certPath":"/etc/ssl/certs/kafka-client.crt","enabled":false,"keyPath":"/etc/ssl/certs/kafka-client.key"}` | Certificate based authentication | +| cloud_events.kafka.tlsConfig.certPath | string | `"/etc/ssl/certs/kafka-client.crt"` | Path to the client certificate | +| cloud_events.kafka.tlsConfig.enabled | bool | `false` | Whether to use certificate based authentication or TLS | +| cloud_events.kafka.tlsConfig.keyPath | string | `"/etc/ssl/certs/kafka-client.key"` | Path to the client private key | +| cloud_events.kafka.version | string | `"3.7.0"` | The version of Kafka | | cloud_events.type | string | `"aws"` | | | cluster_resource_manager | object | `{"config":{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}},"enabled":true,"nodeSelector":{},"podAnnotations":{},"podEnv":{},"podLabels":{},"prometheus":{"enabled":false,"path":"/metrics","port":10254},"resources":{},"service_account_name":"flyteadmin","standaloneDeployment":false,"templates":[{"key":"aa_namespace","value":"apiVersion: v1\nkind: Namespace\nmetadata:\n name: {{ namespace }}\nspec:\n finalizers:\n - kubernetes\n"},{"key":"ab_project_resource_quota","value":"apiVersion: v1\nkind: ResourceQuota\nmetadata:\n name: project-quota\n namespace: {{ namespace }}\nspec:\n hard:\n limits.cpu: {{ projectQuotaCpu }}\n limits.memory: {{ projectQuotaMemory }}\n"}]}` | Configuration for the Cluster resource manager component. This is an optional component, that enables automatic cluster configuration. This is useful to set default quotas, manage namespaces etc that map to a project/domain | | cluster_resource_manager.config | object | `{"cluster_resources":{"customData":[{"production":[{"projectQuotaCpu":{"value":"5"}},{"projectQuotaMemory":{"value":"4000Mi"}}]},{"staging":[{"projectQuotaCpu":{"value":"2"}},{"projectQuotaMemory":{"value":"3000Mi"}}]},{"development":[{"projectQuotaCpu":{"value":"4"}},{"projectQuotaMemory":{"value":"3000Mi"}}]}],"refreshInterval":"5m","standaloneDeployment":false,"templatePath":"/etc/flyte/clusterresource/templates"}}` | Configmap for ClusterResource parameters | diff --git a/charts/flyte-core/values.yaml b/charts/flyte-core/values.yaml index 2eb9ff876a..9faaed731a 100755 --- a/charts/flyte-core/values.yaml +++ b/charts/flyte-core/values.yaml @@ -946,15 +946,48 @@ external_events: # an SNS topic (or gcp equivalent) cloud_events: enable: false - type: aws - aws: - region: us-east-2 eventsPublisher: # Make sure this is not a fifo queue. Admin does not yet support # writing to fifo sns topics. topicName: "arn:aws:sns:us-east-2:123456:123-my-topic" eventTypes: - all # Or workflow, node, task. 
Or "*" + type: aws + # -- Configuration for sending cloud events to AWS SNS + aws: + region: us-east-2 + # -- Configuration for sending cloud events to GCP Pub Sub + gcp: + region: us-east1 + # -- Configuration for sending cloud events to Kafka + kafka: + # -- The version of Kafka + version: "3.7.0" + # -- The kafka brokers to talk to + brokers: + - mybroker:443 + # -- SASL based authentication + saslConfig: + # -- Whether to use SASL authentication + enabled: false + # -- The kafka user + user: kafka + # -- The password for the kafka user + password: "" + # -- Optional mount path of file containing the kafka password. + passwordPath: "" + # -- Whether the send the SASL handsahke first + handshake: true + # -- Which SASL mechanism to use. Defaults to PLAIN + mechanism: PLAIN + # -- Certificate based authentication + tlsConfig: + # -- Whether to use certificate based authentication or TLS + enabled: false + # -- Path to the client certificate + certPath: /etc/ssl/certs/kafka-client.crt + # -- Path to the client private key + keyPath: /etc/ssl/certs/kafka-client.key # -- Configuration for the Cluster resource manager component. This is an optional component, that enables automatic # cluster configuration. This is useful to set default quotas, manage namespaces etc that map to a project/domain diff --git a/docker/sandbox-bundled/manifests/complete-agent.yaml b/docker/sandbox-bundled/manifests/complete-agent.yaml index a460033647..028f719e71 100644 --- a/docker/sandbox-bundled/manifests/complete-agent.yaml +++ b/docker/sandbox-bundled/manifests/complete-agent.yaml @@ -444,6 +444,9 @@ data: disabled: false seedProjects: - flytesnacks + seedProjectsWithDetails: + - description: Default project setup. + name: flytesnacks dataCatalog: disabled: false propeller: @@ -816,7 +819,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: M0MwZjRDRlRMRVg5eFlNWA== + haSharedSecret: SlI1TDFkTXBMaThuc0hlSQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1247,7 +1250,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: dc6e26fec37cad413a92bf06f2840ea1e497284312275ff06e22b152dee1566b + checksum/configuration: a823eaadac5f3a4358c8acf628ebeb3719f88312af520d2c253de2579dff262d checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1413,7 +1416,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 49b88f7ed6b4bec4cdb0305c1d990514d9b75690607d7ae75d5862da9a3b2a29 + checksum/secret: ffc8aa05a602edd8f9b1d7ef35aa1cc5e383bceb9b91307eef99e86f53e13d4e labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/complete.yaml b/docker/sandbox-bundled/manifests/complete.yaml index 88cd06ac2c..c8b8e1c93a 100644 --- a/docker/sandbox-bundled/manifests/complete.yaml +++ b/docker/sandbox-bundled/manifests/complete.yaml @@ -433,6 +433,9 @@ data: disabled: false seedProjects: - flytesnacks + seedProjectsWithDetails: + - description: Default project setup. 
+ name: flytesnacks dataCatalog: disabled: false propeller: @@ -798,7 +801,7 @@ type: Opaque --- apiVersion: v1 data: - haSharedSecret: ekx6Z2kxS3FBYjV5dExlMw== + haSharedSecret: YjdMdE9yejJzZ2xXSDFBRQ== proxyPassword: "" proxyUsername: "" kind: Secret @@ -1196,7 +1199,7 @@ spec: metadata: annotations: checksum/cluster-resource-templates: 6fd9b172465e3089fcc59f738b92b8dc4d8939360c19de8ee65f68b0e7422035 - checksum/configuration: a6f3ea502338c626b7824453ce7dc8b6fcd441d68865c075e2e74d797bc607fa + checksum/configuration: c2649df6bcb523f120c73b0fdeec5d9516f555eab12e4eae78b04dea2cf2abae checksum/configuration-secret: 09216ffaa3d29e14f88b1f30af580d02a2a5e014de4d750b7f275cc07ed4e914 labels: app.kubernetes.io/component: flyte-binary @@ -1362,7 +1365,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: 9b64bfe991cd6ce4394fa9c2651b0bbfe4834024ece293b3ac9688111d6fe5d3 + checksum/secret: 956ac1b58c049a630c94605eedaba7ba9de3fc01233701ef403ab4bf24fe2a7a labels: app: docker-registry release: flyte-sandbox diff --git a/docker/sandbox-bundled/manifests/dev.yaml b/docker/sandbox-bundled/manifests/dev.yaml index e524e13ae1..1038da1f64 100644 --- a/docker/sandbox-bundled/manifests/dev.yaml +++ b/docker/sandbox-bundled/manifests/dev.yaml @@ -499,7 +499,7 @@ metadata: --- apiVersion: v1 data: - haSharedSecret: MW90empzaUNBd2FlV09QSw== + haSharedSecret: YUpzb25xNTM1eml3Rmpueg== proxyPassword: "" proxyUsername: "" kind: Secret @@ -934,7 +934,7 @@ spec: metadata: annotations: checksum/config: 8f50e768255a87f078ba8b9879a0c174c3e045ffb46ac8723d2eedbe293c8d81 - checksum/secret: ba78cd87d2f6685980b95bd20913088b3a07fa48e9a414693277e3df134710ad + checksum/secret: 2720f13bd64051a7acb512e59e426b9f6c5f6c3c7d1d9a3a423e2df4cf9bab46 labels: app: docker-registry release: flyte-sandbox diff --git a/flyteadmin/.golangci.yml b/flyteadmin/.golangci.yml index 4dbb031812..cd180b89d1 100644 --- a/flyteadmin/.golangci.yml +++ b/flyteadmin/.golangci.yml @@ -39,3 +39,5 @@ issues: exclude-rules: - path: pkg/workflowengine/impl/prepare_execution.go text: "copies lock" + - path: pkg/runtime/interfaces/application_configuration.go + text: "G402: TLS InsecureSkipVerify may be true." 
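As a rough sketch of how the settings introduced by this patch could be wired together in flyte-core values (the broker address, user name, and password path below are illustrative placeholders, and `type: kafka` is assumed to be the selector for the Kafka publisher):

    cloud_events:
      enable: true
      type: kafka
      kafka:
        version: "3.7.0"
        brokers:
          - my-broker.example.com:9093        # placeholder broker address
        saslConfig:
          enabled: true
          user: kafka                          # placeholder user
          passwordPath: /etc/kafka/password    # password read from a mounted secret file (placeholder path)
          handshake: true
          mechanism: PLAIN
        tlsConfig:
          enabled: true
          certPath: /etc/ssl/certs/kafka-client.crt
          keyPath: /etc/ssl/certs/kafka-client.key

With saslConfig.passwordPath set, the password is read from the mounted file and takes precedence over the inline password value, per UpdateSaramaConfig in the diff below.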
diff --git a/flyteadmin/pkg/async/cloudevent/factory.go b/flyteadmin/pkg/async/cloudevent/factory.go index 65cd48de93..51c38ffea4 100644 --- a/flyteadmin/pkg/async/cloudevent/factory.go +++ b/flyteadmin/pkg/async/cloudevent/factory.go @@ -73,12 +73,7 @@ func NewCloudEventsPublisher(ctx context.Context, db repositoryInterfaces.Reposi case cloudEventImplementations.Kafka: saramaConfig := sarama.NewConfig() - var err error - saramaConfig.Version, err = sarama.ParseKafkaVersion(cloudEventsConfig.KafkaConfig.Version) - if err != nil { - logger.Fatalf(ctx, "failed to parse kafka version, %v", err) - panic(err) - } + cloudEventsConfig.KafkaConfig.UpdateSaramaConfig(ctx, saramaConfig) kafkaSender, err := kafka_sarama.NewSender(cloudEventsConfig.KafkaConfig.Brokers, saramaConfig, cloudEventsConfig.EventsPublisherConfig.TopicName) if err != nil { panic(err) diff --git a/flyteadmin/pkg/runtime/interfaces/application_configuration.go b/flyteadmin/pkg/runtime/interfaces/application_configuration.go index 15ed271412..55791a1538 100644 --- a/flyteadmin/pkg/runtime/interfaces/application_configuration.go +++ b/flyteadmin/pkg/runtime/interfaces/application_configuration.go @@ -1,6 +1,13 @@ package interfaces import ( + "context" + "crypto/tls" + "fmt" + "os" + "strings" + + "github.com/Shopify/sarama" "github.com/golang/protobuf/ptypes/wrappers" "golang.org/x/time/rate" @@ -231,11 +238,89 @@ type GCPConfig struct { ProjectID string `json:"projectId"` } +// This section holds SASL config for Kafka +type SASLConfig struct { + // Whether to use SASL + Enabled bool `json:"enabled"` + // The username + User string `json:"user"` + // The password + Password string `json:"password"` + PasswordPath string `json:"passwordPath"` + Handshake bool `json:"handshake"` + // Which SASL Mechanism to use. Defaults to PLAIN + Mechanism sarama.SASLMechanism `json:"mechanism"` +} + +// This section holds TLS config for Kafka clients +type TLSConfig struct { + // Whether to use TLS + Enabled bool `json:"enabled"` + // Whether to skip certificate verification + InsecureSkipVerify bool `json:"insecureSkipVerify"` + // The location of the client certificate + CertPath string `json:"certPath"` + // The location of the client private key + KeyPath string `json:"keyPath"` +} + +// This section holds configs for Kafka clients type KafkaConfig struct { // The version of Kafka, e.g. 
2.1.0, 0.8.2.0 Version string `json:"version"` // kafka broker addresses Brokers []string `json:"brokers"` + // sasl config + SASLConfig SASLConfig `json:"saslConfig"` + // tls config + TLSConfig TLSConfig `json:"tlsConfig"` +} + +func (k KafkaConfig) UpdateSaramaConfig(ctx context.Context, s *sarama.Config) { + var err error + s.Version, err = sarama.ParseKafkaVersion(k.Version) + if err != nil { + panic(err) + } + + if k.SASLConfig.Enabled { + s.Net.SASL.Enable = true + s.Net.SASL.User = k.SASLConfig.User + + if len(k.SASLConfig.PasswordPath) > 0 { + if _, err := os.Stat(k.SASLConfig.PasswordPath); os.IsNotExist(err) { + panic(fmt.Sprintf("missing kafka password at the specified path [%s]", k.SASLConfig.PasswordPath)) + } + passwordVal, err := os.ReadFile(k.SASLConfig.PasswordPath) + if err != nil { + panic(fmt.Sprintf("failed to kafka password from path [%s] with err: %v", k.SASLConfig.PasswordPath, err)) + } + + s.Net.SASL.Password = strings.TrimSpace(string(passwordVal)) + } else { + s.Net.SASL.Password = k.SASLConfig.Password + } + s.Net.SASL.Handshake = k.SASLConfig.Handshake + + if k.SASLConfig.Mechanism == "" { + k.SASLConfig.Mechanism = sarama.SASLTypePlaintext + } + s.Net.SASL.Mechanism = k.SASLConfig.Mechanism + } + + if k.TLSConfig.Enabled { + s.Net.TLS.Enable = true + s.Net.TLS.Config = &tls.Config{ + InsecureSkipVerify: k.TLSConfig.InsecureSkipVerify, + } + if k.TLSConfig.KeyPath != "" && k.TLSConfig.CertPath != "" { + cert, err := tls.LoadX509KeyPair(k.TLSConfig.CertPath, k.TLSConfig.KeyPath) + if err != nil { + panic(err) + } + s.Net.TLS.Config.Certificates = []tls.Certificate{cert} + } + } } // This section holds configuration for the event scheduler used to schedule workflow executions. From 7b8696ef201febac8443baa19ff9d9082725dc39 Mon Sep 17 00:00:00 2001 From: Peeter Piegaze <1153481+ppiegaze@users.noreply.github.com> Date: Thu, 31 Oct 2024 18:02:02 +0100 Subject: [PATCH 02/18] Fix broken links (#5946) Signed-off-by: Peeter Piegaze <1153481+ppiegaze@users.noreply.github.com> --- docs/deployment/configuration/general.rst | 16 ++++---- .../flytepropeller_architecture.rst | 39 ++++++++++--------- .../native_scheduler_architecture.rst | 2 +- 3 files changed, 29 insertions(+), 28 deletions(-) diff --git a/docs/deployment/configuration/general.rst b/docs/deployment/configuration/general.rst index 5db7786c9a..c68f2a8be6 100644 --- a/docs/deployment/configuration/general.rst +++ b/docs/deployment/configuration/general.rst @@ -66,7 +66,7 @@ Notice how in this example we are defining a new PodTemplate inline, which allow `V1PodSpec `__ and also define the name of the primary container, labels, and annotations. -The term compile-time here refers to the fact that the pod template definition is part of the `TaskSpec `__. +The term compile-time here refers to the fact that the pod template definition is part of the `TaskSpec `__. ******************** Runtime PodTemplates @@ -88,7 +88,7 @@ initializes a K8s informer internally to track system PodTemplate updates `aware `__ of the latest PodTemplate definitions in the K8s environment. You can find this setting in `FlytePropeller `__ -config map, which is not set by default. +config map, which is not set by default. 
An example configuration is: @@ -101,14 +101,14 @@ An example configuration is: image: "cr.flyte.org/flyteorg/flytecopilot:v0.0.15" start-timeout: "30s" default-pod-template-name: - + Create a PodTemplate resource ============================= -Flyte recognizes PodTemplate definitions with the ``default-pod-template-name`` at two granularities. +Flyte recognizes PodTemplate definitions with the ``default-pod-template-name`` at two granularities. 1. A system-wide configuration can be created in the same namespace that - FlytePropeller is running in (typically `flyte`). + FlytePropeller is running in (typically `flyte`). 2. PodTemplates can be applied from the same namespace that the Pod will be created in. FlytePropeller always favors the PodTemplate with the more specific namespace. For example, a Pod created in the ``flytesnacks-development`` @@ -196,7 +196,7 @@ where you start the Pod. An example PodTemplate is shown: .. code-block:: yaml - + apiVersion: v1 kind: PodTemplate metadata: @@ -220,7 +220,7 @@ In addition, the K8s plugin configuration in FlytePropeller defines the default Pod Labels, Annotations, and enables the host networking. .. code-block:: yaml - + plugins: k8s: default-labels: @@ -233,7 +233,7 @@ Pod Labels, Annotations, and enables the host networking. To construct a Pod, FlytePropeller initializes a Pod definition using the default PodTemplate. This definition is applied to the K8s plugin configuration values, and any task-specific configuration is overlaid. During the process, when lists -are merged, values are appended and when maps are merged, the values are overridden. +are merged, values are appended and when maps are merged, the values are overridden. The resultant Pod using the above default PodTemplate and K8s Plugin configuration is shown: .. code-block:: yaml diff --git a/docs/user_guide/concepts/component_architecture/flytepropeller_architecture.rst b/docs/user_guide/concepts/component_architecture/flytepropeller_architecture.rst index 6de45566f0..f564b9d12c 100644 --- a/docs/user_guide/concepts/component_architecture/flytepropeller_architecture.rst +++ b/docs/user_guide/concepts/component_architecture/flytepropeller_architecture.rst @@ -15,18 +15,20 @@ Introduction A Flyte :ref:`workflow ` is represented as a Directed Acyclic Graph (DAG) of interconnected Nodes. Flyte supports a robust collection of Node types to ensure diverse functionality. - ``TaskNodes`` support a plugin system to externally add system integrations. -- ``BranchNodes`` allow altering the control flow during runtime; pruning downstream evaluation paths based on input. +- ``BranchNodes`` allow altering the control flow during runtime; pruning downstream evaluation paths based on input. - ``DynamicNodes`` add nodes to the DAG. - ``WorkflowNodes`` allow embedding workflows within each other. -FlytePropeller is responsible for scheduling and tracking execution of Flyte workflows. It is implemented using a K8s controller that follows the reconciler pattern. +FlytePropeller is responsible for scheduling and tracking execution of Flyte workflows. It is implemented using a K8s controller that follows the reconciler pattern. .. image:: https://raw.githubusercontent.com/flyteorg/static-resources/main/common/reconciler-pattern.png In this scheme, resources are periodically evaluated and the goal is to transition from the observed state to a requested state. -In our case, workflows are the resources, whose desired stated (*workflow definition*) is expressed using Flyte's SDK. 
Workflows are iteratively evaluated to transition from the current state to success. During each evaluation loop, the current workflow state is established as the `phase of workflow nodes `__ and subsequent tasks, and FlytePropeller performs operations to transition this state to success. -The operations may include scheduling (or rescheduling) node executions, evaluating dynamic or branch nodes, etc. +In our case, workflows are the resources, whose desired stated (*workflow definition*) is expressed using Flyte's SDK. Workflows are iteratively evaluated to transition from the current state to success. +During each evaluation loop, the current workflow state is established as the `phase of workflow nodes `__ and subsequent tasks, +and FlytePropeller performs operations to transition this state to success. +The operations may include scheduling (or rescheduling) node executions, evaluating dynamic or branch nodes, etc. By using a simple yet robust mechanism, FlytePropeller can scale to manage a large number of concurrent workflows without significant performance degradation. @@ -43,36 +45,36 @@ FlyteAdmin is the common entry point, where initialization of FlyteWorkflow Cust FlyteWorkflow CRD / K8s Integration ----------------------------------- -Workflows in Flyte are maintained as `Custom Resource Definitions (CRDs) `__ in Kubernetes, which are stored in the backing ``etcd`` key-value store. Each workflow execution results in the creation of a new ``flyteworkflow`` CR (Custom Resource) which maintains its state for the duration of the execution. CRDs provide variable definitions to describe both resource specifications (``spec``) and status (``status``). The ``flyteworkflow`` CRD uses the ``spec`` subsection to detail the workflow DAG, embodying node dependencies, etc. +Workflows in Flyte are maintained as `Custom Resource Definitions (CRDs) `__ in Kubernetes, which are stored in the backing ``etcd`` key-value store. Each workflow execution results in the creation of a new ``flyteworkflow`` CR (Custom Resource) which maintains its state for the duration of the execution. CRDs provide variable definitions to describe both resource specifications (``spec``) and status (``status``). The ``flyteworkflow`` CRD uses the ``spec`` subsection to detail the workflow DAG, embodying node dependencies, etc. **Example** 1. Execute an `example workflow `__ on a remote Flyte cluster: -.. code-block:: bash +.. code-block:: bash pyflyte run --remote example.py training_workflow --hyperparameters '{"C": 0.4}' 2. Verify there's a new Custom Resource on the ``flytesnacks-development`` namespace (this is, the workflow belongs to the ``flytesnacks`` project and the ``development`` domain): -.. code-block:: bash +.. code-block:: bash kubectl get flyteworkflows.flyte.lyft.com -n flytesnacks-development Example output: -.. code-block:: bash +.. code-block:: bash NAME AGE - f7616dc75400f43e6920 3h42m + f7616dc75400f43e6920 3h42m 3. Describe the contents of the Custom Resource, for example the ``spec`` section: -.. code-block:: bash +.. code-block:: bash - kubectl describe flyteworkflows.flyte.lyft.com f7616dc75400f43e6920 -n flytesnacks-development + kubectl describe flyteworkflows.flyte.lyft.com f7616dc75400f43e6920 -n flytesnacks-development -.. code-block:: json +.. code-block:: json "spec": { "connections": { @@ -93,7 +95,7 @@ Example output: The status subsection tracks workflow metadata including overall workflow status, node/task phases, status/phase transition timestamps, etc. -.. code-block:: json +.. 
code-block:: json "status": { "dataDir": "gs://flyteontf-gcp-data-116223838137/metadata/propeller/flytesnacks-development-f7616dc75400f43e6920", @@ -123,7 +125,7 @@ The status subsection tracks workflow metadata including overall workflow status }, -K8s exposes a powerful controller/operator API that enables entities to track creation/updates over a specific resource type. FlytePropeller uses this API to track FlyteWorkflows, meaning every time an instance of the ``flyteworkflow`` CR is created/updated, the FlytePropeller instance is notified. +K8s exposes a powerful controller/operator API that enables entities to track creation/updates over a specific resource type. FlytePropeller uses this API to track FlyteWorkflows, meaning every time an instance of the ``flyteworkflow`` CR is created/updated, the FlytePropeller instance is notified. .. note:: @@ -138,7 +140,7 @@ FlytePropeller supports concurrent execution of multiple, unique workflows using The WorkQueue is a FIFO queue storing workflow ID strings that require a lookup to retrieve the FlyteWorkflow CR to ensure up-to-date status. A workflow may be added to the queue in a variety of circumstances: #. A new FlyteWorkflow CR is created or an existing instance is updated -#. The K8s Informer detects a workflow timeout or failed liveness check during its periodic resync operation on the FlyteWorkflow. +#. The K8s Informer detects a workflow timeout or failed liveness check during its periodic resync operation on the FlyteWorkflow. #. A FlytePropeller worker experiences an error during a processing loop #. The WorkflowExecutor observes a completed downstream node #. A NodeHandler observes state change and explicitly enqueues its owner. (For example, K8s pod informer observes completion of a task.) @@ -153,15 +155,15 @@ The WorkflowExecutor is responsible for handling high-level workflow operations. NodeExecutor ------------ -The NodeExecutor is executed on a single node, beginning with the workflow's start node. It traverses the workflow using a visitor pattern with a modified depth-first search (DFS), evaluating each node along the path. A few examples of node evaluation based on phase include: +The NodeExecutor is executed on a single node, beginning with the workflow's start node. It traverses the workflow using a visitor pattern with a modified depth-first search (DFS), evaluating each node along the path. A few examples of node evaluation based on phase include: * Successful nodes are skipped * Unevaluated nodes are queued for processing -* Failed nodes may be reattempted up to a configurable threshold. +* Failed nodes may be reattempted up to a configurable threshold. There are many configurable parameters to tune evaluation criteria including max parallelism which restricts the number of nodes which may be scheduled concurrently. Additionally, nodes may be retried to ensure recoverability on failure. -Go to the `Optimizing Performance `__ section for more information on how to tune Propeller parameters. +Go to the `Optimizing Performance `__ section for more information on how to tune Propeller parameters. The NodeExecutor is also responsible for linking data readers/writers to facilitate data transfer between node executions. The data transfer process occurs automatically within Flyte, using efficient K8s events rather than a polling listener pattern which incurs more overhead. Relatively small amounts of data may be passed between nodes inline, but it is more common to pass data URLs to backing storage. 
A component of this is writing to and checking the data cache, which facilitates the reuse of previously completed evaluations. @@ -196,4 +198,3 @@ Every operation that Propeller performs makes use of a plugin. The following dia .. image:: https://raw.githubusercontent.com/flyteorg/static-resources/main/flyte/concepts/architecture/flytepropeller_plugins_architecture.png - \ No newline at end of file diff --git a/docs/user_guide/concepts/component_architecture/native_scheduler_architecture.rst b/docs/user_guide/concepts/component_architecture/native_scheduler_architecture.rst index a923403625..27f6b3e344 100644 --- a/docs/user_guide/concepts/component_architecture/native_scheduler_architecture.rst +++ b/docs/user_guide/concepts/component_architecture/native_scheduler_architecture.rst @@ -35,7 +35,7 @@ Components Schedule Management ------------------- -This component supports creation/activation and deactivation of schedules. Each schedule is tied to a launch plan and is versioned in a similar manner. The schedule is created or its state is changed to activated/deactivated whenever the `admin API `__ is invoked for it with `ACTIVE/INACTIVE state `__. This is done either through `flytectl `__ or through any other client that calls the GRPC API. +This component supports creation/activation and deactivation of schedules. Each schedule is tied to a launch plan and is versioned in a similar manner. The schedule is created or its state is changed to activated/deactivated whenever the `admin API `__ is invoked for it with `ACTIVE/INACTIVE state `__. This is done either through `flytectl `__ or through any other client that calls the GRPC API. The API is similar to a launchplan, ensuring that only one schedule is active for a given launchplan. From 54a4ad91e6bb6014879fd7ed4a8fea844718b2ed Mon Sep 17 00:00:00 2001 From: Siddhant Rai <47355538+siiddhantt@users.noreply.github.com> Date: Thu, 31 Oct 2024 23:42:59 +0530 Subject: [PATCH 03/18] feat: improve registration patterns docs (#5808) * Update registering_workflows.md Signed-off-by: Siddhant Rai * feat: note in registering_workflows for k8sPod.dataConfig fix Signed-off-by: Siddhant Rai * update: remove note as requested change Signed-off-by: Siddhant Rai <47355538+siiddhantt@users.noreply.github.com> Signed-off-by: Siddhant Rai * Flyte docs overhaul (phase 1) (#5772) * move flytekit and flytectl docs into API section Signed-off-by: nikki everett * switch to docsearch module and env variables Signed-off-by: nikki everett * reorganize content for pydata theme Signed-off-by: nikki everett * more docs reorganization Signed-off-by: nikki everett * switch to pydata theme Signed-off-by: nikki everett * reorganize concepts/glossary and ecosystem docs Signed-off-by: nikki everett * remove unneeded custom CSS and JS files Signed-off-by: nikki everett * add redirects Signed-off-by: nikki everett * add more redirects Signed-off-by: nikki everett * first pass at updating docs contributing guide Signed-off-by: nikki everett * remove core use cases Signed-off-by: nikki everett * more edits to docs contributing guide Signed-off-by: nikki everett * more edits to the flytesnacks contributing guide Signed-off-by: nikki everett * add content to API reference index page, use consistent title and format for API reference section titles Signed-off-by: nikki everett * rename deployment section Signed-off-by: nikki everett * reorganize sections Signed-off-by: nikki everett * fix typos Signed-off-by: nikki everett * add docsearch index name and app id Signed-off-by: nikki 
everett * add ref to docs contributing doc and move all docsearch stuff to env vars again Signed-off-by: nikki everett * docs overhaul: render flyteidl under the /api/ path (#5802) * fix flyteidl structure so it renders under /api/ Signed-off-by: Niels Bantilan * do not check in flyteidl docs Signed-off-by: Niels Bantilan * update gitignore and unneeded conf Signed-off-by: Niels Bantilan * add mock DOCSEARCH_API_KEY to docs test ci Signed-off-by: Niels Bantilan * add css styling (#5803) * add css styling Signed-off-by: Niels Bantilan * update logo height Signed-off-by: Niels Bantilan --------- Signed-off-by: Niels Bantilan --------- Signed-off-by: Niels Bantilan * use same icon as union docs Signed-off-by: nikki everett * sp error Signed-off-by: Niels Bantilan --------- Signed-off-by: nikki everett Signed-off-by: Niels Bantilan Co-authored-by: Niels Bantilan Signed-off-by: Siddhant Rai * [Flyte][3][flytepropeller][Attribute Access][flytectl] Binary IDL With MessagePack (#5763) Signed-off-by: Siddhant Rai * Update aws-go-sdk to v1.47.11 to support EKS Pod Identity (#5796) Signed-off-by: Siddhant Rai * Update docs/user_guide/flyte_fundamentals/registering_workflows.md Co-authored-by: Nikki Everett Signed-off-by: Siddhant Rai <47355538+siiddhantt@users.noreply.github.com> * Update registering_workflows.md Signed-off-by: Siddhant Rai <47355538+siiddhantt@users.noreply.github.com> --------- Signed-off-by: Siddhant Rai Signed-off-by: Siddhant Rai <47355538+siiddhantt@users.noreply.github.com> Signed-off-by: nikki everett Signed-off-by: Niels Bantilan Co-authored-by: Nikki Everett Co-authored-by: Niels Bantilan Co-authored-by: Future-Outlier Co-authored-by: mthemis-provenir <168411899+mthemis-provenir@users.noreply.github.com> --- docs/user_guide/flyte_fundamentals/registering_workflows.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/docs/user_guide/flyte_fundamentals/registering_workflows.md b/docs/user_guide/flyte_fundamentals/registering_workflows.md index 0e981f19dd..f178d5a53c 100644 --- a/docs/user_guide/flyte_fundamentals/registering_workflows.md +++ b/docs/user_guide/flyte_fundamentals/registering_workflows.md @@ -304,6 +304,12 @@ pyflyte --pkgs --pkgs package ... This is useful in cases where you want to register two different Flyte projects that you maintain in a single place. + +If you encounter a ``ModuleNotFoundError`` when packaging, use the `--source` option to include the correct source paths. 
For instance: + +```{prompt} bash $ +pyflyte --pkgs package --source ./src -f +``` ```` #### Register with `flytectl register` From a2988ba35c8c5c11616bdccf7018b9255fa8e6e7 Mon Sep 17 00:00:00 2001 From: Kevin Su Date: Thu, 31 Oct 2024 17:29:59 -0700 Subject: [PATCH 04/18] Improve literal type string representation handling (#5932) Signed-off-by: Kevin Su --- .../pkg/compiler/common/pretty_print.go | 23 ++++++++++++ .../pkg/compiler/common/pretty_print_test.go | 36 +++++++++++++++++++ .../pkg/compiler/transformers/k8s/inputs.go | 2 +- .../pkg/compiler/validators/bindings.go | 6 ++-- .../pkg/compiler/validators/condition.go | 2 +- .../pkg/compiler/validators/vars.go | 2 +- 6 files changed, 65 insertions(+), 6 deletions(-) create mode 100644 flytepropeller/pkg/compiler/common/pretty_print.go create mode 100644 flytepropeller/pkg/compiler/common/pretty_print_test.go diff --git a/flytepropeller/pkg/compiler/common/pretty_print.go b/flytepropeller/pkg/compiler/common/pretty_print.go new file mode 100644 index 0000000000..61df408a4e --- /dev/null +++ b/flytepropeller/pkg/compiler/common/pretty_print.go @@ -0,0 +1,23 @@ +package common + +import ( + "fmt" + "strings" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" +) + +func LiteralTypeToStr(lt *core.LiteralType) string { + if lt == nil { + return "None" + } + if lt.GetSimple() == core.SimpleType_STRUCT { + var structure string + for k, v := range lt.GetStructure().GetDataclassType() { + structure += fmt.Sprintf("dataclass_type:{key:%v value:{%v}, ", k, LiteralTypeToStr(v)) + } + structure = strings.TrimSuffix(structure, ", ") + return fmt.Sprintf("simple: STRUCT structure{%v}", structure) + } + return lt.String() +} diff --git a/flytepropeller/pkg/compiler/common/pretty_print_test.go b/flytepropeller/pkg/compiler/common/pretty_print_test.go new file mode 100644 index 0000000000..2d875af5dd --- /dev/null +++ b/flytepropeller/pkg/compiler/common/pretty_print_test.go @@ -0,0 +1,36 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/structpb" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" +) + +func TestLiteralTypeToStr(t *testing.T) { + dataclassType := &core.LiteralType{ + Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRUCT}, + Structure: &core.TypeStructure{ + DataclassType: map[string]*core.LiteralType{ + "a": { + Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}, + }, + }, + }, + Metadata: &structpb.Struct{Fields: map[string]*structpb.Value{ + "key": {Kind: &structpb.Value_StringValue{StringValue: "a"}}, + }}, + } + assert.Equal(t, LiteralTypeToStr(nil), "None") + assert.Equal(t, LiteralTypeToStr(dataclassType), "simple: STRUCT structure{dataclass_type:{key:a value:{simple:INTEGER}}") + assert.NotEqual(t, LiteralTypeToStr(dataclassType), dataclassType.String()) + + // Test for SimpleType + simpleType := &core.LiteralType{ + Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}, + } + assert.Equal(t, LiteralTypeToStr(simpleType), "simple:INTEGER") + assert.Equal(t, LiteralTypeToStr(simpleType), simpleType.String()) +} diff --git a/flytepropeller/pkg/compiler/transformers/k8s/inputs.go b/flytepropeller/pkg/compiler/transformers/k8s/inputs.go index 26f50d4ddd..2b94570c20 100644 --- a/flytepropeller/pkg/compiler/transformers/k8s/inputs.go +++ b/flytepropeller/pkg/compiler/transformers/k8s/inputs.go @@ -42,7 +42,7 @@ func validateInputs(nodeID common.NodeID, iface *core.TypedInterface, inputs cor continue } 
if !validators.AreTypesCastable(inputType, v.Type) { - errs.Collect(errors.NewMismatchingTypesErr(nodeID, inputVar, v.Type.String(), inputType.String())) + errs.Collect(errors.NewMismatchingTypesErr(nodeID, inputVar, common.LiteralTypeToStr(v.Type), common.LiteralTypeToStr(inputType))) continue } diff --git a/flytepropeller/pkg/compiler/validators/bindings.go b/flytepropeller/pkg/compiler/validators/bindings.go index 53535ba260..b69dda529f 100644 --- a/flytepropeller/pkg/compiler/validators/bindings.go +++ b/flytepropeller/pkg/compiler/validators/bindings.go @@ -131,7 +131,7 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding // If the variable has an index. We expect param to be a collection. if v.Index != nil { if cType := param.GetType().GetCollectionType(); cType == nil { - errs.Collect(errors.NewMismatchingVariablesErr(nodeID, outputVar, param.Type.String(), inputVar, expectedType.String())) + errs.Collect(errors.NewMismatchingVariablesErr(nodeID, outputVar, c.LiteralTypeToStr(param.Type), inputVar, c.LiteralTypeToStr(expectedType))) } else { sourceType = cType } @@ -164,7 +164,7 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding return param.GetType(), []c.NodeID{val.Promise.NodeId}, true } - errs.Collect(errors.NewMismatchingVariablesErr(node.GetId(), outputVar, sourceType.String(), inputVar, expectedType.String())) + errs.Collect(errors.NewMismatchingVariablesErr(node.GetId(), outputVar, c.LiteralTypeToStr(sourceType), inputVar, c.LiteralTypeToStr(expectedType))) return nil, nil, !errs.HasErrors() } } @@ -180,7 +180,7 @@ func validateBinding(w c.WorkflowBuilder, node c.Node, nodeParam string, binding if literalType == nil { errs.Collect(errors.NewUnrecognizedValueErr(nodeID, reflect.TypeOf(val.Scalar.GetValue()).String())) } else if validateParamTypes && !AreTypesCastable(literalType, expectedType) { - errs.Collect(errors.NewMismatchingTypesErr(nodeID, nodeParam, literalType.String(), expectedType.String())) + errs.Collect(errors.NewMismatchingTypesErr(nodeID, nodeParam, c.LiteralTypeToStr(literalType), c.LiteralTypeToStr(expectedType))) } if expectedType.GetEnumType() != nil { diff --git a/flytepropeller/pkg/compiler/validators/condition.go b/flytepropeller/pkg/compiler/validators/condition.go index 8e202b6423..70b72cde8a 100644 --- a/flytepropeller/pkg/compiler/validators/condition.go +++ b/flytepropeller/pkg/compiler/validators/condition.go @@ -44,7 +44,7 @@ func ValidateBooleanExpression(w c.WorkflowBuilder, node c.NodeBuilder, expr *fl if op1Valid && op2Valid && op1Type != nil && op2Type != nil { if op1Type.String() != op2Type.String() { errs.Collect(errors.NewMismatchingTypesErr(node.GetId(), "RightValue", - op1Type.String(), op2Type.String())) + c.LiteralTypeToStr(op1Type), c.LiteralTypeToStr(op2Type))) } } } else if expr.GetConjunction() != nil { diff --git a/flytepropeller/pkg/compiler/validators/vars.go b/flytepropeller/pkg/compiler/validators/vars.go index 53ca67e4ee..e114dc4fc0 100644 --- a/flytepropeller/pkg/compiler/validators/vars.go +++ b/flytepropeller/pkg/compiler/validators/vars.go @@ -40,7 +40,7 @@ func validateInputVar(n c.NodeBuilder, paramName string, requireParamType bool, func validateVarType(nodeID c.NodeID, paramName string, param *flyte.Variable, expectedType *flyte.LiteralType, errs errors.CompileErrors) (ok bool) { if param.GetType().String() != expectedType.String() { - errs.Collect(errors.NewMismatchingTypesErr(nodeID, paramName, param.GetType().String(), expectedType.String())) + 
errs.Collect(errors.NewMismatchingTypesErr(nodeID, paramName, c.LiteralTypeToStr(param.GetType()), c.LiteralTypeToStr(expectedType))) } return !errs.HasErrors() From e5f84218b0b331d1a584c83ff31a5718865160f7 Mon Sep 17 00:00:00 2001 From: Christoph Paulik Date: Fri, 1 Nov 2024 16:09:14 +0100 Subject: [PATCH 05/18] Update propeller sharding docs - types needs to be capitalized (#5860) Using the lowercase version of e.g. `project` the following error is raised by flytepropeller-manager ``` level=error msg="\n\n1 error(s) decoding:\n\n* cannot parse 'shard.type' as int: strconv.ParseInt: parsing \"project\": invalid syntax" ``` Signed-off-by: Christoph Paulik --- docs/deployment/configuration/performance.rst | 6 +++--- flytepropeller/manager/doc.go | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/deployment/configuration/performance.rst b/docs/deployment/configuration/performance.rst index 8c9c31030d..db4af7ea98 100644 --- a/docs/deployment/configuration/performance.rst +++ b/docs/deployment/configuration/performance.rst @@ -270,7 +270,7 @@ The hash shard Strategy, denoted by ``type: Hash`` in the configuration below, u type: Hash # use the "hash" shard strategy shard-count: 4 # the total number of shards -The project and domain shard strategies, denoted by ``type: project`` and ``type: domain`` respectively, use the Flyte workflow project and domain metadata to shard Flyte workflows. These shard strategies are configured using a ``per-shard-mapping`` option, which is a list of IDs. Each element in the ``per-shard-mapping`` list defines a new shard, and the ID list assigns responsibility for the specified IDs to that shard. A shard configured as a single wildcard ID (i.e. ``*``) is responsible for all IDs that are not covered by other shards. Only a single shard may be configured with a wildcard ID and, on that shard, there must be only one ID, namely the wildcard. +The project and domain shard strategies, denoted by ``type: Project`` and ``type: Domain`` respectively, use the Flyte workflow project and domain metadata to shard Flyte workflows. These shard strategies are configured using a ``per-shard-mapping`` option, which is a list of IDs. Each element in the ``per-shard-mapping`` list defines a new shard, and the ID list assigns responsibility for the specified IDs to that shard. A shard configured as a single wildcard ID (i.e. ``*``) is responsible for all IDs that are not covered by other shards. Only a single shard may be configured with a wildcard ID and, on that shard, there must be only one ID, namely the wildcard. .. code-block:: yaml @@ -281,7 +281,7 @@ The project and domain shard strategies, denoted by ``type: project`` and ``type # pod and scanning configuration redacted # ... shard: - type: project # use the "project" shard strategy + type: Project # use the "Project" shard strategy per-shard-mapping: # a list of per shard mappings - one shard is created for each element - ids: # the list of ids to be managed by the first shard - flytesnacks @@ -298,7 +298,7 @@ The project and domain shard strategies, denoted by ``type: project`` and ``type # pod and scanning configuration redacted # ... 
shard: - type: domain # use the "domain" shard strategy + type: Domain # use the "Domain" shard strategy per-shard-mapping: # a list of per shard mappings - one shard is created for each element - ids: # the list of ids to be managed by the first shard - production diff --git a/flytepropeller/manager/doc.go b/flytepropeller/manager/doc.go index 025b60cf1f..ceb3192e1a 100644 --- a/flytepropeller/manager/doc.go +++ b/flytepropeller/manager/doc.go @@ -18,7 +18,7 @@ FlytePropeller Manager handles dynamic updates to both the k8s PodTemplate and s # Shard Strategies -Flyte defines a variety of Shard Strategies for configuring how FlyteWorkflows are sharded. These options may include the shard type (ex. hash, project, or domain) along with the number of shards or the distribution of project / domain IDs over shards. +Flyte defines a variety of Shard Strategies for configuring how FlyteWorkflows are sharded. These options may include the shard type (ex. Hash, Project, or Domain) along with the number of shards or the distribution of project / domain IDs over shards. Internally, FlyteWorkflow CRDs are initialized with k8s labels for project, domain, and a shard-key. The project and domain label values are associated with the environment of the registered workflow. The shard-key value is a range-bounded hash over various components of the FlyteWorkflow metadata, currently the keyspace range is defined as [0,32). A sharded Flyte deployment ensures deterministic FlyteWorkflow evaluations by setting disjoint k8s label selectors, based on the aforementioned labels, on each managed FlytePropeller instance. This ensures that only a single FlytePropeller instance is responsible for processing each FlyteWorkflow. @@ -28,10 +28,10 @@ The Hash Shard Strategy, denoted by "type: hash" in the configuration below, use manager: # pod and scanning configuration redacted shard: - type: hash # use the "hash" shard strategy + type: Hash # use the "hash" shard strategy shard-count: 4 # the total number of shards -The Project and Domain Shard Strategies, denoted by "type: project" and "type: domain" respectively, use the FlyteWorkflow project and domain metadata to distributed FlyteWorkflows over managed FlytePropeller instances. These Shard Strategies are configured using a "per-shard-mapping" option, which is a list of ID lists. Each element in the "per-shard-mapping" list defines a new shard and the ID list assigns responsibility for the specified IDs to that shard. The assignment is performed using k8s label selectors, where each managed FlytePropeller instance includes FlyteWorkflows with the specified project or domain labels. +The Project and Domain Shard Strategies, denoted by "type: Project" and "type: Domain" respectively, use the FlyteWorkflow project and domain metadata to distributed FlyteWorkflows over managed FlytePropeller instances. These Shard Strategies are configured using a "per-shard-mapping" option, which is a list of ID lists. Each element in the "per-shard-mapping" list defines a new shard and the ID list assigns responsibility for the specified IDs to that shard. The assignment is performed using k8s label selectors, where each managed FlytePropeller instance includes FlyteWorkflows with the specified project or domain labels. A shard configured as a single wildcard ID (i.e. "*") is responsible for all IDs that are not covered by other shards. Only a single shard may be configured with a wildcard ID and on that shard their must be only one ID, namely the wildcard. 
In this case, the managed FlytePropeller instance uses k8s label selectors to exclude FlyteWorkflows with project or domain IDs from other shards. @@ -39,7 +39,7 @@ A shard configured as a single wildcard ID (i.e. "*") is responsible for all IDs manager: # pod and scanning configuration redacted shard: - type: project # use the "project" shard strategy + type: Project # use the "Project" shard strategy per-shard-mapping: # a list of per shard mappings - one shard is created for each element - ids: # the list of ids to be managed by the first shard - flytesnacks @@ -53,7 +53,7 @@ A shard configured as a single wildcard ID (i.e. "*") is responsible for all IDs manager: # pod and scanning configuration redacted shard: - type: domain # use the "domain" shard strategy + type: Domain # use the "Domain" shard strategy per-shard-mapping: # a list of per shard mappings - one shard is created for each element - ids: # the list of ids to be managed by the first shard - production From 704f8ebea5b02b5bb789999b47843d60796f41da Mon Sep 17 00:00:00 2001 From: "terry.hung" Date: Sat, 2 Nov 2024 03:44:23 +0800 Subject: [PATCH 06/18] fix: align the default config output (#5947) Signed-off-by: terry.hung --- flytectl/pkg/configutil/configutil.go | 11 ----------- flytectl/pkg/configutil/configutil_test.go | 18 +++++------------- flytectl/pkg/sandbox/start.go | 7 +------ 3 files changed, 6 insertions(+), 30 deletions(-) diff --git a/flytectl/pkg/configutil/configutil.go b/flytectl/pkg/configutil/configutil.go index df2f099da5..c018052d91 100644 --- a/flytectl/pkg/configutil/configutil.go +++ b/flytectl/pkg/configutil/configutil.go @@ -16,17 +16,6 @@ const ( console: endpoint: {{.Console}} {{- end}} -{{- if .DataConfig}} -# This is not a needed configuration, only useful if you want to explore the data in sandbox. For non sandbox, please -# do not use this configuration, instead prefer to use aws, gcs, azure sessions. Flytekit, should use fsspec to -# auto select the right backend to pull data as long as the sessions are configured. For Sandbox, this is special, as -# minio is s3 compatible and we ship with minio in sandbox. -storage: - connection: - endpoint: {{.DataConfig.Endpoint}} - access-key: {{.DataConfig.AccessKey}} - secret-key: {{.DataConfig.SecretKey}} -{{- end}} ` ) diff --git a/flytectl/pkg/configutil/configutil_test.go b/flytectl/pkg/configutil/configutil_test.go index ccdf5035ef..10f8553247 100644 --- a/flytectl/pkg/configutil/configutil_test.go +++ b/flytectl/pkg/configutil/configutil_test.go @@ -1,6 +1,7 @@ package configutil import ( + "io" "io/ioutil" "os" "testing" @@ -20,7 +21,7 @@ func TestSetupConfig(t *testing.T) { } err = SetupConfig(file.Name(), AdminConfigTemplate, templateValue) assert.NoError(t, err) - configBytes, err := ioutil.ReadAll(file) + configBytes, err := io.ReadAll(file) assert.NoError(t, err) expected := `admin: # For GRPC endpoints you might want to use dns:///flyte.myexample.com @@ -62,21 +63,12 @@ console: } err = SetupConfig(file.Name(), AdminConfigTemplate, templateValue) assert.NoError(t, err) - configBytes, err = ioutil.ReadAll(file) + configBytes, err = io.ReadAll(file) assert.NoError(t, err) expected = `admin: # For GRPC endpoints you might want to use dns:///flyte.myexample.com endpoint: dns:///admin.example.com insecure: true -# This is not a needed configuration, only useful if you want to explore the data in sandbox. For non sandbox, please -# do not use this configuration, instead prefer to use aws, gcs, azure sessions. 
Flytekit, should use fsspec to -# auto select the right backend to pull data as long as the sessions are configured. For Sandbox, this is special, as -# minio is s3 compatible and we ship with minio in sandbox. -storage: - connection: - endpoint: http://localhost:9000 - access-key: my-access-key - secret-key: my-secret-key ` assert.Equal(t, expected, string(configBytes)) @@ -91,8 +83,8 @@ func TestConfigCleanup(t *testing.T) { if os.IsNotExist(err) { _ = os.MkdirAll(f.FilePathJoin(f.UserHomeDir(), ".flyte"), 0755) } - _ = ioutil.WriteFile(FlytectlConfig, []byte("string"), 0600) - _ = ioutil.WriteFile(Kubeconfig, []byte("string"), 0600) + _ = os.WriteFile(FlytectlConfig, []byte("string"), 0600) + _ = os.WriteFile(Kubeconfig, []byte("string"), 0600) err = ConfigCleanup() assert.Nil(t, err) diff --git a/flytectl/pkg/sandbox/start.go b/flytectl/pkg/sandbox/start.go index 8689aca6f2..6681baf5e1 100644 --- a/flytectl/pkg/sandbox/start.go +++ b/flytectl/pkg/sandbox/start.go @@ -175,13 +175,8 @@ func startSandbox(ctx context.Context, cli docker.Docker, g github.GHRepoService } templateValues := configutil.ConfigTemplateSpec{ - Host: "localhost:30080", + Host: "dns:///localhost:30080", Insecure: true, - DataConfig: &configutil.DataConfig{ - Endpoint: "http://localhost:30002", - AccessKey: "minio", - SecretKey: "miniostorage", - }, } if err := configutil.SetupConfig(configutil.FlytectlConfig, configutil.GetTemplate(), templateValues); err != nil { return nil, err From f7450303998094289cef2f8106c185eb8efc670a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B1=9F=E5=AE=B6=E7=91=8B?= <36886416+JiangJiaWei1103@users.noreply.github.com> Date: Sun, 3 Nov 2024 08:31:40 +0100 Subject: [PATCH 07/18] docs: Fix doc links to blob literal and type (#5952) Signed-off-by: JiaWei Jiang --- docs/user_guide/data_types_and_io/flytefile.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/user_guide/data_types_and_io/flytefile.md b/docs/user_guide/data_types_and_io/flytefile.md index 44378a315a..76dc0f6be8 100644 --- a/docs/user_guide/data_types_and_io/flytefile.md +++ b/docs/user_guide/data_types_and_io/flytefile.md @@ -8,9 +8,9 @@ Files are one of the most fundamental entities that users of Python work with, and they are fully supported by Flyte. In the IDL, they are known as -[Blob](https://github.com/flyteorg/flyteidl/blob/master/protos/flyteidl/core/literals.proto#L33) +[Blob](https://github.com/flyteorg/flyte/blob/master/flyteidl/protos/flyteidl/core/literals.proto#L33) literals which are backed by the -[blob type](https://github.com/flyteorg/flyteidl/blob/master/protos/flyteidl/core/types.proto#L47). +[blob type](https://github.com/flyteorg/flyte/blob/master/flyteidl/protos/flyteidl/core/types.proto#L73) Let's assume our mission here is pretty simple. 
We download a few CSV file links, read them with the python built-in {py:class}`csv.DictReader` function, From 636cc2339554522b740b275adb980006d2ee1de9 Mon Sep 17 00:00:00 2001 From: Eduardo Apolinario <653394+eapolinario@users.noreply.github.com> Date: Mon, 4 Nov 2024 15:53:33 -0500 Subject: [PATCH 08/18] Fix remaining misuses of capturing the default file descriptors in flytectl unit tests (#5950) * Search and replace remaining instances of testutils.Setup() Signed-off-by: Eduardo Apolinario * Add cleanup in to the `testing.T` Cleanup method Signed-off-by: Eduardo Apolinario * Revert unintended change to executor Signed-off-by: Eduardo Apolinario --------- Signed-off-by: Eduardo Apolinario Co-authored-by: Eduardo Apolinario --- flytectl/cmd/compile/compile_test.go | 6 +- flytectl/cmd/create/create_test.go | 3 - flytectl/cmd/create/execution_test.go | 6 +- flytectl/cmd/create/execution_util_test.go | 55 ++++-------- flytectl/cmd/create/project_test.go | 14 ++- flytectl/cmd/delete/delete_test.go | 3 - flytectl/cmd/delete/execution_test.go | 10 ++- ...tchable_cluster_resource_attribute_test.go | 25 ++++-- .../matchable_execution_cluster_label_test.go | 25 ++++-- ...atchable_execution_queue_attribute_test.go | 25 ++++-- .../delete/matchable_plugin_override_test.go | 25 ++++-- .../matchable_task_resource_attribute_test.go | 25 ++++-- ...atchable_workflow_execution_config_test.go | 25 ++++-- flytectl/cmd/demo/exec_test.go | 3 +- flytectl/cmd/demo/status_test.go | 6 +- flytectl/cmd/demo/teardown_test.go | 3 +- flytectl/cmd/get/execution_test.go | 24 ++--- flytectl/cmd/get/get_test.go | 3 - flytectl/cmd/get/launch_plan_test.go | 39 +++----- ...tchable_cluster_resource_attribute_test.go | 18 ++-- .../matchable_execution_cluster_label_test.go | 18 ++-- ...atchable_execution_queue_attribute_test.go | 18 ++-- .../cmd/get/matchable_plugin_override_test.go | 18 ++-- .../matchable_task_resource_attribute_test.go | 18 ++-- ...atchable_workflow_execution_config_test.go | 18 ++-- flytectl/cmd/get/node_execution_test.go | 9 +- flytectl/cmd/get/project_test.go | 9 +- flytectl/cmd/get/task_test.go | 36 +++----- flytectl/cmd/get/workflow_test.go | 24 ++--- flytectl/cmd/register/examples_test.go | 7 +- flytectl/cmd/register/files_test.go | 28 ++++-- flytectl/cmd/register/register_test.go | 3 - flytectl/cmd/register/register_util_test.go | 88 +++++++++++++------ flytectl/cmd/sandbox/exec_test.go | 2 +- flytectl/cmd/sandbox/status_test.go | 4 +- flytectl/cmd/sandbox/teardown_test.go | 2 +- flytectl/cmd/testutils/test_utils.go | 14 +-- flytectl/cmd/update/execution_test.go | 20 +++-- flytectl/cmd/update/launch_plan_meta_test.go | 21 ++--- flytectl/cmd/update/launch_plan_test.go | 24 +++-- ...tchable_cluster_resource_attribute_test.go | 45 ++++++++-- .../matchable_execution_cluster_label_test.go | 42 ++++++++- ...atchable_execution_queue_attribute_test.go | 45 ++++++++-- .../update/matchable_plugin_override_test.go | 42 ++++++++- .../matchable_task_resource_attribute_test.go | 42 ++++++++- ...atchable_workflow_execution_config_test.go | 42 ++++++++- flytectl/cmd/update/named_entity_test.go | 8 +- flytectl/cmd/update/project_test.go | 18 +++- flytectl/cmd/update/task_meta_test.go | 21 ++--- flytectl/cmd/update/workflow_meta_test.go | 37 ++++++-- flytectl/cmd/upgrade/upgrade_test.go | 18 ++-- flytectl/cmd/version/version_test.go | 9 +- flytectl/pkg/sandbox/status_test.go | 6 +- 53 files changed, 671 insertions(+), 428 deletions(-) diff --git a/flytectl/cmd/compile/compile_test.go 
b/flytectl/cmd/compile/compile_test.go index 2d91260aff..3c90e6e54e 100644 --- a/flytectl/cmd/compile/compile_test.go +++ b/flytectl/cmd/compile/compile_test.go @@ -6,7 +6,7 @@ import ( config "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/compile" cmdCore "github.com/flyteorg/flyte/flytectl/cmd/core" - u "github.com/flyteorg/flyte/flytectl/cmd/testutils" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" ) @@ -29,9 +29,7 @@ func TestCompileCommand(t *testing.T) { // compiling via cobra command compileCfg := config.DefaultCompileConfig compileCfg.File = "testdata/valid-package.tgz" - var setup = u.Setup - s := setup() - defer s.TearDown() + s := testutils.Setup(t) compileCmd := CreateCompileCommand()["compile"] err := compileCmd.CmdFunc(context.Background(), []string{}, s.CmdCtx) assert.Nil(t, err, "compiling via cmd returns err") diff --git a/flytectl/cmd/create/create_test.go b/flytectl/cmd/create/create_test.go index b7b5a2c32c..b122d64e51 100644 --- a/flytectl/cmd/create/create_test.go +++ b/flytectl/cmd/create/create_test.go @@ -4,14 +4,11 @@ import ( "sort" "testing" - "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/stretchr/testify/assert" ) const testDataFolder = "../testdata/" -var setup = testutils.Setup - func TestCreateCommand(t *testing.T) { createCommand := RemoteCreateCommand() assert.Equal(t, createCommand.Use, "create") diff --git a/flytectl/cmd/create/execution_test.go b/flytectl/cmd/create/execution_test.go index 4886e2e7b8..d01b683e02 100644 --- a/flytectl/cmd/create/execution_test.go +++ b/flytectl/cmd/create/execution_test.go @@ -19,10 +19,11 @@ type createSuite struct { suite.Suite testutils.TestStruct originalExecConfig ExecutionConfig + t *testing.T } func (s *createSuite) SetupTest() { - s.TestStruct = setup() + s.TestStruct = testutils.Setup(s.t) // TODO: migrate to new command context from testutils s.CmdCtx = cmdCore.NewCommandContext(s.MockClient, s.MockOutStream) @@ -30,7 +31,6 @@ func (s *createSuite) SetupTest() { } func (s *createSuite) TearDownTest() { - defer s.TearDown() orig := s.originalExecConfig executionConfig = &orig s.MockAdminClient.AssertExpectations(s.T()) @@ -331,5 +331,5 @@ func (s *createSuite) Test_CreateTaskExecution_DryRun() { } func TestCreateSuite(t *testing.T) { - suite.Run(t, &createSuite{originalExecConfig: *executionConfig}) + suite.Run(t, &createSuite{originalExecConfig: *executionConfig, t: t}) } diff --git a/flytectl/cmd/create/execution_util_test.go b/flytectl/cmd/create/execution_util_test.go index 000e3621d3..e27ba4a96b 100644 --- a/flytectl/cmd/create/execution_util_test.go +++ b/flytectl/cmd/create/execution_util_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/flyteorg/flyte/flytectl/cmd/config" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/stretchr/testify/assert" @@ -45,8 +46,7 @@ func createExecutionUtilSetup() { } func TestCreateExecutionForRelaunch(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() s.MockAdminClient.OnRelaunchExecutionMatch(s.Ctx, relaunchRequest).Return(executionCreateResponse, nil) @@ -55,8 +55,7 @@ func TestCreateExecutionForRelaunch(t *testing.T) { } func TestCreateExecutionForRelaunchNotFound(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() 
s.MockAdminClient.OnRelaunchExecutionMatch(s.Ctx, relaunchRequest).Return(nil, errors.New("unknown execution")) @@ -67,8 +66,7 @@ func TestCreateExecutionForRelaunchNotFound(t *testing.T) { } func TestCreateExecutionForRecovery(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() s.MockAdminClient.OnRecoverExecutionMatch(s.Ctx, recoverRequest).Return(executionCreateResponse, nil) @@ -77,8 +75,7 @@ func TestCreateExecutionForRecovery(t *testing.T) { } func TestCreateExecutionForRecoveryNotFound(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() s.MockAdminClient.OnRecoverExecutionMatch(s.Ctx, recoverRequest).Return(nil, errors.New("unknown execution")) @@ -89,8 +86,7 @@ func TestCreateExecutionForRecoveryNotFound(t *testing.T) { func TestCreateExecutionRequestForWorkflow(t *testing.T) { t.Run("successful", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() launchPlan := &admin.LaunchPlan{} @@ -100,8 +96,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { assert.NotNil(t, execCreateRequest) }) t.Run("successful with envs", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() launchPlan := &admin.LaunchPlan{} @@ -114,8 +109,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { assert.NotNil(t, execCreateRequest) }) t.Run("successful with empty envs", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() launchPlan := &admin.LaunchPlan{} @@ -128,8 +122,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { assert.NotNil(t, execCreateRequest) }) t.Run("successful with execution Cluster label and envs", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() launchPlan := &admin.LaunchPlan{} @@ -144,8 +137,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { assert.Equal(t, "cluster", execCreateRequest.Spec.ExecutionClusterLabel.Value) }) t.Run("failed literal conversion", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() launchPlan := &admin.LaunchPlan{ @@ -162,8 +154,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { assert.Equal(t, fmt.Errorf("parameter [nilparam] has nil Variable"), err) }) t.Run("failed fetch", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() s.FetcherExt.OnFetchLPVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) @@ -173,8 +164,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { assert.Equal(t, err, errors.New("failed")) }) t.Run("with security context", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() executionConfig.KubeServiceAcct = "default" @@ -190,8 +180,7 @@ func TestCreateExecutionRequestForWorkflow(t *testing.T) { func TestCreateExecutionRequestForTask(t *testing.T) { t.Run("successful", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() task := &admin.Task{ @@ -205,8 +194,7 @@ func TestCreateExecutionRequestForTask(t *testing.T) { assert.NotNil(t, execCreateRequest) }) t.Run("successful with envs", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := 
testutils.Setup(t) createExecutionUtilSetup() task := &admin.Task{ @@ -223,8 +211,7 @@ func TestCreateExecutionRequestForTask(t *testing.T) { assert.NotNil(t, execCreateRequest) }) t.Run("successful with empty envs", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() task := &admin.Task{ @@ -241,8 +228,7 @@ func TestCreateExecutionRequestForTask(t *testing.T) { assert.NotNil(t, execCreateRequest) }) t.Run("failed literal conversion", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() task := &admin.Task{ @@ -267,8 +253,7 @@ func TestCreateExecutionRequestForTask(t *testing.T) { assert.Equal(t, fmt.Errorf("variable [nilvar] has nil type"), err) }) t.Run("failed fetch", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() s.FetcherExt.OnFetchTaskVersionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) @@ -278,8 +263,7 @@ func TestCreateExecutionRequestForTask(t *testing.T) { assert.Equal(t, err, errors.New("failed")) }) t.Run("with security context", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() executionConfig.KubeServiceAcct = "default" @@ -316,8 +300,7 @@ func Test_resolveOverrides(t *testing.T) { } func TestCreateExecutionForRelaunchOverwritingCache(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createExecutionUtilSetup() executionConfig.OverwriteCache = true diff --git a/flytectl/cmd/create/project_test.go b/flytectl/cmd/create/project_test.go index 1d63c0fceb..1dc33356f1 100644 --- a/flytectl/cmd/create/project_test.go +++ b/flytectl/cmd/create/project_test.go @@ -8,6 +8,7 @@ import ( "github.com/flyteorg/flyte/flytectl/clierrors" "github.com/flyteorg/flyte/flytectl/cmd/config" "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/project" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -36,13 +37,12 @@ func createProjectSetup() { project.DefaultProjectConfig.Description = "" config.GetConfig().Project = "" } + func TestCreateProjectFunc(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createProjectSetup() defer s.TearDownAndVerify(t, "project created successfully.") - defer s.TearDown() project.DefaultProjectConfig.ID = projectValue project.DefaultProjectConfig.Name = projectValue project.DefaultProjectConfig.Labels = map[string]string{} @@ -54,12 +54,10 @@ func TestCreateProjectFunc(t *testing.T) { } func TestEmptyProjectID(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createProjectSetup() defer s.TearDownAndVerify(t, "") - defer s.TearDown() project.DefaultProjectConfig = &project.ConfigProject{} s.MockAdminClient.OnRegisterProjectMatch(s.Ctx, projectRegisterRequest).Return(nil, nil) err := createProjectsCommand(s.Ctx, []string{}, s.CmdCtx) @@ -68,12 +66,10 @@ func TestEmptyProjectID(t *testing.T) { } func TestEmptyProjectName(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) createProjectSetup() defer s.TearDownAndVerify(t, "") - defer s.TearDown() project.DefaultProjectConfig.ID = projectValue project.DefaultProjectConfig.Labels = map[string]string{} project.DefaultProjectConfig.Description = "" diff --git 
a/flytectl/cmd/delete/delete_test.go b/flytectl/cmd/delete/delete_test.go index 0184450305..36b0cb8ad1 100644 --- a/flytectl/cmd/delete/delete_test.go +++ b/flytectl/cmd/delete/delete_test.go @@ -4,7 +4,6 @@ import ( "sort" "testing" - "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/stretchr/testify/assert" ) @@ -13,8 +12,6 @@ const ( testDataInvalidAttrFile = "testdata/invalid_attribute.yaml" ) -var setup = testutils.Setup - func TestDeleteCommand(t *testing.T) { deleteCommand := RemoteDeleteCommand() assert.Equal(t, deleteCommand.Use, "delete") diff --git a/flytectl/cmd/delete/execution_test.go b/flytectl/cmd/delete/execution_test.go index c883a4d4df..6b71010879 100644 --- a/flytectl/cmd/delete/execution_test.go +++ b/flytectl/cmd/delete/execution_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/flyteorg/flyte/flytectl/cmd/config" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/stretchr/testify/assert" @@ -32,7 +33,8 @@ func terminateExecutionSetup() { } func TestTerminateExecutionFunc(t *testing.T) { - s := setup() + s := testutils.Setup(t) + terminateExecutionSetup() terminateExecResponse := &admin.ExecutionTerminateResponse{} s.MockAdminClient.OnTerminateExecutionMatch(s.Ctx, terminateExecRequests[0]).Return(terminateExecResponse, nil) @@ -45,7 +47,8 @@ func TestTerminateExecutionFunc(t *testing.T) { } func TestTerminateExecutionFuncWithError(t *testing.T) { - s := setup() + s := testutils.Setup(t) + terminateExecutionSetup() terminateExecResponse := &admin.ExecutionTerminateResponse{} s.MockAdminClient.OnTerminateExecutionMatch(s.Ctx, terminateExecRequests[0]).Return(nil, errors.New("failed to terminate")) @@ -58,7 +61,8 @@ func TestTerminateExecutionFuncWithError(t *testing.T) { } func TestTerminateExecutionFuncWithPartialSuccess(t *testing.T) { - s := setup() + s := testutils.Setup(t) + terminateExecutionSetup() terminateExecResponse := &admin.ExecutionTerminateResponse{} s.MockAdminClient.OnTerminateExecutionMatch(s.Ctx, terminateExecRequests[0]).Return(terminateExecResponse, nil) diff --git a/flytectl/cmd/delete/matchable_cluster_resource_attribute_test.go b/flytectl/cmd/delete/matchable_cluster_resource_attribute_test.go index f2fe9ca49e..318959da95 100644 --- a/flytectl/cmd/delete/matchable_cluster_resource_attribute_test.go +++ b/flytectl/cmd/delete/matchable_cluster_resource_attribute_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/clusterresourceattribute" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,7 +19,8 @@ func deleteClusterResourceAttributeSetup() { func TestDeleteClusterResourceAttributes(t *testing.T) { t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file clusterresourceattribute.DefaultDelConfig.AttrFile = "" @@ -31,7 +33,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("failed project domain attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() 
// No args implying project domain attribute deletion s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, @@ -43,7 +46,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("successful project domain attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file clusterresourceattribute.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_cluster_attribute.yaml" @@ -56,7 +60,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { s.Ctx, "flytesnacks", "development", admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("successful workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file clusterresourceattribute.DefaultDelConfig.AttrFile = "" @@ -70,7 +75,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("failed workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file clusterresourceattribute.DefaultDelConfig.AttrFile = "" @@ -85,7 +91,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("successful workflow attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file clusterresourceattribute.DefaultDelConfig.AttrFile = "testdata/valid_workflow_cluster_attribute.yaml" @@ -99,7 +106,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("workflow attribute deletion non existent file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file clusterresourceattribute.DefaultDelConfig.AttrFile = testDataNonExistentFile @@ -113,7 +121,8 @@ func TestDeleteClusterResourceAttributes(t *testing.T) { admin.MatchableResource_CLUSTER_RESOURCE) }) t.Run("attribute deletion invalid file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteClusterResourceAttributeSetup() // Empty attribute file clusterresourceattribute.DefaultDelConfig.AttrFile = testDataInvalidAttrFile diff --git a/flytectl/cmd/delete/matchable_execution_cluster_label_test.go b/flytectl/cmd/delete/matchable_execution_cluster_label_test.go index 7335bd6721..bc9d21a889 100644 --- a/flytectl/cmd/delete/matchable_execution_cluster_label_test.go +++ b/flytectl/cmd/delete/matchable_execution_cluster_label_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/executionclusterlabel" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,7 +19,8 @@ func deleteExecutionClusterLabelSetup() { func TestDeleteExecutionClusterLabels(t *testing.T) { t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = "" @@ -31,7 +33,8 @@ func 
TestDeleteExecutionClusterLabels(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("failed project domain attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // No args implying project domain attribute deletion s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, @@ -43,7 +46,8 @@ func TestDeleteExecutionClusterLabels(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("successful project domain attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_execution_cluster_label.yaml" @@ -56,7 +60,8 @@ func TestDeleteExecutionClusterLabels(t *testing.T) { s.Ctx, "flytesnacks", "development", admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("successful workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = "" @@ -70,7 +75,8 @@ func TestDeleteExecutionClusterLabels(t *testing.T) { admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("failed workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = "" @@ -85,7 +91,8 @@ func TestDeleteExecutionClusterLabels(t *testing.T) { admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("successful workflow attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = "testdata/valid_workflow_execution_cluster_label.yaml" @@ -99,7 +106,8 @@ func TestDeleteExecutionClusterLabels(t *testing.T) { admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("workflow attribute deletion non existent file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = testDataNonExistentFile @@ -113,7 +121,8 @@ func TestDeleteExecutionClusterLabels(t *testing.T) { admin.MatchableResource_EXECUTION_CLUSTER_LABEL) }) t.Run("attribute deletion invalid file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionClusterLabelSetup() // Empty attribute file executionclusterlabel.DefaultDelConfig.AttrFile = testDataInvalidAttrFile diff --git a/flytectl/cmd/delete/matchable_execution_queue_attribute_test.go b/flytectl/cmd/delete/matchable_execution_queue_attribute_test.go index 20402ee79b..53a9613c74 100644 --- a/flytectl/cmd/delete/matchable_execution_queue_attribute_test.go +++ b/flytectl/cmd/delete/matchable_execution_queue_attribute_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/executionqueueattribute" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,7 +19,8 @@ func deleteExecutionQueueAttributeSetup() { func 
TestDeleteExecutionQueueAttributes(t *testing.T) { t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteExecutionQueueAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = "" @@ -31,7 +33,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("failed project domain attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // No args implying project domain attribute deletion s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, @@ -43,7 +46,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("successful project domain attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_execution_queue_attribute.yaml" @@ -56,7 +60,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { s.Ctx, "flytesnacks", "development", admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("successful workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = "" @@ -70,7 +75,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("failed workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = "" @@ -85,7 +91,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("successful workflow attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = "testdata/valid_workflow_execution_queue_attribute.yaml" @@ -99,7 +106,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("workflow attribute deletion non existent file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = testDataNonExistentFile @@ -113,7 +121,8 @@ func TestDeleteExecutionQueueAttributes(t *testing.T) { admin.MatchableResource_EXECUTION_QUEUE) }) t.Run("attribute deletion invalid file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file executionqueueattribute.DefaultDelConfig.AttrFile = testDataInvalidAttrFile diff --git a/flytectl/cmd/delete/matchable_plugin_override_test.go b/flytectl/cmd/delete/matchable_plugin_override_test.go index 623729fdd2..f070fe87b7 100644 --- a/flytectl/cmd/delete/matchable_plugin_override_test.go +++ b/flytectl/cmd/delete/matchable_plugin_override_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" pluginoverride "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/plugin_override" + 
"github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,7 +19,8 @@ func deletePluginOverrideSetup() { func TestPluginOverride(t *testing.T) { t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = "" @@ -31,7 +33,8 @@ func TestPluginOverride(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("failed project domain attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // No args implying project domain attribute deletion s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, @@ -43,7 +46,8 @@ func TestPluginOverride(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("successful project domain attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_plugin_override.yaml" @@ -56,7 +60,8 @@ func TestPluginOverride(t *testing.T) { s.Ctx, "flytesnacks", "development", admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("successful workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = "" @@ -70,7 +75,8 @@ func TestPluginOverride(t *testing.T) { admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("failed workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = "" @@ -85,7 +91,8 @@ func TestPluginOverride(t *testing.T) { admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("successful workflow attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = "testdata/valid_workflow_plugin_override.yaml" @@ -99,7 +106,8 @@ func TestPluginOverride(t *testing.T) { admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("workflow attribute deletion non existent file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = testDataNonExistentFile @@ -113,7 +121,8 @@ func TestPluginOverride(t *testing.T) { admin.MatchableResource_PLUGIN_OVERRIDE) }) t.Run("attribute deletion invalid file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deletePluginOverrideSetup() // Empty attribute file pluginoverride.DefaultDelConfig.AttrFile = testDataInvalidAttrFile diff --git a/flytectl/cmd/delete/matchable_task_resource_attribute_test.go b/flytectl/cmd/delete/matchable_task_resource_attribute_test.go index 484052b6ab..4b19275bfa 100644 --- a/flytectl/cmd/delete/matchable_task_resource_attribute_test.go +++ b/flytectl/cmd/delete/matchable_task_resource_attribute_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/taskresourceattribute" + 
"github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,7 +19,8 @@ func deleteTaskResourceAttributeSetup() { func TestDeleteTaskResourceAttributes(t *testing.T) { t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = "" @@ -31,7 +33,8 @@ func TestDeleteTaskResourceAttributes(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_TASK_RESOURCE) }) t.Run("failed project domain attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // No args implying project domain attribute deletion s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, @@ -43,7 +46,8 @@ func TestDeleteTaskResourceAttributes(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_TASK_RESOURCE) }) t.Run("successful project domain attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_task_attribute.yaml" @@ -56,7 +60,8 @@ func TestDeleteTaskResourceAttributes(t *testing.T) { s.Ctx, "flytesnacks", "development", admin.MatchableResource_TASK_RESOURCE) }) t.Run("successful workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = "" @@ -70,7 +75,8 @@ func TestDeleteTaskResourceAttributes(t *testing.T) { admin.MatchableResource_TASK_RESOURCE) }) t.Run("failed workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = "" @@ -85,7 +91,8 @@ func TestDeleteTaskResourceAttributes(t *testing.T) { admin.MatchableResource_TASK_RESOURCE) }) t.Run("successful workflow attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = "testdata/valid_workflow_task_attribute.yaml" @@ -99,7 +106,8 @@ func TestDeleteTaskResourceAttributes(t *testing.T) { admin.MatchableResource_TASK_RESOURCE) }) t.Run("workflow attribute deletion non existent file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = testDataNonExistentFile @@ -113,7 +121,8 @@ func TestDeleteTaskResourceAttributes(t *testing.T) { admin.MatchableResource_TASK_RESOURCE) }) t.Run("attribute deletion invalid file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteTaskResourceAttributeSetup() // Empty attribute file taskresourceattribute.DefaultDelConfig.AttrFile = testDataInvalidAttrFile diff --git a/flytectl/cmd/delete/matchable_workflow_execution_config_test.go b/flytectl/cmd/delete/matchable_workflow_execution_config_test.go index 88681a32d5..7c473a5ffe 100644 --- a/flytectl/cmd/delete/matchable_workflow_execution_config_test.go +++ 
b/flytectl/cmd/delete/matchable_workflow_execution_config_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/workflowexecutionconfig" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" @@ -18,7 +19,8 @@ func deleteWorkflowExecutionConfigSetup() { func TestDeleteWorkflowExecutionConfig(t *testing.T) { t.Run("successful project domain attribute deletion commandline", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = "" @@ -31,7 +33,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) t.Run("failed project domain attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // No args implying project domain attribute deletion s.DeleterExt.OnDeleteProjectDomainAttributesMatch(mock.Anything, mock.Anything, mock.Anything, @@ -43,7 +46,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { s.Ctx, config.GetConfig().Project, config.GetConfig().Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) t.Run("successful project domain attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = "testdata/valid_project_domain_workflow_execution_config.yaml" @@ -56,7 +60,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { s.Ctx, "flytesnacks", "development", admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) t.Run("successful workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = "" @@ -70,7 +75,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) t.Run("failed workflow attribute deletion", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = "" @@ -85,7 +91,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) t.Run("successful workflow attribute deletion file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = "testdata/valid_workflow_workflow_execution_config.yaml" @@ -99,7 +106,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) t.Run("workflow attribute deletion non existent file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = testDataNonExistentFile @@ -113,7 +121,8 @@ func TestDeleteWorkflowExecutionConfig(t *testing.T) { admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG) }) t.Run("attribute deletion invalid file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + 
deleteWorkflowExecutionConfigSetup() // Empty attribute file workflowexecutionconfig.DefaultDelConfig.AttrFile = testDataInvalidAttrFile diff --git a/flytectl/cmd/demo/exec_test.go b/flytectl/cmd/demo/exec_test.go index c4b289105f..34ed289dc6 100644 --- a/flytectl/cmd/demo/exec_test.go +++ b/flytectl/cmd/demo/exec_test.go @@ -50,8 +50,7 @@ func TestDemoClusterExec(t *testing.T) { func TestSandboxClusterExecWithoutCmd(t *testing.T) { mockDocker := &mocks.Docker{} reader := bufio.NewReader(strings.NewReader("test")) - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx diff --git a/flytectl/cmd/demo/status_test.go b/flytectl/cmd/demo/status_test.go index f2006cdbf8..4080072160 100644 --- a/flytectl/cmd/demo/status_test.go +++ b/flytectl/cmd/demo/status_test.go @@ -14,16 +14,14 @@ import ( func TestDemoStatus(t *testing.T) { t.Run("Demo status with zero result", func(t *testing.T) { mockDocker := &mocks.Docker{} - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) mockDocker.OnContainerList(s.Ctx, container.ListOptions{All: true}).Return([]types.Container{}, nil) docker.Client = mockDocker err := demoClusterStatus(s.Ctx, []string{}, s.CmdCtx) assert.Nil(t, err) }) t.Run("Demo status with running", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx mockDocker := &mocks.Docker{} mockDocker.OnContainerList(ctx, container.ListOptions{All: true}).Return([]types.Container{ diff --git a/flytectl/cmd/demo/teardown_test.go b/flytectl/cmd/demo/teardown_test.go index 73927d86eb..4cbcf037a0 100644 --- a/flytectl/cmd/demo/teardown_test.go +++ b/flytectl/cmd/demo/teardown_test.go @@ -80,8 +80,7 @@ func TestTearDownFunc(t *testing.T) { func TestTearDownClusterFunc(t *testing.T) { _ = util.SetupFlyteDir() _ = util.WriteIntoFile([]byte("data"), configutil.FlytectlConfig) - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx mockDocker := &mocks.Docker{} diff --git a/flytectl/cmd/get/execution_test.go b/flytectl/cmd/get/execution_test.go index 329211621a..0598ec72b6 100644 --- a/flytectl/cmd/get/execution_test.go +++ b/flytectl/cmd/get/execution_test.go @@ -29,8 +29,7 @@ func getExecutionSetup() { func TestListExecutionFunc(t *testing.T) { getExecutionSetup() - s := setup() - defer s.TearDown() + s := testutils.Setup(t) executionResponse := &admin.Execution{ Id: &core.WorkflowExecutionIdentifier{ @@ -92,8 +91,7 @@ func TestListExecutionFuncWithError(t *testing.T) { Phase: core.WorkflowExecution_SUCCEEDED, }, } - s := setup() - defer s.TearDown() + s := testutils.Setup(t) s.FetcherExt.OnListExecutionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("executions NotFound")) err := getExecutionFunc(s.Ctx, []string{}, s.CmdCtx) @@ -129,8 +127,7 @@ func TestGetExecutionFunc(t *testing.T) { }, } args := []string{executionNameValue} - s := setup() - defer s.TearDown() + s := testutils.Setup(t) s.FetcherExt.OnFetchExecutionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything).Return(executionResponse, nil) err := getExecutionFunc(s.Ctx, args, s.CmdCtx) @@ -139,8 +136,7 @@ func TestGetExecutionFunc(t *testing.T) { } func TestGetExecutionFuncForDetails(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionSetup() ctx := s.Ctx mockCmdCtx := s.CmdCtx @@ -156,8 +152,7 @@ func TestGetExecutionFuncForDetails(t *testing.T) { func TestGetExecutionFuncWithIOData(t *testing.T) { t.Run("successful inputs outputs", 
func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionSetup() ctx := s.Ctx @@ -222,8 +217,7 @@ func TestGetExecutionFuncWithIOData(t *testing.T) { assert.Nil(t, err) }) t.Run("fetch data error from admin", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionSetup() ctx := s.Ctx @@ -264,8 +258,7 @@ func TestGetExecutionFuncWithIOData(t *testing.T) { args := []string{dummyExec} for _, tt := range tests { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) config.GetConfig().Output = tt.outputFormat execution.DefaultConfig.NodeID = tt.nodeID @@ -365,8 +358,7 @@ func TestGetExecutionFuncWithError(t *testing.T) { } args := []string{executionNameValue} - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) s.FetcherExt.OnFetchExecutionMatch(s.Ctx, mock.Anything, mock.Anything, mock.Anything).Return(nil, errors.New("execution NotFound")) err := getExecutionFunc(s.Ctx, args, s.CmdCtx) diff --git a/flytectl/cmd/get/get_test.go b/flytectl/cmd/get/get_test.go index c40394c785..c11e4339da 100644 --- a/flytectl/cmd/get/get_test.go +++ b/flytectl/cmd/get/get_test.go @@ -5,7 +5,6 @@ import ( "sort" "testing" - "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/stretchr/testify/assert" ) @@ -19,8 +18,6 @@ const workflowNameValue = "wf_name" const workflowVersionValue = "wf_version" const testDataFolder = "../testdata/" -var setup = testutils.Setup - const ( testDataTempFile = "temp-output-file" testDataNotExistentTempFile = "non-existent-dir/temp-output-file" diff --git a/flytectl/cmd/get/launch_plan_test.go b/flytectl/cmd/get/launch_plan_test.go index 87cc091535..7b1359b7ec 100644 --- a/flytectl/cmd/get/launch_plan_test.go +++ b/flytectl/cmd/get/launch_plan_test.go @@ -215,8 +215,7 @@ func getLaunchPlanSetup() { func TestGetLaunchPlanFuncWithError(t *testing.T) { t.Run("failure fetch latest", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) launchplan.DefaultConfig.Latest = true @@ -228,8 +227,7 @@ func TestGetLaunchPlanFuncWithError(t *testing.T) { }) t.Run("failure fetching version ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) launchplan.DefaultConfig.Version = "v1" @@ -241,8 +239,7 @@ func TestGetLaunchPlanFuncWithError(t *testing.T) { }) t.Run("failure fetching all version ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() launchplan.DefaultConfig.Filter = filters.Filters{} launchplan.DefaultConfig.Filter = filters.Filters{} @@ -254,8 +251,7 @@ func TestGetLaunchPlanFuncWithError(t *testing.T) { }) t.Run("failure fetching ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "launchplan1", "dummyProject", "dummyDomain", filters.Filters{}).Return(nil, fmt.Errorf("error fetching all version")) s.MockAdminClient.OnListLaunchPlansMatch(s.Ctx, resourceGetRequest).Return(nil, fmt.Errorf("error fetching all version")) @@ -266,8 +262,7 @@ func TestGetLaunchPlanFuncWithError(t *testing.T) { }) t.Run("failure fetching list", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() argsLp = []string{} s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", 
"dummyProject", "dummyDomain", filters.Filters{}).Return(nil, fmt.Errorf("error fetching all version")) @@ -278,8 +273,7 @@ func TestGetLaunchPlanFuncWithError(t *testing.T) { } func TestGetLaunchPlanFunc(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.FetcherExt.OnFetchAllVerOfLPMatch(mock.Anything, mock.Anything, "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.LaunchPlans, nil) err := getLaunchPlanFunc(s.Ctx, argsLp, s.CmdCtx) @@ -289,8 +283,7 @@ func TestGetLaunchPlanFunc(t *testing.T) { } func TestGetLaunchPlanFuncLatest(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() launchplan.DefaultConfig.Latest = true s.FetcherExt.OnFetchLPLatestVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(launchPlan2, nil) @@ -301,8 +294,7 @@ func TestGetLaunchPlanFuncLatest(t *testing.T) { } func TestGetLaunchPlanWithVersion(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() launchplan.DefaultConfig.Version = "v2" s.FetcherExt.OnFetchLPVersion(s.Ctx, "launchplan1", "v2", "dummyProject", "dummyDomain").Return(launchPlan2, nil) @@ -314,8 +306,7 @@ func TestGetLaunchPlanWithVersion(t *testing.T) { func TestGetLaunchPlans(t *testing.T) { t.Run("no workflow filter", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", "dummyProject", "dummyDomain", filters.Filters{}).Return(launchPlanListResponse.LaunchPlans, nil) argsLp = []string{} @@ -324,8 +315,7 @@ func TestGetLaunchPlans(t *testing.T) { s.TearDownAndVerify(t, `[{"id": {"name": "launchplan1","version": "v2"},"spec": {"workflowId": {"name": "workflow2"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:01Z"}},{"id": {"name": "launchplan1","version": "v1"},"spec": {"workflowId": {"name": "workflow1"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": 
{"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:00Z"}}]`) }) t.Run("workflow filter", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.FetcherExt.OnFetchAllVerOfLP(s.Ctx, "", "dummyProject", "dummyDomain", filters.Filters{ FieldSelector: "workflow.name=workflow2", @@ -337,8 +327,7 @@ func TestGetLaunchPlans(t *testing.T) { s.TearDownAndVerify(t, `[{"id": {"name": "launchplan1","version": "v2"},"spec": {"workflowId": {"name": "workflow2"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:01Z"}},{"id": {"name": "launchplan1","version": "v1"},"spec": {"workflowId": {"name": "workflow1"},"defaultInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}}},"closure": {"expectedInputs": {"parameters": {"generic": {"var": {"type": {"simple": "STRUCT"},"description": "generic"},"default": {"scalar": {"generic": {"foo": "foo"}}}},"numbers": {"var": {"type": {"collectionType": {"simple": "INTEGER"}},"description": "short desc"}},"numbers_count": {"var": {"type": {"simple": "INTEGER"},"description": "long description will be truncated in table"}},"run_local_at_count": {"var": {"type": {"simple": "INTEGER"},"description": "run_local_at_count"},"default": {"scalar": {"primitive": {"integer": "10"}}}}}},"createdAt": "1970-01-01T00:00:00Z"}}]`) }) t.Run("workflow filter error", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() 
argsLp = []string{} launchplan.DefaultConfig.Workflow = "workflow2" @@ -350,8 +339,7 @@ func TestGetLaunchPlans(t *testing.T) { } func TestGetLaunchPlansWithExecFile(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.MockAdminClient.OnListLaunchPlansMatch(s.Ctx, resourceListRequest).Return(launchPlanListResponse, nil) s.MockAdminClient.OnGetLaunchPlanMatch(s.Ctx, objectGetRequest).Return(launchPlan2, nil) @@ -386,8 +374,7 @@ workflow: launchplan1 } func TestGetLaunchPlanTableFunc(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.MockAdminClient.OnListLaunchPlansMatch(s.Ctx, resourceGetRequest).Return(launchPlanListResponse, nil) s.MockAdminClient.OnGetLaunchPlanMatch(s.Ctx, objectGetRequest).Return(launchPlan2, nil) diff --git a/flytectl/cmd/get/matchable_cluster_resource_attribute_test.go b/flytectl/cmd/get/matchable_cluster_resource_attribute_test.go index 43069edaa6..95e53e5b38 100644 --- a/flytectl/cmd/get/matchable_cluster_resource_attribute_test.go +++ b/flytectl/cmd/get/matchable_cluster_resource_attribute_test.go @@ -47,8 +47,7 @@ func TestGetClusterResourceAttributes(t *testing.T) { }, } t.Run("successful get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getClusterResourceAttributeSetup() // No args implying project domain attribute deletion @@ -61,8 +60,7 @@ func TestGetClusterResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","attributes":{"foo":"bar"}}`) }) t.Run("successful get project domain attribute and write to file", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getClusterResourceAttributeSetup() clusterresourceattribute.DefaultFetchConfig.AttrFile = testDataTempFile @@ -76,8 +74,7 @@ func TestGetClusterResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) }) t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getClusterResourceAttributeSetup() clusterresourceattribute.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile @@ -92,8 +89,7 @@ func TestGetClusterResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("failed get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getClusterResourceAttributeSetup() // No args implying project domain attribute deletion @@ -107,8 +103,7 @@ func TestGetClusterResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("successful get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getClusterResourceAttributeSetup() args := []string{"workflow"} @@ -122,8 +117,7 @@ func TestGetClusterResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","attributes":{"foo":"bar"}}`) }) t.Run("failed get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getClusterResourceAttributeSetup() args := []string{"workflow"} diff --git a/flytectl/cmd/get/matchable_execution_cluster_label_test.go b/flytectl/cmd/get/matchable_execution_cluster_label_test.go index 3ac42a87de..03a0bdba96 100644 --- 
a/flytectl/cmd/get/matchable_execution_cluster_label_test.go +++ b/flytectl/cmd/get/matchable_execution_cluster_label_test.go @@ -47,8 +47,7 @@ func TestGetExecutionClusterLabel(t *testing.T) { }, } t.Run("successful get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionClusterLabelSetup() // No args implying project domain attribute deletion @@ -61,8 +60,7 @@ func TestGetExecutionClusterLabel(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","value":"foo"}`) }) t.Run("successful get project domain attribute and write to file", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionClusterLabelSetup() executionclusterlabel.DefaultFetchConfig.AttrFile = testDataTempFile @@ -76,8 +74,7 @@ func TestGetExecutionClusterLabel(t *testing.T) { s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) }) t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionClusterLabelSetup() executionclusterlabel.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile @@ -92,8 +89,7 @@ func TestGetExecutionClusterLabel(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("failed to get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionClusterLabelSetup() // No args implying project domain attribute deletion @@ -107,8 +103,7 @@ func TestGetExecutionClusterLabel(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("successful get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionClusterLabelSetup() args := []string{"workflow"} @@ -122,8 +117,7 @@ func TestGetExecutionClusterLabel(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","value":"foo"}`) }) t.Run("failed to get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionClusterLabelSetup() args := []string{"workflow"} diff --git a/flytectl/cmd/get/matchable_execution_queue_attribute_test.go b/flytectl/cmd/get/matchable_execution_queue_attribute_test.go index 3dd8e235cf..74b8d4dd91 100644 --- a/flytectl/cmd/get/matchable_execution_queue_attribute_test.go +++ b/flytectl/cmd/get/matchable_execution_queue_attribute_test.go @@ -47,8 +47,7 @@ func TestGetExecutionQueueAttributes(t *testing.T) { }, } t.Run("successful get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionQueueAttributeSetup() // No args implying project domain attribute deletion @@ -61,8 +60,7 @@ func TestGetExecutionQueueAttributes(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","tags":["foo","bar"]}`) }) t.Run("successful get project domain attribute and write to file", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionQueueAttributeSetup() executionqueueattribute.DefaultFetchConfig.AttrFile = testDataTempFile @@ -76,8 +74,7 @@ func TestGetExecutionQueueAttributes(t *testing.T) { s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) }) t.Run("successful get project domain attribute and write to file failure", func(t 
*testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionQueueAttributeSetup() executionqueueattribute.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile @@ -92,8 +89,7 @@ func TestGetExecutionQueueAttributes(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("failed get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionQueueAttributeSetup() // No args implying project domain attribute deletion @@ -107,8 +103,7 @@ func TestGetExecutionQueueAttributes(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("successful get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionQueueAttributeSetup() args := []string{"workflow"} @@ -122,8 +117,7 @@ func TestGetExecutionQueueAttributes(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","tags":["foo","bar"]}`) }) t.Run("failed get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getExecutionQueueAttributeSetup() args := []string{"workflow"} diff --git a/flytectl/cmd/get/matchable_plugin_override_test.go b/flytectl/cmd/get/matchable_plugin_override_test.go index 025267a462..ba70299444 100644 --- a/flytectl/cmd/get/matchable_plugin_override_test.go +++ b/flytectl/cmd/get/matchable_plugin_override_test.go @@ -59,8 +59,7 @@ func TestGetPluginOverride(t *testing.T) { }, } t.Run("successful get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getPluginOverrideSetup() // No args implying project domain attribute deletion @@ -73,8 +72,7 @@ func TestGetPluginOverride(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","overrides":[{"task_type":"python_task","plugin_id":["plugin-override1","plugin-override2"]},{"task_type":"java_task","plugin_id":["plugin-override3","plugin-override3"],"missing_plugin_behavior":1}]}`) }) t.Run("successful get project domain attribute and write to file", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getPluginOverrideSetup() pluginoverride.DefaultFetchConfig.AttrFile = testDataTempFile @@ -88,8 +86,7 @@ func TestGetPluginOverride(t *testing.T) { s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) }) t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getPluginOverrideSetup() pluginoverride.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile @@ -104,8 +101,7 @@ func TestGetPluginOverride(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("failed get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getPluginOverrideSetup() // No args implying project domain attribute deletion @@ -119,8 +115,7 @@ func TestGetPluginOverride(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("successful get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getPluginOverrideSetup() args := []string{"workflow"} @@ -133,8 +128,7 @@ func TestGetPluginOverride(t *testing.T) { s.TearDownAndVerify(t, 
`{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","overrides":[{"task_type":"python_task","plugin_id":["plugin-override1","plugin-override2"]},{"task_type":"java_task","plugin_id":["plugin-override3","plugin-override3"],"missing_plugin_behavior":1}]}`) }) t.Run("failed get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getPluginOverrideSetup() args := []string{"workflow"} diff --git a/flytectl/cmd/get/matchable_task_resource_attribute_test.go b/flytectl/cmd/get/matchable_task_resource_attribute_test.go index b5e8887583..db830a0a29 100644 --- a/flytectl/cmd/get/matchable_task_resource_attribute_test.go +++ b/flytectl/cmd/get/matchable_task_resource_attribute_test.go @@ -54,8 +54,7 @@ func TestGetTaskResourceAttributes(t *testing.T) { }, } t.Run("successful get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskResourceAttributeSetup() // No args implying project domain attribute deletion @@ -68,8 +67,7 @@ func TestGetTaskResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","defaults":{"cpu":"1","memory":"150Mi"},"limits":{"cpu":"2","memory":"350Mi"}}`) }) t.Run("successful get project domain attribute and write to file", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskResourceAttributeSetup() taskresourceattribute.DefaultFetchConfig.AttrFile = testDataTempFile @@ -83,8 +81,7 @@ func TestGetTaskResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) }) t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskResourceAttributeSetup() taskresourceattribute.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile @@ -99,8 +96,7 @@ func TestGetTaskResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("failed get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskResourceAttributeSetup() // No args implying project domain attribute deletion @@ -114,8 +110,7 @@ func TestGetTaskResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("successful get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskResourceAttributeSetup() args := []string{"workflow"} @@ -129,8 +124,7 @@ func TestGetTaskResourceAttributes(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","defaults":{"cpu":"1","memory":"150Mi"},"limits":{"cpu":"2","memory":"350Mi"}}`) }) t.Run("failed get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskResourceAttributeSetup() args := []string{"workflow"} diff --git a/flytectl/cmd/get/matchable_workflow_execution_config_test.go b/flytectl/cmd/get/matchable_workflow_execution_config_test.go index 69b88ee900..2600e6ea8a 100644 --- a/flytectl/cmd/get/matchable_workflow_execution_config_test.go +++ b/flytectl/cmd/get/matchable_workflow_execution_config_test.go @@ -47,8 +47,7 @@ func TestGetWorkflowExecutionConfig(t *testing.T) { }, } t.Run("successful get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := 
testutils.Setup(t) getWorkflowExecutionConfigSetup() // No args implying project domain attribute deletion @@ -61,8 +60,7 @@ func TestGetWorkflowExecutionConfig(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","max_parallelism":5}`) }) t.Run("successful get project domain attribute and write to file", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowExecutionConfigSetup() workflowexecutionconfig.DefaultFetchConfig.AttrFile = testDataTempFile @@ -76,8 +74,7 @@ func TestGetWorkflowExecutionConfig(t *testing.T) { s.TearDownAndVerify(t, `wrote the config to file temp-output-file`) }) t.Run("successful get project domain attribute and write to file failure", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowExecutionConfigSetup() workflowexecutionconfig.DefaultFetchConfig.AttrFile = testDataNotExistentTempFile @@ -92,8 +89,7 @@ func TestGetWorkflowExecutionConfig(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("failed get project domain attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowExecutionConfigSetup() // No args implying project domain attribute deletion @@ -107,8 +103,7 @@ func TestGetWorkflowExecutionConfig(t *testing.T) { s.TearDownAndVerify(t, ``) }) t.Run("successful get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowExecutionConfigSetup() args := []string{"workflow"} @@ -122,8 +117,7 @@ func TestGetWorkflowExecutionConfig(t *testing.T) { s.TearDownAndVerify(t, `{"project":"dummyProject","domain":"dummyDomain","workflow":"workflow","max_parallelism":5}`) }) t.Run("failed get workflow attribute", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowExecutionConfigSetup() args := []string{"workflow"} diff --git a/flytectl/cmd/get/node_execution_test.go b/flytectl/cmd/get/node_execution_test.go index 588ea6033c..030b5c0262 100644 --- a/flytectl/cmd/get/node_execution_test.go +++ b/flytectl/cmd/get/node_execution_test.go @@ -158,8 +158,7 @@ func createDummyTaskExecutionForNode(nodeID string, taskID string) *admin.TaskEx func TestGetExecutionDetails(t *testing.T) { t.Run("successful get details default view", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx mockCmdCtx := s.CmdCtx @@ -226,8 +225,7 @@ func TestGetExecutionDetails(t *testing.T) { }) t.Run("successful get details default view for node-id", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx mockCmdCtx := s.CmdCtx @@ -291,8 +289,7 @@ func TestGetExecutionDetails(t *testing.T) { }) t.Run("failure task exec fetch", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx mockCmdCtx := s.CmdCtx diff --git a/flytectl/cmd/get/project_test.go b/flytectl/cmd/get/project_test.go index 7bcc55a236..7b53a77a67 100644 --- a/flytectl/cmd/get/project_test.go +++ b/flytectl/cmd/get/project_test.go @@ -51,8 +51,7 @@ func getProjectSetup() { } func TestListProjectFunc(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getProjectSetup() project.DefaultConfig.Filter = filters.Filters{} @@ -65,8 +64,7 @@ func TestListProjectFunc(t *testing.T) { } func TestGetProjectFunc(t *testing.T) { - s := testutils.Setup() 
- defer s.TearDown() + s := testutils.Setup(t) getProjectSetup() argsProject = []string{} @@ -80,8 +78,7 @@ func TestGetProjectFunc(t *testing.T) { } func TestGetProjectFuncError(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getProjectSetup() project.DefaultConfig.Filter = filters.Filters{ diff --git a/flytectl/cmd/get/task_test.go b/flytectl/cmd/get/task_test.go index 27e65d3fef..d0f817fd1e 100644 --- a/flytectl/cmd/get/task_test.go +++ b/flytectl/cmd/get/task_test.go @@ -170,8 +170,7 @@ func getTaskSetup() { func TestGetTaskFuncWithError(t *testing.T) { t.Run("failure fetch latest", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) @@ -184,8 +183,7 @@ func TestGetTaskFuncWithError(t *testing.T) { }) t.Run("failure fetching version ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) @@ -198,8 +196,7 @@ func TestGetTaskFuncWithError(t *testing.T) { }) t.Run("failure fetching all version ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) @@ -211,8 +208,7 @@ func TestGetTaskFuncWithError(t *testing.T) { }) t.Run("failure fetching ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(nil, fmt.Errorf("error fetching all version")) @@ -225,8 +221,7 @@ func TestGetTaskFuncWithError(t *testing.T) { }) t.Run("failure fetching list task", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getLaunchPlanSetup() taskConfig.DefaultConfig.Filter = filters.Filters{} @@ -242,8 +237,7 @@ func TestGetTaskFuncWithError(t *testing.T) { } func TestGetTaskFunc(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() taskConfig.DefaultConfig.Filter = filters.Filters{} @@ -329,8 +323,7 @@ func TestGetTaskFunc(t *testing.T) { } func TestGetTaskFuncWithTable(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() taskConfig.DefaultConfig.Filter = filters.Filters{} @@ -355,8 +348,7 @@ func TestGetTaskFuncWithTable(t *testing.T) { } func TestGetTaskFuncLatest(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() taskConfig.DefaultConfig.Filter = filters.Filters{} @@ -406,8 +398,7 @@ func TestGetTaskFuncLatest(t *testing.T) { } func TestGetTaskWithVersion(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() taskConfig.DefaultConfig.Filter = filters.Filters{} @@ -458,8 +449,7 @@ func TestGetTaskWithVersion(t *testing.T) { } func TestGetTasks(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() taskConfig.DefaultConfig.Filter = filters.Filters{} @@ -473,8 +463,7 @@ func TestGetTasks(t *testing.T) { } func TestGetTasksFilters(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() taskConfig.DefaultConfig.Filter = filters.Filters{ @@ -498,8 +487,7 @@ func TestGetTasksFilters(t *testing.T) { } func TestGetTaskWithExecFile(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getTaskSetup() 
s.MockAdminClient.OnListTasksMatch(s.Ctx, resourceListRequestTask).Return(taskListResponse, nil) diff --git a/flytectl/cmd/get/workflow_test.go b/flytectl/cmd/get/workflow_test.go index 3e01067750..20aa12e011 100644 --- a/flytectl/cmd/get/workflow_test.go +++ b/flytectl/cmd/get/workflow_test.go @@ -93,8 +93,7 @@ func getWorkflowSetup() { func TestGetWorkflowFuncWithError(t *testing.T) { t.Run("failure fetch latest", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) @@ -105,8 +104,7 @@ func TestGetWorkflowFuncWithError(t *testing.T) { }) t.Run("failure fetching version ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) @@ -118,8 +116,7 @@ func TestGetWorkflowFuncWithError(t *testing.T) { }) t.Run("failure fetching all version ", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() mockFetcher := new(mocks.AdminFetcherExtInterface) @@ -130,8 +127,7 @@ func TestGetWorkflowFuncWithError(t *testing.T) { }) t.Run("failure fetching ", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() workflow.DefaultConfig.Latest = true @@ -143,8 +139,7 @@ func TestGetWorkflowFuncWithError(t *testing.T) { }) t.Run("fetching all workflow success", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() var args []string @@ -155,8 +150,7 @@ func TestGetWorkflowFuncWithError(t *testing.T) { }) t.Run("fetching all workflow error", func(t *testing.T) { - s := setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() var args []string @@ -169,8 +163,7 @@ func TestGetWorkflowFuncWithError(t *testing.T) { } func TestGetWorkflowFuncLatestWithTable(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() workflow.DefaultConfig.Latest = true @@ -189,8 +182,7 @@ func TestGetWorkflowFuncLatestWithTable(t *testing.T) { } func TestListWorkflowFuncWithTable(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) getWorkflowSetup() workflow.DefaultConfig.Filter = filters.Filters{} diff --git a/flytectl/cmd/register/examples_test.go b/flytectl/cmd/register/examples_test.go index fc3996f185..3af84d0957 100644 --- a/flytectl/cmd/register/examples_test.go +++ b/flytectl/cmd/register/examples_test.go @@ -3,18 +3,21 @@ package register import ( "testing" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/stretchr/testify/assert" ) func TestRegisterExamplesFunc(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() args := []string{""} err := registerExamplesFunc(s.Ctx, args, s.CmdCtx) assert.NotNil(t, err) } func TestRegisterExamplesFuncErr(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() flytesnacks = "testingsnacks" args := []string{""} diff --git a/flytectl/cmd/register/files_test.go b/flytectl/cmd/register/files_test.go index 45827b7e13..1c468eb0a6 100644 --- a/flytectl/cmd/register/files_test.go +++ b/flytectl/cmd/register/files_test.go @@ -6,6 +6,7 @@ import ( "github.com/flyteorg/flyte/flytectl/cmd/config" rconfig "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/register" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" 
"github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service" "github.com/flyteorg/flyte/flytestdlib/contextutils" @@ -22,7 +23,8 @@ const ( func TestRegisterFromFiles(t *testing.T) { t.Run("Valid registration", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/valid-parent-folder-register.tar"} @@ -34,7 +36,8 @@ func TestRegisterFromFiles(t *testing.T) { assert.Nil(t, err) }) t.Run("Valid fast registration", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) registerFilesSetup() @@ -59,7 +62,8 @@ func TestRegisterFromFiles(t *testing.T) { assert.Nil(t, err) }) t.Run("Register a workflow with a failure node", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) registerFilesSetup() @@ -84,7 +88,8 @@ func TestRegisterFromFiles(t *testing.T) { assert.Nil(t, err) }) t.Run("Failed fast registration while uploading the codebase", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) @@ -105,7 +110,8 @@ func TestRegisterFromFiles(t *testing.T) { assert.Nil(t, err) }) t.Run("Failed registration because of invalid files", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) @@ -125,7 +131,8 @@ func TestRegisterFromFiles(t *testing.T) { assert.NotNil(t, err) }) t.Run("Failure registration of fast serialize", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) @@ -148,7 +155,8 @@ func TestRegisterFromFiles(t *testing.T) { assert.Equal(t, fmt.Errorf("failed"), err) }) t.Run("Failure registration of fast serialize continue on error", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) @@ -172,7 +180,8 @@ func TestRegisterFromFiles(t *testing.T) { assert.Equal(t, fmt.Errorf("failed"), err) }) t.Run("Valid registration of fast serialize", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) @@ -196,7 +205,8 @@ func TestRegisterFromFiles(t *testing.T) { }) t.Run("Registration with proto files ", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) diff --git a/flytectl/cmd/register/register_test.go b/flytectl/cmd/register/register_test.go index 43e89a3961..bf0a0affbb 100644 --- a/flytectl/cmd/register/register_test.go +++ b/flytectl/cmd/register/register_test.go @@ -6,7 +6,6 @@ import ( "sort" "testing" - u 
"github.com/flyteorg/flyte/flytectl/cmd/testutils" "github.com/stretchr/testify/assert" ) @@ -14,8 +13,6 @@ var ( GetDoFunc func(req *http.Request) (*http.Response, error) ) -var setup = u.Setup - func TestRegisterCommand(t *testing.T) { registerCommand := RemoteRegisterCommand() assert.Equal(t, registerCommand.Use, "register") diff --git a/flytectl/cmd/register/register_util_test.go b/flytectl/cmd/register/register_util_test.go index b6625c27de..e068c0f64a 100644 --- a/flytectl/cmd/register/register_util_test.go +++ b/flytectl/cmd/register/register_util_test.go @@ -13,6 +13,7 @@ import ( "testing" rconfig "github.com/flyteorg/flyte/flytectl/cmd/config/subcommand/register" + "github.com/flyteorg/flyte/flytectl/cmd/testutils" ghMocks "github.com/flyteorg/flyte/flytectl/pkg/github/mocks" "github.com/flyteorg/flyte/flyteidl/clients/go/admin/mocks" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" @@ -61,7 +62,8 @@ func registerFilesSetup() { } func TestGetSortedArchivedFileWithParentFolderList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/valid-parent-folder-register.tar"} @@ -78,7 +80,8 @@ func TestGetSortedArchivedFileWithParentFolderList(t *testing.T) { } func TestGetSortedArchivedFileList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/valid-register.tar"} @@ -95,7 +98,8 @@ func TestGetSortedArchivedFileList(t *testing.T) { } func TestGetSortedArchivedFileUnorderedList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/valid-unordered-register.tar"} @@ -112,7 +116,8 @@ func TestGetSortedArchivedFileUnorderedList(t *testing.T) { } func TestGetSortedArchivedCorruptedFileList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/invalid.tar"} @@ -125,7 +130,8 @@ func TestGetSortedArchivedCorruptedFileList(t *testing.T) { } func TestGetSortedArchivedTgzList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/valid-register.tgz"} @@ -142,7 +148,8 @@ func TestGetSortedArchivedTgzList(t *testing.T) { } func TestGetSortedArchivedCorruptedTgzFileList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/invalid.tgz"} fileList, tmpDir, err := GetSerializeOutputFiles(s.Ctx, args, rconfig.DefaultFilesConfig.Archive) @@ -154,7 +161,8 @@ func TestGetSortedArchivedCorruptedTgzFileList(t *testing.T) { } func TestGetSortedArchivedInvalidArchiveFileList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"testdata/invalid-extension-register.zip"} @@ -168,7 +176,8 @@ func TestGetSortedArchivedInvalidArchiveFileList(t *testing.T) { } func TestGetSortedArchivedFileThroughInvalidHttpList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + rconfig.DefaultFilesConfig.Archive = true args := []string{"http://invalidhost:invalidport/testdata/valid-register.tar"} fileList, tmpDir, err := GetSerializeOutputFiles(s.Ctx, args, rconfig.DefaultFilesConfig.Archive) @@ -180,7 +189,8 @@ func TestGetSortedArchivedFileThroughInvalidHttpList(t *testing.T) { } func 
TestGetSortedArchivedFileThroughValidHttpList(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"http://dummyhost:80/testdata/valid-register.tar"} @@ -197,7 +207,8 @@ func TestGetSortedArchivedFileThroughValidHttpList(t *testing.T) { } func TestGetSortedArchivedFileThroughValidHttpWithNullContextList(t *testing.T) { - setup() + testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.Archive = true args := []string{"http://dummyhost:80/testdata/valid-register.tar"} @@ -220,7 +231,8 @@ func Test_getTotalSize(t *testing.T) { func TestRegisterFile(t *testing.T) { t.Run("Successful run", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, nil) args := []string{"testdata/69_core.flyte_basics.lp.greet_1.pb"} @@ -230,7 +242,8 @@ func TestRegisterFile(t *testing.T) { assert.Nil(t, err) }) t.Run("Failed Scheduled launch plan registration", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() s.MockAdminClient.OnCreateLaunchPlanMatch(mock.Anything, mock.Anything).Return(nil, nil) variableMap := map[string]*core.Variable{ @@ -284,7 +297,8 @@ func TestRegisterFile(t *testing.T) { assert.NotNil(t, err) }) t.Run("Non existent file", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() args := []string{"testdata/non-existent.pb"} var registerResults []Result @@ -295,7 +309,8 @@ func TestRegisterFile(t *testing.T) { assert.NotNil(t, err) }) t.Run("unmarhal failure", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() args := []string{"testdata/valid-register.tar"} var registerResults []Result @@ -306,7 +321,8 @@ func TestRegisterFile(t *testing.T) { assert.NotNil(t, err) }) t.Run("AlreadyExists", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, status.Error(codes.AlreadyExists, "AlreadyExists")) @@ -319,7 +335,8 @@ func TestRegisterFile(t *testing.T) { assert.Nil(t, err) }) t.Run("Registration Error", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() s.MockAdminClient.OnCreateTaskMatch(mock.Anything, mock.Anything).Return(nil, status.Error(codes.InvalidArgument, "Invalid")) @@ -335,7 +352,8 @@ func TestRegisterFile(t *testing.T) { func TestHydrateLaunchPlanSpec(t *testing.T) { t.Run("IamRole override", func(t *testing.T) { - setup() + testutils.Setup(t) + registerFilesSetup() rconfig.DefaultFilesConfig.AssumableIamRole = "iamRole" lpSpec := &admin.LaunchPlanSpec{} @@ -376,7 +394,8 @@ func TestHydrateLaunchPlanSpec(t *testing.T) { func TestUploadFastRegisterArtifact(t *testing.T) { t.Run("Successful upload", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) store, err := storage.NewDataStore(&storage.Config{ @@ -394,7 +413,8 @@ func TestUploadFastRegisterArtifact(t *testing.T) { assert.Nil(t, err) }) t.Run("Failed upload", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + testScope := promutils.NewTestScope() labeled.SetMetricKeys(contextutils.AppNameKey, contextutils.ProjectKey, contextutils.DomainKey) store, err := storage.NewDataStore(&storage.Config{ @@ -472,7 +492,8 @@ func 
TestGetAllFlytesnacksExample(t *testing.T) { func TestRegister(t *testing.T) { t.Run("Failed to register", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() node := &admin.NodeExecution{} err := register(s.Ctx, node, s.CmdCtx, rconfig.DefaultFilesConfig.DryRun, rconfig.DefaultFilesConfig.EnableSchedule) @@ -685,20 +706,23 @@ func TestLeftDiff(t *testing.T) { func TestValidateLaunchSpec(t *testing.T) { ctx := context.Background() t.Run("nil launchplan spec", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() err := validateLaunchSpec(ctx, nil, s.CmdCtx) assert.Nil(t, err) }) t.Run("launchplan spec with nil workflow id", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() lpSpec := &admin.LaunchPlanSpec{} err := validateLaunchSpec(ctx, lpSpec, s.CmdCtx) assert.Nil(t, err) }) t.Run("launchplan spec with empty metadata", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() lpSpec := &admin.LaunchPlanSpec{ WorkflowId: &core.Identifier{ @@ -712,7 +736,8 @@ func TestValidateLaunchSpec(t *testing.T) { assert.Nil(t, err) }) t.Run("launchplan spec with metadata and empty schedule", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() lpSpec := &admin.LaunchPlanSpec{ WorkflowId: &core.Identifier{ @@ -727,7 +752,8 @@ func TestValidateLaunchSpec(t *testing.T) { assert.Nil(t, err) }) t.Run("validate spec failed to fetch workflow", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() s.FetcherExt.OnFetchWorkflowVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) @@ -752,7 +778,8 @@ func TestValidateLaunchSpec(t *testing.T) { assert.Equal(t, "failed", err.Error()) }) t.Run("failed to fetch workflow", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() s.FetcherExt.OnFetchWorkflowVersionMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(nil, fmt.Errorf("failed")) @@ -774,7 +801,8 @@ func TestValidateLaunchSpec(t *testing.T) { assert.Equal(t, "failed", err.Error()) }) t.Run("launchplan spec missing required param schedule", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() variableMap := map[string]*core.Variable{ "var1": { @@ -836,7 +864,8 @@ func TestValidateLaunchSpec(t *testing.T) { assert.Contains(t, err.Error(), "param values are missing on scheduled workflow for the following params") }) t.Run("launchplan spec non empty schedule default param success", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() variableMap := map[string]*core.Variable{ "var1": { @@ -941,7 +970,8 @@ func TestValidateLaunchSpec(t *testing.T) { }) t.Run("launchplan spec non empty schedule required param without value fail", func(t *testing.T) { - s := setup() + s := testutils.Setup(t) + registerFilesSetup() variableMap := map[string]*core.Variable{ "var1": { diff --git a/flytectl/cmd/sandbox/exec_test.go b/flytectl/cmd/sandbox/exec_test.go index 1fbe8dcadd..b7ff48e01e 100644 --- a/flytectl/cmd/sandbox/exec_test.go +++ b/flytectl/cmd/sandbox/exec_test.go @@ -50,7 +50,7 @@ func TestSandboxClusterExec(t *testing.T) { func TestSandboxClusterExecWithoutCmd(t *testing.T) { mockDocker := &mocks.Docker{} reader := bufio.NewReader(strings.NewReader("test")) - s := testutils.Setup() + s := testutils.Setup(t) ctx := s.Ctx 
 mockDocker.OnContainerList(ctx, container.ListOptions{All: true}).Return([]types.Container{
diff --git a/flytectl/cmd/sandbox/status_test.go b/flytectl/cmd/sandbox/status_test.go
index 41f43fadc7..03aa302e70 100644
--- a/flytectl/cmd/sandbox/status_test.go
+++ b/flytectl/cmd/sandbox/status_test.go
@@ -14,14 +14,14 @@ import (
 func TestSandboxStatus(t *testing.T) {
 	t.Run("Sandbox status with zero result", func(t *testing.T) {
 		mockDocker := &mocks.Docker{}
-		s := testutils.Setup()
+		s := testutils.Setup(t)
 		mockDocker.OnContainerList(s.Ctx, container.ListOptions{All: true}).Return([]types.Container{}, nil)
 		docker.Client = mockDocker
 		err := sandboxClusterStatus(s.Ctx, []string{}, s.CmdCtx)
 		assert.Nil(t, err)
 	})
 	t.Run("Sandbox status with running sandbox", func(t *testing.T) {
-		s := testutils.Setup()
+		s := testutils.Setup(t)
 		ctx := s.Ctx
 		mockDocker := &mocks.Docker{}
 		mockDocker.OnContainerList(ctx, container.ListOptions{All: true}).Return([]types.Container{
diff --git a/flytectl/cmd/sandbox/teardown_test.go b/flytectl/cmd/sandbox/teardown_test.go
index cb8c765138..8bead79cdb 100644
--- a/flytectl/cmd/sandbox/teardown_test.go
+++ b/flytectl/cmd/sandbox/teardown_test.go
@@ -20,7 +20,7 @@ func TestTearDownClusterFunc(t *testing.T) {
 	var containers []types.Container
 	_ = util.SetupFlyteDir()
 	_ = util.WriteIntoFile([]byte("data"), configutil.FlytectlConfig)
-	s := testutils.Setup()
+	s := testutils.Setup(t)
 	ctx := s.Ctx
 	mockDocker := &mocks.Docker{}
 	mockDocker.OnContainerList(ctx, container.ListOptions{All: true}).Return(containers, nil)
diff --git a/flytectl/cmd/testutils/test_utils.go b/flytectl/cmd/testutils/test_utils.go
index 1e2bba1365..037d0d8374 100644
--- a/flytectl/cmd/testutils/test_utils.go
+++ b/flytectl/cmd/testutils/test_utils.go
@@ -41,8 +41,7 @@ type TestStruct struct {
 	Stderr *os.File
 }
-// Make sure to call TearDown after using this function
-func Setup() (s TestStruct) {
+func Setup(t *testing.T) (s TestStruct) {
 	s.Ctx = context.Background()
 	s.Reader, s.Writer, s.Err = os.Pipe()
 	if s.Err != nil {
@@ -67,12 +66,13 @@ func Setup() (s TestStruct) {
 	config.GetConfig().Domain = domainValue
 	config.GetConfig().Output = output
-	return s
-}
+	// We need to make sure that the original file descriptors are restored after the test
+	t.Cleanup(func() {
+		os.Stdout = s.StdOut
+		os.Stderr = s.Stderr
+	})
-func (s *TestStruct) TearDown() {
-	os.Stdout = s.StdOut
-	os.Stderr = s.Stderr
+	return s
 }
 // TearDownAndVerify TODO: Change this to verify log lines from context
diff --git a/flytectl/cmd/update/execution_test.go b/flytectl/cmd/update/execution_test.go
index d8e2db59e7..fbcb0b02e9 100644
--- a/flytectl/cmd/update/execution_test.go
+++ b/flytectl/cmd/update/execution_test.go
@@ -16,6 +16,7 @@ import (
 func TestExecutionCanBeActivated(t *testing.T) {
 	testExecutionUpdate(
+		t,
 		/* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) {
 			execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED
 			config.Activate = true
@@ -34,6 +35,7 @@ func TestExecutionCanBeActivated(t *testing.T) {
 func TestExecutionCanBeArchived(t *testing.T) {
 	testExecutionUpdate(
+		t,
 		/* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) {
 			execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ACTIVE
 			config.Archive = true
@@ -52,6 +54,7 @@ func TestExecutionCanBeArchived(t *testing.T) {
 func TestExecutionCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) {
 	testExecutionUpdate(
+
t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { config.Activate = true config.Archive = true @@ -64,6 +67,7 @@ func TestExecutionCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { func TestExecutionUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { testExecutionUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ACTIVE config.Activate = true @@ -77,6 +81,7 @@ func TestExecutionUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { func TestExecutionUpdateWithoutForceFlagFails(t *testing.T) { testExecutionUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED config.Activate = true @@ -90,6 +95,7 @@ func TestExecutionUpdateWithoutForceFlagFails(t *testing.T) { func TestExecutionUpdateDoesNothingWithDryRunFlag(t *testing.T) { testExecutionUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED config.Activate = true @@ -104,6 +110,7 @@ func TestExecutionUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestForceFlagIsIgnoredWithDryRunDuringExecutionUpdate(t *testing.T) { t.Run("without --force", func(t *testing.T) { testExecutionUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED config.Activate = true @@ -119,6 +126,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringExecutionUpdate(t *testing.T) { t.Run("with --force", func(t *testing.T) { testExecutionUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution) { execution.Closure.StateChangeDetails.State = admin.ExecutionState_EXECUTION_ARCHIVED config.Activate = true @@ -135,6 +143,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringExecutionUpdate(t *testing.T) { func TestExecutionUpdateFailsWhenExecutionDoesNotExist(t *testing.T) { testExecutionUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) { s.FetcherExt. OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain). @@ -153,6 +162,7 @@ func TestExecutionUpdateFailsWhenExecutionDoesNotExist(t *testing.T) { func TestExecutionUpdateFailsWhenAdminClientFails(t *testing.T) { testExecutionUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) { s.FetcherExt. OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain). 
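The test_utils.go hunk above is the heart of this change: Setup() plus a manual TearDown() becomes Setup(t), which registers restoration of os.Stdout and os.Stderr through t.Cleanup, so callers no longer need a matching defer. The sketch below isolates that pattern under stated assumptions; setupOutputCapture and its body are illustrative stand-ins, not code from this patch.

package example

import (
	"fmt"
	"io"
	"os"
	"testing"
)

// setupOutputCapture redirects os.Stdout into a pipe for the duration of one
// test and registers restoration with t.Cleanup, mirroring the Setup(t)
// pattern adopted in this patch. (Hypothetical helper, for illustration only.)
func setupOutputCapture(t *testing.T) (r *os.File, w *os.File) {
	origStdout := os.Stdout

	r, w, err := os.Pipe()
	if err != nil {
		t.Fatalf("failed to create pipe: %v", err)
	}
	os.Stdout = w

	// Cleanups run when the test (or subtest) finishes, even on failure,
	// which is what makes the explicit defer s.TearDown() calls removable.
	t.Cleanup(func() {
		os.Stdout = origStdout
		_ = w.Close()
		_ = r.Close()
	})

	return r, w
}

func TestCaptureExample(t *testing.T) {
	r, _ := setupOutputCapture(t)

	fmt.Fprint(os.Stdout, "hello") // goes into the pipe, not the terminal

	buf := make([]byte, 5)
	if _, err := io.ReadFull(r, buf); err != nil {
		t.Fatalf("reading captured output: %v", err)
	}
	if string(buf) != "hello" {
		t.Fatalf("got %q, want %q", string(buf), "hello")
	}
	// No defer needed: t.Cleanup restores os.Stdout when this test returns.
}
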
@@ -174,8 +184,7 @@ func TestExecutionUpdateFailsWhenAdminClientFails(t *testing.T) { } func TestExecutionUpdateRequiresExecutionName(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) err := updateExecutionFunc(s.Ctx, nil, s.CmdCtx) @@ -183,10 +192,12 @@ func TestExecutionUpdateRequiresExecutionName(t *testing.T) { } func testExecutionUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution), asserter func(s *testutils.TestStruct, err error), ) { testExecutionUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, execution *admin.Execution) { s.FetcherExt. OnFetchExecution(s.Ctx, execution.Id.Name, execution.Id.Project, execution.Id.Domain). @@ -201,12 +212,12 @@ func testExecutionUpdate( } func testExecutionUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, execution *admin.Execution), setup func(s *testutils.TestStruct, config *execution.UpdateConfig, execution *admin.Execution), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) target := newTestExecution() @@ -217,7 +228,6 @@ func testExecutionUpdateWithMockSetup( execution.UConfig = &execution.UpdateConfig{} if setup != nil { setup(&s, execution.UConfig, target) - defer s.TearDown() } args := []string{target.Id.Name} diff --git a/flytectl/cmd/update/launch_plan_meta_test.go b/flytectl/cmd/update/launch_plan_meta_test.go index 63d4ded737..aeb2e1638d 100644 --- a/flytectl/cmd/update/launch_plan_meta_test.go +++ b/flytectl/cmd/update/launch_plan_meta_test.go @@ -13,7 +13,7 @@ import ( ) func TestLaunchPlanMetadataCanBeActivated(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -31,7 +31,7 @@ func TestLaunchPlanMetadataCanBeActivated(t *testing.T) { } func TestLaunchPlanMetadataCanBeArchived(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE config.Archive = true @@ -49,7 +49,7 @@ func TestLaunchPlanMetadataCanBeArchived(t *testing.T) { } func TestLaunchPlanMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { config.Activate = true config.Archive = true @@ -61,7 +61,7 @@ func TestLaunchPlanMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing. 
} func TestLaunchPlanMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE config.Activate = true @@ -74,7 +74,7 @@ func TestLaunchPlanMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) } func TestLaunchPlanMetadataUpdateWithoutForceFlagFails(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -87,7 +87,7 @@ func TestLaunchPlanMetadataUpdateWithoutForceFlagFails(t *testing.T) { } func TestLaunchPlanMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -101,7 +101,7 @@ func TestLaunchPlanMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanMetadataUpdate(t *testing.T) { t.Run("without --force", func(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -116,7 +116,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanMetadataUpdate(t *testing.T }) t.Run("with --force", func(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_LAUNCH_PLAN, + testNamedEntityUpdate(t, core.ResourceType_LAUNCH_PLAN, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -133,6 +133,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanMetadataUpdate(t *testing.T func TestLaunchPlanMetadataUpdateFailsWhenLaunchPlanDoesNotExist(t *testing.T) { testNamedEntityUpdateWithMockSetup( + t, core.ResourceType_LAUNCH_PLAN, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. @@ -154,6 +155,7 @@ func TestLaunchPlanMetadataUpdateFailsWhenLaunchPlanDoesNotExist(t *testing.T) { func TestLaunchPlanMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { testNamedEntityUpdateWithMockSetup( + t, core.ResourceType_LAUNCH_PLAN, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. 
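The hunks in these update tests all make the same mechanical change: shared helpers such as testExecutionUpdate, testNamedEntityUpdate, and testLaunchPlanUpdate now take the caller's *testing.T as their first argument, call testutils.Setup(t) themselves, and drop the defer s.TearDown() calls because cleanup is already registered on that t. A minimal sketch of that shape follows; setupEnv and testUpdateCase are hypothetical names, not helpers from this patch.

package example

import (
	"errors"
	"os"
	"testing"
)

// setupEnv stands in for testutils.Setup(t): it mutates process-global state
// and registers restoration with t.Cleanup, so every caller gets teardown
// scoped to its own test or subtest without a defer.
func setupEnv(t *testing.T) {
	old, had := os.LookupEnv("EXAMPLE_MODE")
	if err := os.Setenv("EXAMPLE_MODE", "test"); err != nil {
		t.Fatalf("setenv: %v", err)
	}
	t.Cleanup(func() {
		if had {
			_ = os.Setenv("EXAMPLE_MODE", old)
		} else {
			_ = os.Unsetenv("EXAMPLE_MODE")
		}
	})
}

// testUpdateCase mirrors the helper shape used in these diffs: it receives the
// current test's *testing.T, performs setup on its behalf, and relies on the
// cleanup registered inside setupEnv(t) once the calling test finishes.
func testUpdateCase(
	t *testing.T,
	setup func(),
	asserter func(t *testing.T, err error),
) {
	t.Helper()
	setupEnv(t) // teardown is tied to the caller's test lifetime

	if setup != nil {
		setup()
	}

	// Stand-in for invoking the command under test.
	err := errors.New("cannot activate and archive at the same time")

	asserter(t, err)
}

func TestUpdateRejectsConflictingFlags(t *testing.T) {
	testUpdateCase(t,
		/* setup */ func() {},
		/* assert */ func(t *testing.T, err error) {
			if err == nil {
				t.Fatal("expected an error")
			}
		})
}
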
@@ -178,8 +180,7 @@ func TestLaunchPlanMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { } func TestLaunchPlanMetadataUpdateRequiresLaunchPlanName(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) config := &NamedEntityConfig{} diff --git a/flytectl/cmd/update/launch_plan_test.go b/flytectl/cmd/update/launch_plan_test.go index 5704702a2e..249a810118 100644 --- a/flytectl/cmd/update/launch_plan_test.go +++ b/flytectl/cmd/update/launch_plan_test.go @@ -16,6 +16,7 @@ import ( func TestLaunchPlanCanBeActivated(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_INACTIVE config.Activate = true @@ -34,6 +35,7 @@ func TestLaunchPlanCanBeActivated(t *testing.T) { func TestLaunchPlanCanBeArchived(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_ACTIVE config.Archive = true @@ -52,6 +54,7 @@ func TestLaunchPlanCanBeArchived(t *testing.T) { func TestLaunchPlanCanBeDeactivated(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_ACTIVE config.Deactivate = true @@ -70,6 +73,7 @@ func TestLaunchPlanCanBeDeactivated(t *testing.T) { func TestLaunchPlanCannotBeActivatedAndDeactivatedAtTheSameTime(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { config.Activate = true config.Deactivate = true @@ -82,6 +86,7 @@ func TestLaunchPlanCannotBeActivatedAndDeactivatedAtTheSameTime(t *testing.T) { func TestLaunchPlanUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_ACTIVE config.Activate = true @@ -95,6 +100,7 @@ func TestLaunchPlanUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { func TestLaunchPlanUpdateWithoutForceFlagFails(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_INACTIVE config.Activate = true @@ -108,6 +114,7 @@ func TestLaunchPlanUpdateWithoutForceFlagFails(t *testing.T) { func TestLaunchPlanUpdateDoesNothingWithDryRunFlag(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_INACTIVE config.Activate = true @@ -122,6 +129,7 @@ func TestLaunchPlanUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanUpdate(t *testing.T) { t.Run("without --force", func(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_INACTIVE config.Activate = true @@ -137,6 +145,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanUpdate(t *testing.T) { t.Run("with --force", func(t *testing.T) { testLaunchPlanUpdate( + t, /* setup */ func(s *testutils.TestStruct, config 
*launchplan.UpdateConfig, launchplan *admin.LaunchPlan) { launchplan.Closure.State = admin.LaunchPlanState_INACTIVE config.Activate = true @@ -153,6 +162,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringLaunchPlanUpdate(t *testing.T) { func TestLaunchPlanUpdateFailsWhenLaunchPlanDoesNotExist(t *testing.T) { testLaunchPlanUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, launchplan *admin.LaunchPlan) { s.MockAdminClient. OnGetLaunchPlanMatch( @@ -173,6 +183,7 @@ func TestLaunchPlanUpdateFailsWhenLaunchPlanDoesNotExist(t *testing.T) { func TestLaunchPlanUpdateFailsWhenAdminClientFails(t *testing.T) { testLaunchPlanUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, launchplan *admin.LaunchPlan) { s.MockAdminClient. OnGetLaunchPlanMatch( @@ -196,8 +207,7 @@ func TestLaunchPlanUpdateFailsWhenAdminClientFails(t *testing.T) { } func TestLaunchPlanUpdateRequiresLaunchPlanName(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) launchplan.UConfig = &launchplan.UpdateConfig{} @@ -211,8 +221,7 @@ func TestLaunchPlanUpdateRequiresLaunchPlanName(t *testing.T) { } func TestLaunchPlanUpdateRequiresLaunchPlanVersion(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) launchplan.UConfig = &launchplan.UpdateConfig{} @@ -226,10 +235,12 @@ func TestLaunchPlanUpdateRequiresLaunchPlanVersion(t *testing.T) { } func testLaunchPlanUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan), asserter func(s *testutils.TestStruct, err error), ) { testLaunchPlanUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, launchplan *admin.LaunchPlan) { s.MockAdminClient. OnGetLaunchPlanMatch( @@ -246,12 +257,12 @@ func testLaunchPlanUpdate( } func testLaunchPlanUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, launchplan *admin.LaunchPlan), setup func(s *testutils.TestStruct, config *launchplan.UpdateConfig, launchplan *admin.LaunchPlan), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) target := newTestLaunchPlan() @@ -262,7 +273,6 @@ func testLaunchPlanUpdateWithMockSetup( launchplan.UConfig = &launchplan.UpdateConfig{} if setup != nil { setup(&s, launchplan.UConfig, target) - defer s.TearDown() } args := []string{target.Id.Name} diff --git a/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go b/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go index c902794685..b7288d6dcc 100644 --- a/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go +++ b/flytectl/cmd/update/matchable_cluster_resource_attribute_test.go @@ -20,6 +20,7 @@ const ( func TestClusterResourceAttributeUpdateRequiresAttributeFile(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ nil, /* assert */ func(s *testutils.TestStruct, err error) { assert.ErrorContains(t, err, "attrFile is mandatory") @@ -29,6 +30,7 @@ func TestClusterResourceAttributeUpdateRequiresAttributeFile(t *testing.T) { func TestClusterResourceAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataNonExistentFile config.Force = true @@ -42,6 +44,7 @@ func 
TestClusterResourceAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *tes func TestClusterResourceAttributeUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataInvalidAttrFile config.Force = true @@ -56,6 +59,7 @@ func TestClusterResourceAttributeUpdateFailsWhenAttributeFileIsMalformed(t *test func TestClusterResourceAttributeUpdateHappyPath(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowClusterResourceAttributesFilePath config.Force = true @@ -69,6 +73,7 @@ func TestClusterResourceAttributeUpdateHappyPath(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainClusterResourceAttributesFilePath config.Force = true @@ -82,6 +87,7 @@ func TestClusterResourceAttributeUpdateHappyPath(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectClusterResourceAttributesFilePath config.Force = true @@ -97,6 +103,7 @@ func TestClusterResourceAttributeUpdateHappyPath(t *testing.T) { func TestClusterResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowClusterResourceAttributesFilePath config.Force = false @@ -109,6 +116,7 @@ func TestClusterResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainClusterResourceAttributesFilePath config.Force = false @@ -121,6 +129,7 @@ func TestClusterResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectClusterResourceAttributesFilePath config.Force = false @@ -135,6 +144,7 @@ func TestClusterResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { func TestClusterResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowClusterResourceAttributesFilePath config.DryRun = true @@ -147,6 +157,7 @@ func TestClusterResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { 
testProjectDomainClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainClusterResourceAttributesFilePath config.DryRun = true @@ -159,6 +170,7 @@ func TestClusterResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectClusterResourceAttributesFilePath config.DryRun = true @@ -173,6 +185,7 @@ func TestClusterResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow without --force", func(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowClusterResourceAttributesFilePath config.Force = false @@ -186,6 +199,7 @@ func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) t.Run("workflow with --force", func(t *testing.T) { testWorkflowClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowClusterResourceAttributesFilePath config.Force = true @@ -199,6 +213,7 @@ func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) t.Run("domain without --force", func(t *testing.T) { testProjectDomainClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainClusterResourceAttributesFilePath config.Force = false @@ -212,6 +227,7 @@ func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) t.Run("domain with --force", func(t *testing.T) { testProjectDomainClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainClusterResourceAttributesFilePath config.Force = true @@ -225,6 +241,7 @@ func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) t.Run("project without --force", func(t *testing.T) { testProjectClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectClusterResourceAttributesFilePath config.Force = false @@ -238,6 +255,7 @@ func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) t.Run("project with --force", func(t *testing.T) { testProjectClusterResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectClusterResourceAttributesFilePath config.Force = true @@ -253,6 +271,7 @@ func TestClusterResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("workflow", func(t *testing.T) { 
testWorkflowClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE). @@ -274,6 +293,7 @@ func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testi t.Run("domain", func(t *testing.T) { testProjectDomainClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE). @@ -295,6 +315,7 @@ func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testi t.Run("project", func(t *testing.T) { testProjectClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE). @@ -318,6 +339,7 @@ func TestClusterResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testi func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE). @@ -338,6 +360,7 @@ func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE). @@ -358,6 +381,7 @@ func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE). @@ -378,10 +402,12 @@ func TestClusterResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { } func testWorkflowClusterResourceAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { testWorkflowClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_CLUSTER_RESOURCE). 
@@ -396,12 +422,12 @@ func testWorkflowClusterResourceAttributeUpdate( } func testWorkflowClusterResourceAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) clusterresourceattribute.DefaultUpdateConfig = &clusterresourceattribute.AttrUpdateConfig{} target := newTestWorkflowClusterResourceAttribute() @@ -412,7 +438,6 @@ func testWorkflowClusterResourceAttributeUpdateWithMockSetup( if setup != nil { setup(&s, clusterresourceattribute.DefaultUpdateConfig, target) - defer s.TearDown() } err := updateClusterResourceAttributesFunc(s.Ctx, nil, s.CmdCtx) @@ -446,10 +471,12 @@ func newTestWorkflowClusterResourceAttribute() *admin.WorkflowAttributes { } func testProjectClusterResourceAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_CLUSTER_RESOURCE). @@ -464,12 +491,12 @@ func testProjectClusterResourceAttributeUpdate( } func testProjectClusterResourceAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) clusterresourceattribute.DefaultUpdateConfig = &clusterresourceattribute.AttrUpdateConfig{} target := newTestProjectClusterResourceAttribute() @@ -480,7 +507,6 @@ func testProjectClusterResourceAttributeUpdateWithMockSetup( if setup != nil { setup(&s, clusterresourceattribute.DefaultUpdateConfig, target) - defer s.TearDown() } err := updateClusterResourceAttributesFunc(s.Ctx, nil, s.CmdCtx) @@ -512,10 +538,12 @@ func newTestProjectClusterResourceAttribute() *admin.ProjectAttributes { } func testProjectDomainClusterResourceAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectDomainClusterResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_CLUSTER_RESOURCE). 
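Every *WithMockSetup helper in this file now takes the caller's *testing.T and replaces the pair `s := testutils.Setup()` / `defer s.TearDown()` with a single `s := testutils.Setup(t)`. The testutils change itself is not part of this hunk series, so the following is only a plausible sketch of what Setup(t) could look like if it registers teardown through t.Cleanup; TestStruct is stubbed purely for illustration, while the real struct carries the mocked clients, Ctx, and CmdCtx.

package testutils

import "testing"

// Stubbed fixture: the real TestStruct in flytectl holds the mock fetcher/updater
// extensions, the context, and the command context used by the update funcs.
type TestStruct struct{}

// TearDown releases whatever Setup allocated.
func (s TestStruct) TearDown() {}

// Setup is assumed to own teardown: registering it with t.Cleanup ties the
// fixture's lifetime to the (sub)test that created it, which would explain why
// callers in this patch can drop their explicit `defer s.TearDown()` lines.
func Setup(t *testing.T) TestStruct {
    s := TestStruct{}
    t.Cleanup(s.TearDown)
    return s
}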
@@ -530,11 +558,13 @@ func testProjectDomainClusterResourceAttributeUpdate( } func testProjectDomainClusterResourceAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), setup func(s *testutils.TestStruct, config *clusterresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + clusterresourceattribute.DefaultUpdateConfig = &clusterresourceattribute.AttrUpdateConfig{} target := newTestProjectDomainClusterResourceAttribute() @@ -544,7 +574,6 @@ func testProjectDomainClusterResourceAttributeUpdateWithMockSetup( if setup != nil { setup(&s, clusterresourceattribute.DefaultUpdateConfig, target) - defer s.TearDown() } err := updateClusterResourceAttributesFunc(s.Ctx, nil, s.CmdCtx) diff --git a/flytectl/cmd/update/matchable_execution_cluster_label_test.go b/flytectl/cmd/update/matchable_execution_cluster_label_test.go index 0bbccbc83b..1006234626 100644 --- a/flytectl/cmd/update/matchable_execution_cluster_label_test.go +++ b/flytectl/cmd/update/matchable_execution_cluster_label_test.go @@ -20,6 +20,7 @@ const ( func TestExecutionClusterLabelUpdateRequiresAttributeFile(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ nil, /* assert */ func(s *testutils.TestStruct, err error) { assert.ErrorContains(t, err, "attrFile is mandatory") @@ -29,6 +30,7 @@ func TestExecutionClusterLabelUpdateRequiresAttributeFile(t *testing.T) { func TestExecutionClusterLabelUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataNonExistentFile config.Force = true @@ -42,6 +44,7 @@ func TestExecutionClusterLabelUpdateFailsWhenAttributeFileDoesNotExist(t *testin func TestExecutionClusterLabelUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataInvalidAttrFile config.Force = true @@ -56,6 +59,7 @@ func TestExecutionClusterLabelUpdateFailsWhenAttributeFileIsMalformed(t *testing func TestExecutionClusterLabelUpdateHappyPath(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionClusterLabelFilePath config.Force = true @@ -69,6 +73,7 @@ func TestExecutionClusterLabelUpdateHappyPath(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionClusterLabelFilePath config.Force = true @@ -82,6 +87,7 @@ func TestExecutionClusterLabelUpdateHappyPath(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionClusterLabelFilePath config.Force = true @@ -97,6 +103,7 @@ func 
TestExecutionClusterLabelUpdateHappyPath(t *testing.T) { func TestExecutionClusterLabelUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionClusterLabelFilePath config.Force = false @@ -109,6 +116,7 @@ func TestExecutionClusterLabelUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionClusterLabelFilePath config.Force = false @@ -121,6 +129,7 @@ func TestExecutionClusterLabelUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionClusterLabelFilePath config.Force = false @@ -135,6 +144,7 @@ func TestExecutionClusterLabelUpdateFailsWithoutForceFlag(t *testing.T) { func TestExecutionClusterLabelUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionClusterLabelFilePath config.DryRun = true @@ -147,6 +157,7 @@ func TestExecutionClusterLabelUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionClusterLabelFilePath config.DryRun = true @@ -159,6 +170,7 @@ func TestExecutionClusterLabelUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionClusterLabelFilePath config.DryRun = true @@ -173,6 +185,7 @@ func TestExecutionClusterLabelUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow without --force", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionClusterLabelFilePath config.Force = false @@ -186,6 +199,7 @@ func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow with --force", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionClusterLabelFilePath config.Force = true @@ -199,6 +213,7 @@ func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain without --force", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdate( + 
t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionClusterLabelFilePath config.Force = false @@ -212,6 +227,7 @@ func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain with --force", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionClusterLabelFilePath config.Force = true @@ -225,6 +241,7 @@ func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project without --force", func(t *testing.T) { testProjectExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionClusterLabelFilePath config.Force = false @@ -238,6 +255,7 @@ func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project with --force", func(t *testing.T) { testProjectExecutionClusterLabelUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionClusterLabelFilePath config.Force = true @@ -253,6 +271,7 @@ func TestExecutionClusterLabelUpdateIgnoresForceFlagWithDryRun(t *testing.T) { func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). @@ -274,6 +293,7 @@ func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing. t.Run("domain", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). @@ -295,6 +315,7 @@ func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing. t.Run("project", func(t *testing.T) { testProjectExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). @@ -318,6 +339,7 @@ func TestExecutionClusterLabelUpdateSucceedsWhenAttributesDoNotExist(t *testing. func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). 
@@ -338,6 +360,7 @@ func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). @@ -358,6 +381,7 @@ func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). @@ -378,10 +402,12 @@ func TestExecutionClusterLabelUpdateFailsWhenAdminClientFails(t *testing.T) { } func testWorkflowExecutionClusterLabelUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { testWorkflowExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). @@ -396,11 +422,13 @@ func testWorkflowExecutionClusterLabelUpdate( } func testWorkflowExecutionClusterLabelUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + executionclusterlabel.DefaultUpdateConfig = &executionclusterlabel.AttrUpdateConfig{} target := newTestWorkflowExecutionClusterLabel() @@ -439,10 +467,12 @@ func newTestWorkflowExecutionClusterLabel() *admin.WorkflowAttributes { } func testProjectExecutionClusterLabelUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). 
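Note that each scenario here runs inside t.Run, so the t forwarded to the helper is the sub-test's own *testing.T rather than the parent's. Combined with cleanup registered on that t (as in the Setup sketch above), teardown fires when the individual sub-test finishes instead of piling up until the whole test function returns. A minimal, self-contained illustration of that ordering, independent of the flytectl fixtures:

package example

import "testing"

// With `go test -v`, each sub-test's cleanup logs before the next sub-test
// starts, because Cleanup is registered on the sub-test's own *testing.T and
// t.Run blocks until that sub-test (including its cleanups) has completed.
func TestCleanupScope(t *testing.T) {
    for _, name := range []string{"workflow", "domain", "project"} {
        t.Run(name, func(t *testing.T) {
            t.Cleanup(func() { t.Log("teardown for", name) })
            t.Log("running", name)
        })
    }
}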
@@ -457,11 +487,13 @@ func testProjectExecutionClusterLabelUpdate( } func testProjectExecutionClusterLabelUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + executionclusterlabel.DefaultUpdateConfig = &executionclusterlabel.AttrUpdateConfig{} target := newTestProjectExecutionClusterLabel() @@ -498,10 +530,12 @@ func newTestProjectExecutionClusterLabel() *admin.ProjectAttributes { } func testProjectDomainExecutionClusterLabelUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectDomainExecutionClusterLabelUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_CLUSTER_LABEL). @@ -516,11 +550,13 @@ func testProjectDomainExecutionClusterLabelUpdate( } func testProjectDomainExecutionClusterLabelUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), setup func(s *testutils.TestStruct, config *executionclusterlabel.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + executionclusterlabel.DefaultUpdateConfig = &executionclusterlabel.AttrUpdateConfig{} target := newTestProjectDomainExecutionClusterLabel() diff --git a/flytectl/cmd/update/matchable_execution_queue_attribute_test.go b/flytectl/cmd/update/matchable_execution_queue_attribute_test.go index a88dc80717..e16526faa6 100644 --- a/flytectl/cmd/update/matchable_execution_queue_attribute_test.go +++ b/flytectl/cmd/update/matchable_execution_queue_attribute_test.go @@ -20,6 +20,7 @@ const ( func TestExecutionQueueAttributeUpdateRequiresAttributeFile(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ nil, /* assert */ func(s *testutils.TestStruct, err error) { assert.ErrorContains(t, err, "attrFile is mandatory") @@ -29,6 +30,7 @@ func TestExecutionQueueAttributeUpdateRequiresAttributeFile(t *testing.T) { func TestExecutionQueueAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataNonExistentFile config.Force = true @@ -42,6 +44,7 @@ func TestExecutionQueueAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *test func TestExecutionQueueAttributeUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataInvalidAttrFile config.Force = true @@ -56,6 +59,7 @@ func TestExecutionQueueAttributeUpdateFailsWhenAttributeFileIsMalformed(t *testi func TestExecutionQueueAttributeUpdateHappyPath(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config 
*executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath config.Force = true @@ -69,6 +73,7 @@ func TestExecutionQueueAttributeUpdateHappyPath(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath config.Force = true @@ -82,6 +87,7 @@ func TestExecutionQueueAttributeUpdateHappyPath(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath config.Force = true @@ -97,6 +103,7 @@ func TestExecutionQueueAttributeUpdateHappyPath(t *testing.T) { func TestExecutionQueueAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath config.Force = false @@ -109,6 +116,7 @@ func TestExecutionQueueAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath config.Force = false @@ -121,6 +129,7 @@ func TestExecutionQueueAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath config.Force = false @@ -135,6 +144,7 @@ func TestExecutionQueueAttributeUpdateFailsWithoutForceFlag(t *testing.T) { func TestExecutionQueueAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath config.DryRun = true @@ -147,6 +157,7 @@ func TestExecutionQueueAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath config.DryRun = true @@ -159,6 +170,7 @@ func TestExecutionQueueAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath config.DryRun = true 
@@ -173,6 +185,7 @@ func TestExecutionQueueAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow without --force", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath config.Force = false @@ -186,6 +199,7 @@ func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow with --force", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionQueueMatchableAttributesFilePath config.Force = true @@ -199,6 +213,7 @@ func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain without --force", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath config.Force = false @@ -212,6 +227,7 @@ func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain with --force", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainExecutionQueueMatchableAttributeFilePath config.Force = true @@ -225,6 +241,7 @@ func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project without --force", func(t *testing.T) { testProjectExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath config.Force = false @@ -238,6 +255,7 @@ func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project with --force", func(t *testing.T) { testProjectExecutionQueueAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectExecutionQueueMatchableAttributeFilePath config.Force = true @@ -253,6 +271,7 @@ func TestExecutionQueueAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE). @@ -274,6 +293,7 @@ func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testin t.Run("domain", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. 
OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE). @@ -295,6 +315,7 @@ func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testin t.Run("project", func(t *testing.T) { testProjectExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE). @@ -318,6 +339,7 @@ func TestExecutionQueueAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testin func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE). @@ -338,6 +360,7 @@ func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE). @@ -358,6 +381,7 @@ func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE). @@ -378,10 +402,12 @@ func TestExecutionQueueAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { } func testWorkflowExecutionQueueAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { testWorkflowExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_EXECUTION_QUEUE). 
@@ -396,11 +422,13 @@ func testWorkflowExecutionQueueAttributeUpdate( } func testWorkflowExecutionQueueAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + executionqueueattribute.DefaultUpdateConfig = &executionqueueattribute.AttrUpdateConfig{} target := newTestWorkflowExecutionQueueAttribute() @@ -410,7 +438,6 @@ func testWorkflowExecutionQueueAttributeUpdateWithMockSetup( if setup != nil { setup(&s, executionqueueattribute.DefaultUpdateConfig, target) - defer s.TearDown() } err := updateExecutionQueueAttributesFunc(s.Ctx, nil, s.CmdCtx) @@ -444,10 +471,12 @@ func newTestWorkflowExecutionQueueAttribute() *admin.WorkflowAttributes { } func testProjectExecutionQueueAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_EXECUTION_QUEUE). @@ -462,12 +491,12 @@ func testProjectExecutionQueueAttributeUpdate( } func testProjectExecutionQueueAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) executionqueueattribute.DefaultUpdateConfig = &executionqueueattribute.AttrUpdateConfig{} target := newTestProjectExecutionQueueAttribute() @@ -478,7 +507,6 @@ func testProjectExecutionQueueAttributeUpdateWithMockSetup( if setup != nil { setup(&s, executionqueueattribute.DefaultUpdateConfig, target) - defer s.TearDown() } err := updateExecutionQueueAttributesFunc(s.Ctx, nil, s.CmdCtx) @@ -510,10 +538,12 @@ func newTestProjectExecutionQueueAttribute() *admin.ProjectAttributes { } func testProjectDomainExecutionQueueAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_EXECUTION_QUEUE). 
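The same two-layer change repeats for every matchable resource in this series: the thin test*Update wrapper gains a *testing.T parameter and forwards it to its *WithMockSetup counterpart, which hands it to testutils.Setup. Reduced to hypothetical, resource-agnostic names (these are not the actual flytectl helpers), the shape of the refactor is roughly:

package example

import "testing"

type (
    fixture struct{}
    cfg     struct {
        AttrFile      string
        DryRun, Force bool
    }
    target   struct{}
    mockFn   func(s *fixture, tg *target)
    setupFn  func(s *fixture, c *cfg, tg *target)
    assertFn func(s *fixture, err error)
)

// newFixture stands in for testutils.Setup(t): teardown is owned by the fixture.
func newFixture(t *testing.T) *fixture {
    f := &fixture{}
    t.Cleanup(func() { /* release mocks */ })
    return f
}

// The wrapper only supplies a default mock setup; t is threaded straight through.
func testAttributeUpdate(t *testing.T, setup setupFn, asserter assertFn) {
    testAttributeUpdateWithMockSetup(t,
        func(s *fixture, tg *target) { /* default fetcher expectations */ },
        setup, asserter)
}

func testAttributeUpdateWithMockSetup(t *testing.T, mockSetup mockFn, setup setupFn, asserter assertFn) {
    s := newFixture(t) // previously: s := testutils.Setup(); defer s.TearDown()
    c, tg := &cfg{}, &target{}
    mockSetup(s, tg)
    if setup != nil {
        setup(s, c, tg)
    }
    var err error // the real helpers call the update*AttributesFunc under test here
    if asserter != nil {
        asserter(s, err)
    }
}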
@@ -528,11 +558,13 @@ func testProjectDomainExecutionQueueAttributeUpdate( } func testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), setup func(s *testutils.TestStruct, config *executionqueueattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + executionqueueattribute.DefaultUpdateConfig = &executionqueueattribute.AttrUpdateConfig{} target := newTestProjectDomainExecutionQueueAttribute() @@ -542,7 +574,6 @@ func testProjectDomainExecutionQueueAttributeUpdateWithMockSetup( if setup != nil { setup(&s, executionqueueattribute.DefaultUpdateConfig, target) - defer s.TearDown() } err := updateExecutionQueueAttributesFunc(s.Ctx, nil, s.CmdCtx) diff --git a/flytectl/cmd/update/matchable_plugin_override_test.go b/flytectl/cmd/update/matchable_plugin_override_test.go index 3207951db6..3b0181392b 100644 --- a/flytectl/cmd/update/matchable_plugin_override_test.go +++ b/flytectl/cmd/update/matchable_plugin_override_test.go @@ -20,6 +20,7 @@ const ( func TestPluginOverrideUpdateRequiresAttributeFile(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ nil, /* assert */ func(s *testutils.TestStruct, err error) { assert.ErrorContains(t, err, "attrFile is mandatory") @@ -29,6 +30,7 @@ func TestPluginOverrideUpdateRequiresAttributeFile(t *testing.T) { func TestPluginOverrideUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataNonExistentFile config.Force = true @@ -42,6 +44,7 @@ func TestPluginOverrideUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { func TestPluginOverrideUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataInvalidAttrFile config.Force = true @@ -56,6 +59,7 @@ func TestPluginOverrideUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { func TestPluginOverrideUpdateHappyPath(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowPluginOverrideFilePath config.Force = true @@ -69,6 +73,7 @@ func TestPluginOverrideUpdateHappyPath(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainPluginOverrideFilePath config.Force = true @@ -82,6 +87,7 @@ func TestPluginOverrideUpdateHappyPath(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectPluginOverrideFilePath config.Force = true @@ -97,6 +103,7 @@ func TestPluginOverrideUpdateHappyPath(t *testing.T) { func TestPluginOverrideUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s 
*testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowPluginOverrideFilePath config.Force = false @@ -109,6 +116,7 @@ func TestPluginOverrideUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainPluginOverrideFilePath config.Force = false @@ -121,6 +129,7 @@ func TestPluginOverrideUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectPluginOverrideFilePath config.Force = false @@ -135,6 +144,7 @@ func TestPluginOverrideUpdateFailsWithoutForceFlag(t *testing.T) { func TestPluginOverrideUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowPluginOverrideFilePath config.DryRun = true @@ -147,6 +157,7 @@ func TestPluginOverrideUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainPluginOverrideFilePath config.DryRun = true @@ -159,6 +170,7 @@ func TestPluginOverrideUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectPluginOverrideFilePath config.DryRun = true @@ -173,6 +185,7 @@ func TestPluginOverrideUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow without --force", func(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowPluginOverrideFilePath config.Force = false @@ -186,6 +199,7 @@ func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow with --force", func(t *testing.T) { testWorkflowPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowPluginOverrideFilePath config.Force = true @@ -199,6 +213,7 @@ func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain without --force", func(t *testing.T) { testProjectDomainPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainPluginOverrideFilePath config.Force = false @@ -212,6 +227,7 @@ func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain with --force", func(t *testing.T) { testProjectDomainPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config 
*pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainPluginOverrideFilePath config.Force = true @@ -225,6 +241,7 @@ func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project without --force", func(t *testing.T) { testProjectPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectPluginOverrideFilePath config.Force = false @@ -238,6 +255,7 @@ func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project with --force", func(t *testing.T) { testProjectPluginOverrideUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectPluginOverrideFilePath config.Force = true @@ -253,6 +271,7 @@ func TestPluginOverrideUpdateIgnoresForceFlagWithDryRun(t *testing.T) { func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE). @@ -274,6 +293,7 @@ func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE). @@ -295,6 +315,7 @@ func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE). @@ -318,6 +339,7 @@ func TestPluginOverrideUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE). @@ -338,6 +360,7 @@ func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE). @@ -358,6 +381,7 @@ func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE). 
@@ -378,10 +402,12 @@ func TestPluginOverrideUpdateFailsWhenAdminClientFails(t *testing.T) { } func testWorkflowPluginOverrideUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { testWorkflowPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_PLUGIN_OVERRIDE). @@ -396,11 +422,13 @@ func testWorkflowPluginOverrideUpdate( } func testWorkflowPluginOverrideUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + pluginoverride.DefaultUpdateConfig = &pluginoverride.AttrUpdateConfig{} target := newTestWorkflowPluginOverride() @@ -449,10 +477,12 @@ func newTestWorkflowPluginOverride() *admin.WorkflowAttributes { } func testProjectPluginOverrideUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_PLUGIN_OVERRIDE). @@ -467,11 +497,13 @@ func testProjectPluginOverrideUpdate( } func testProjectPluginOverrideUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + pluginoverride.DefaultUpdateConfig = &pluginoverride.AttrUpdateConfig{} target := newTestProjectPluginOverride() @@ -518,10 +550,12 @@ func newTestProjectPluginOverride() *admin.ProjectAttributes { } func testProjectDomainPluginOverrideUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectDomainPluginOverrideUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_PLUGIN_OVERRIDE). 
@@ -536,11 +570,13 @@ func testProjectDomainPluginOverrideUpdate( } func testProjectDomainPluginOverrideUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), setup func(s *testutils.TestStruct, config *pluginoverride.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + pluginoverride.DefaultUpdateConfig = &pluginoverride.AttrUpdateConfig{} target := newTestProjectDomainPluginOverride() diff --git a/flytectl/cmd/update/matchable_task_resource_attribute_test.go b/flytectl/cmd/update/matchable_task_resource_attribute_test.go index 6e54b17e34..42c2c3ab4f 100644 --- a/flytectl/cmd/update/matchable_task_resource_attribute_test.go +++ b/flytectl/cmd/update/matchable_task_resource_attribute_test.go @@ -20,6 +20,7 @@ const ( func TestTaskResourceAttributeUpdateRequiresAttributeFile(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ nil, /* assert */ func(s *testutils.TestStruct, err error) { assert.ErrorContains(t, err, "attrFile is mandatory") @@ -29,6 +30,7 @@ func TestTaskResourceAttributeUpdateRequiresAttributeFile(t *testing.T) { func TestTaskResourceAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataNonExistentFile config.Force = true @@ -42,6 +44,7 @@ func TestTaskResourceAttributeUpdateFailsWhenAttributeFileDoesNotExist(t *testin func TestTaskResourceAttributeUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataInvalidAttrFile config.Force = true @@ -56,6 +59,7 @@ func TestTaskResourceAttributeUpdateFailsWhenAttributeFileIsMalformed(t *testing func TestTaskResourceAttributeUpdateHappyPath(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowTaskAttributesFilePath config.Force = true @@ -69,6 +73,7 @@ func TestTaskResourceAttributeUpdateHappyPath(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainTaskAttributesFilePath config.Force = true @@ -82,6 +87,7 @@ func TestTaskResourceAttributeUpdateHappyPath(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectTaskAttributesFilePath config.Force = true @@ -97,6 +103,7 @@ func TestTaskResourceAttributeUpdateHappyPath(t *testing.T) { func TestTaskResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = 
validWorkflowTaskAttributesFilePath config.Force = false @@ -109,6 +116,7 @@ func TestTaskResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainTaskAttributesFilePath config.Force = false @@ -121,6 +129,7 @@ func TestTaskResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectTaskAttributesFilePath config.Force = false @@ -135,6 +144,7 @@ func TestTaskResourceAttributeUpdateFailsWithoutForceFlag(t *testing.T) { func TestTaskResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowTaskAttributesFilePath config.DryRun = true @@ -147,6 +157,7 @@ func TestTaskResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainTaskAttributesFilePath config.DryRun = true @@ -159,6 +170,7 @@ func TestTaskResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectTaskAttributesFilePath config.DryRun = true @@ -173,6 +185,7 @@ func TestTaskResourceAttributeUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow without --force", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowTaskAttributesFilePath config.Force = false @@ -186,6 +199,7 @@ func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow with --force", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowTaskAttributesFilePath config.Force = true @@ -199,6 +213,7 @@ func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain without --force", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainTaskAttributesFilePath config.Force = false @@ -212,6 +227,7 @@ func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain with --force", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdate( + t, /* 
setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainTaskAttributesFilePath config.Force = true @@ -225,6 +241,7 @@ func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project without --force", func(t *testing.T) { testProjectTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectTaskAttributesFilePath config.Force = false @@ -238,6 +255,7 @@ func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project with --force", func(t *testing.T) { testProjectTaskResourceAttributeUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectTaskAttributesFilePath config.Force = true @@ -253,6 +271,7 @@ func TestTaskResourceAttributeUpdateIgnoresForceFlagWithDryRun(t *testing.T) { func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE). @@ -274,6 +293,7 @@ func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing. t.Run("domain", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE). @@ -295,6 +315,7 @@ func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing. t.Run("project", func(t *testing.T) { testProjectTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE). @@ -318,6 +339,7 @@ func TestTaskResourceAttributeUpdateSucceedsWhenAttributesDoNotExist(t *testing. func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE). @@ -338,6 +360,7 @@ func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE). @@ -358,6 +381,7 @@ func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. 
OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE). @@ -378,10 +402,12 @@ func TestTaskResourceAttributeUpdateFailsWhenAdminClientFails(t *testing.T) { } func testWorkflowTaskResourceAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { testWorkflowTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_TASK_RESOURCE). @@ -396,11 +422,13 @@ func testWorkflowTaskResourceAttributeUpdate( } func testWorkflowTaskResourceAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + taskresourceattribute.DefaultUpdateConfig = &taskresourceattribute.AttrUpdateConfig{} target := newTestWorkflowTaskResourceAttribute() @@ -442,10 +470,12 @@ func newTestWorkflowTaskResourceAttribute() *admin.WorkflowAttributes { } func testProjectTaskResourceAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_TASK_RESOURCE). @@ -460,11 +490,13 @@ func testProjectTaskResourceAttributeUpdate( } func testProjectTaskResourceAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + taskresourceattribute.DefaultUpdateConfig = &taskresourceattribute.AttrUpdateConfig{} target := newTestProjectTaskResourceAttribute() @@ -504,10 +536,12 @@ func newTestProjectTaskResourceAttribute() *admin.ProjectAttributes { } func testProjectDomainTaskResourceAttributeUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectDomainTaskResourceAttributeUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_TASK_RESOURCE). 
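A further reason test fixtures commonly accept *testing.T, beyond cleanup, is that testify-style mocks can be bound to the test so unmet expectations fail the right (sub)test automatically. Whether flytectl's generated FetcherExt/UpdaterExt mocks are wired this way is not visible in this diff, so the snippet below is only a general illustration of that pattern using testify's mock package, with a hypothetical mock type.

package example

import (
    "testing"

    "github.com/stretchr/testify/mock"
)

// Hypothetical mock type; flytectl's mocks are generated and may differ.
type fetcherMock struct{ mock.Mock }

func newFetcherMock(t *testing.T) *fetcherMock {
    m := &fetcherMock{}
    m.Test(t)                                     // report mock failures against this test
    t.Cleanup(func() { m.AssertExpectations(t) }) // verify all expectations when it ends
    return m
}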
@@ -522,11 +556,13 @@ func testProjectDomainTaskResourceAttributeUpdate( } func testProjectDomainTaskResourceAttributeUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), setup func(s *testutils.TestStruct, config *taskresourceattribute.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + taskresourceattribute.DefaultUpdateConfig = &taskresourceattribute.AttrUpdateConfig{} target := newTestProjectDomainTaskResourceAttribute() diff --git a/flytectl/cmd/update/matchable_workflow_execution_config_test.go b/flytectl/cmd/update/matchable_workflow_execution_config_test.go index 06e0996d37..c75b2fd58f 100644 --- a/flytectl/cmd/update/matchable_workflow_execution_config_test.go +++ b/flytectl/cmd/update/matchable_workflow_execution_config_test.go @@ -20,6 +20,7 @@ const ( func TestWorkflowExecutionConfigUpdateRequiresAttributeFile(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ nil, /* assert */ func(s *testutils.TestStruct, err error) { assert.ErrorContains(t, err, "attrFile is mandatory") @@ -29,6 +30,7 @@ func TestWorkflowExecutionConfigUpdateRequiresAttributeFile(t *testing.T) { func TestWorkflowExecutionConfigUpdateFailsWhenAttributeFileDoesNotExist(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataNonExistentFile config.Force = true @@ -42,6 +44,7 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAttributeFileDoesNotExist(t *test func TestWorkflowExecutionConfigUpdateFailsWhenAttributeFileIsMalformed(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = testDataInvalidAttrFile config.Force = true @@ -56,6 +59,7 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAttributeFileIsMalformed(t *testi func TestWorkflowExecutionConfigUpdateHappyPath(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionConfigFilePath config.Force = true @@ -69,6 +73,7 @@ func TestWorkflowExecutionConfigUpdateHappyPath(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath config.Force = true @@ -82,6 +87,7 @@ func TestWorkflowExecutionConfigUpdateHappyPath(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectWorkflowExecutionConfigFilePath config.Force = true @@ -97,6 +103,7 @@ func TestWorkflowExecutionConfigUpdateHappyPath(t *testing.T) { func TestWorkflowExecutionConfigUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config 
*workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionConfigFilePath config.Force = false @@ -109,6 +116,7 @@ func TestWorkflowExecutionConfigUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath config.Force = false @@ -121,6 +129,7 @@ func TestWorkflowExecutionConfigUpdateFailsWithoutForceFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectWorkflowExecutionConfigFilePath config.Force = false @@ -135,6 +144,7 @@ func TestWorkflowExecutionConfigUpdateFailsWithoutForceFlag(t *testing.T) { func TestWorkflowExecutionConfigUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionConfigFilePath config.DryRun = true @@ -147,6 +157,7 @@ func TestWorkflowExecutionConfigUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath config.DryRun = true @@ -159,6 +170,7 @@ func TestWorkflowExecutionConfigUpdateDoesNothingWithDryRunFlag(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectWorkflowExecutionConfigFilePath config.DryRun = true @@ -173,6 +185,7 @@ func TestWorkflowExecutionConfigUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow without --force", func(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionConfigFilePath config.Force = false @@ -186,6 +199,7 @@ func TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("workflow with --force", func(t *testing.T) { testWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes) { config.AttrFile = validWorkflowExecutionConfigFilePath config.Force = true @@ -199,6 +213,7 @@ func TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain without --force", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath config.Force = false @@ -212,6 +227,7 @@ func 
TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("domain with --force", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes) { config.AttrFile = validProjectDomainWorkflowExecutionConfigFilePath config.Force = true @@ -225,6 +241,7 @@ func TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project without --force", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectWorkflowExecutionConfigFilePath config.Force = false @@ -238,6 +255,7 @@ func TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { t.Run("project with --force", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes) { config.AttrFile = validProjectWorkflowExecutionConfigFilePath config.Force = true @@ -253,6 +271,7 @@ func TestWorkflowExecutionConfigUpdateIgnoresForceFlagWithDryRun(t *testing.T) { func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). @@ -274,6 +293,7 @@ func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testin t.Run("domain", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). @@ -295,6 +315,7 @@ func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testin t.Run("project", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). @@ -318,6 +339,7 @@ func TestWorkflowExecutionConfigUpdateSucceedsWhenAttributesDoNotExist(t *testin func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("workflow", func(t *testing.T) { testWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). @@ -338,6 +360,7 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("domain", func(t *testing.T) { testProjectDomainWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). 
@@ -358,6 +381,7 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) { t.Run("project", func(t *testing.T) { testProjectWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). @@ -378,10 +402,12 @@ func TestWorkflowExecutionConfigUpdateFailsWhenAdminClientFails(t *testing.T) { } func testWorkflowExecutionConfigUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { testWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.WorkflowAttributes) { s.FetcherExt. OnFetchWorkflowAttributesMatch(s.Ctx, target.Project, target.Domain, target.Workflow, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). @@ -396,11 +422,13 @@ func testWorkflowExecutionConfigUpdate( } func testWorkflowExecutionConfigUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.WorkflowAttributes), setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.WorkflowAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + workflowexecutionconfig.DefaultUpdateConfig = &workflowexecutionconfig.AttrUpdateConfig{} target := newTestWorkflowExecutionConfig() @@ -446,10 +474,12 @@ func newTestWorkflowExecutionConfig() *admin.WorkflowAttributes { } func testProjectWorkflowExecutionConfigUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectAttributes) { s.FetcherExt. OnFetchProjectAttributesMatch(s.Ctx, target.Project, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). @@ -464,11 +494,13 @@ func testProjectWorkflowExecutionConfigUpdate( } func testProjectWorkflowExecutionConfigUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectAttributes), setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + workflowexecutionconfig.DefaultUpdateConfig = &workflowexecutionconfig.AttrUpdateConfig{} target := newTestProjectWorkflowExecutionConfig() @@ -512,10 +544,12 @@ func newTestProjectWorkflowExecutionConfig() *admin.ProjectAttributes { } func testProjectDomainWorkflowExecutionConfigUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { testProjectDomainWorkflowExecutionConfigUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes) { s.FetcherExt. OnFetchProjectDomainAttributesMatch(s.Ctx, target.Project, target.Domain, admin.MatchableResource_WORKFLOW_EXECUTION_CONFIG). 
@@ -530,11 +564,13 @@ func testProjectDomainWorkflowExecutionConfigUpdate( } func testProjectDomainWorkflowExecutionConfigUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, target *admin.ProjectDomainAttributes), setup func(s *testutils.TestStruct, config *workflowexecutionconfig.AttrUpdateConfig, target *admin.ProjectDomainAttributes), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + workflowexecutionconfig.DefaultUpdateConfig = &workflowexecutionconfig.AttrUpdateConfig{} target := newTestProjectDomainWorkflowExecutionConfig() diff --git a/flytectl/cmd/update/named_entity_test.go b/flytectl/cmd/update/named_entity_test.go index 2dbb50fba5..4d4e5b2783 100644 --- a/flytectl/cmd/update/named_entity_test.go +++ b/flytectl/cmd/update/named_entity_test.go @@ -3,6 +3,7 @@ package update import ( "context" "fmt" + "testing" "github.com/flyteorg/flyte/flytectl/cmd/config" cmdCore "github.com/flyteorg/flyte/flytectl/cmd/core" @@ -13,11 +14,13 @@ import ( ) func testNamedEntityUpdate( + t *testing.T, resourceType core.ResourceType, setup func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity), asserter func(s *testutils.TestStruct, err error), ) { testNamedEntityUpdateWithMockSetup( + t, resourceType, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. @@ -35,13 +38,13 @@ func testNamedEntityUpdate( } func testNamedEntityUpdateWithMockSetup( + t *testing.T, resourceType core.ResourceType, mockSetup func(s *testutils.TestStruct, namedEntity *admin.NamedEntity), setup func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) config := &NamedEntityConfig{} target := newTestNamedEntity(resourceType) @@ -52,7 +55,6 @@ func testNamedEntityUpdateWithMockSetup( if setup != nil { setup(&s, config, target) - defer s.TearDown() } updateMetadataFactory := getUpdateMetadataFactory(resourceType) diff --git a/flytectl/cmd/update/project_test.go b/flytectl/cmd/update/project_test.go index c5785e0a12..0ca41c4309 100644 --- a/flytectl/cmd/update/project_test.go +++ b/flytectl/cmd/update/project_test.go @@ -15,6 +15,7 @@ import ( func TestProjectCanBeActivated(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ARCHIVED config.Activate = true @@ -33,6 +34,7 @@ func TestProjectCanBeActivated(t *testing.T) { func TestProjectCanBeArchived(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ACTIVE config.Archive = true @@ -51,6 +53,7 @@ func TestProjectCanBeArchived(t *testing.T) { func TestProjectCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { config.Activate = true config.Archive = true @@ -63,6 +66,7 @@ func TestProjectCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { func TestProjectUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ACTIVE config.Activate = true @@ -76,6 +80,7 
@@ func TestProjectUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { func TestProjectUpdateWithoutForceFlagFails(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ARCHIVED config.Activate = true @@ -89,6 +94,7 @@ func TestProjectUpdateWithoutForceFlagFails(t *testing.T) { func TestProjectUpdateDoesNothingWithDryRunFlag(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ARCHIVED config.Activate = true @@ -103,6 +109,7 @@ func TestProjectUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestForceFlagIsIgnoredWithDryRunDuringProjectUpdate(t *testing.T) { t.Run("without --force", func(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ARCHIVED config.Activate = true @@ -118,6 +125,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringProjectUpdate(t *testing.T) { t.Run("with --force", func(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ARCHIVED config.Activate = true @@ -134,6 +142,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringProjectUpdate(t *testing.T) { func TestProjectUpdateFailsWhenProjectDoesNotExist(t *testing.T) { testProjectUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) { s.FetcherExt. OnGetProjectByID(s.Ctx, project.Id). @@ -152,6 +161,7 @@ func TestProjectUpdateFailsWhenProjectDoesNotExist(t *testing.T) { func TestProjectUpdateFailsWhenAdminClientFails(t *testing.T) { testProjectUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) { s.FetcherExt. OnGetProjectByID(s.Ctx, project.Id). @@ -174,6 +184,7 @@ func TestProjectUpdateFailsWhenAdminClientFails(t *testing.T) { func TestProjectUpdateRequiresProjectId(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { config.ID = "" }, @@ -184,6 +195,7 @@ func TestProjectUpdateRequiresProjectId(t *testing.T) { func TestProjectUpdateDoesNotActivateArchivedProject(t *testing.T) { testProjectUpdate( + t, /* setup */ func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project) { project.State = admin.Project_ARCHIVED config.Activate = false @@ -203,10 +215,12 @@ func TestProjectUpdateDoesNotActivateArchivedProject(t *testing.T) { } func testProjectUpdate( + t *testing.T, setup func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project), asserter func(s *testutils.TestStruct, err error), ) { testProjectUpdateWithMockSetup( + t, /* mockSetup */ func(s *testutils.TestStruct, project *admin.Project) { s.FetcherExt. OnGetProjectByID(s.Ctx, project.Id). 
@@ -221,11 +235,13 @@ func testProjectUpdate( } func testProjectUpdateWithMockSetup( + t *testing.T, mockSetup func(s *testutils.TestStruct, project *admin.Project), setup func(s *testutils.TestStruct, config *project.ConfigProject, project *admin.Project), asserter func(s *testutils.TestStruct, err error), ) { - s := testutils.Setup() + s := testutils.Setup(t) + target := newTestProject() if mockSetup != nil { diff --git a/flytectl/cmd/update/task_meta_test.go b/flytectl/cmd/update/task_meta_test.go index 09cc573115..69cd7c4072 100644 --- a/flytectl/cmd/update/task_meta_test.go +++ b/flytectl/cmd/update/task_meta_test.go @@ -13,7 +13,7 @@ import ( ) func TestTaskMetadataCanBeActivated(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -31,7 +31,7 @@ func TestTaskMetadataCanBeActivated(t *testing.T) { } func TestTaskMetadataCanBeArchived(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE config.Archive = true @@ -49,7 +49,7 @@ func TestTaskMetadataCanBeArchived(t *testing.T) { } func TestTaskMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { config.Activate = true config.Archive = true @@ -61,7 +61,7 @@ func TestTaskMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { } func TestTaskMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE config.Activate = true @@ -74,7 +74,7 @@ func TestTaskMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { } func TestTaskMetadataUpdateWithoutForceFlagFails(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -87,7 +87,7 @@ func TestTaskMetadataUpdateWithoutForceFlagFails(t *testing.T) { } func TestTaskMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -101,7 +101,7 @@ func TestTaskMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestForceFlagIsIgnoredWithDryRunDuringTaskMetadataUpdate(t *testing.T) { t.Run("without --force", func(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s 
*testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -116,7 +116,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringTaskMetadataUpdate(t *testing.T) { }) t.Run("with --force", func(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_TASK, + testNamedEntityUpdate(t, core.ResourceType_TASK, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -133,6 +133,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringTaskMetadataUpdate(t *testing.T) { func TestTaskMetadataUpdateFailsWhenTaskDoesNotExist(t *testing.T) { testNamedEntityUpdateWithMockSetup( + t, core.ResourceType_TASK, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. @@ -154,6 +155,7 @@ func TestTaskMetadataUpdateFailsWhenTaskDoesNotExist(t *testing.T) { func TestTaskMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { testNamedEntityUpdateWithMockSetup( + t, core.ResourceType_TASK, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. @@ -178,8 +180,7 @@ func TestTaskMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { } func TestTaskMetadataUpdateRequiresTaskName(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) config := &NamedEntityConfig{} diff --git a/flytectl/cmd/update/workflow_meta_test.go b/flytectl/cmd/update/workflow_meta_test.go index 05589ee6c7..fc620b44aa 100644 --- a/flytectl/cmd/update/workflow_meta_test.go +++ b/flytectl/cmd/update/workflow_meta_test.go @@ -13,7 +13,9 @@ import ( ) func TestWorkflowMetadataCanBeActivated(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -31,7 +33,9 @@ func TestWorkflowMetadataCanBeActivated(t *testing.T) { } func TestWorkflowMetadataCanBeArchived(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE config.Archive = true @@ -49,7 +53,9 @@ func TestWorkflowMetadataCanBeArchived(t *testing.T) { } func TestWorkflowMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { config.Activate = true config.Archive = true @@ -61,7 +67,9 @@ func TestWorkflowMetadataCannotBeActivatedAndArchivedAtTheSameTime(t *testing.T) } func TestWorkflowMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ACTIVE config.Activate = true @@ -74,7 +82,9 @@ func 
TestWorkflowMetadataUpdateDoesNothingWhenThereAreNoChanges(t *testing.T) { } func TestWorkflowMetadataUpdateWithoutForceFlagFails(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -87,7 +97,9 @@ func TestWorkflowMetadataUpdateWithoutForceFlagFails(t *testing.T) { } func TestWorkflowMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -101,7 +113,9 @@ func TestWorkflowMetadataUpdateDoesNothingWithDryRunFlag(t *testing.T) { func TestForceFlagIsIgnoredWithDryRunDuringWorkflowMetadataUpdate(t *testing.T) { t.Run("without --force", func(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -116,7 +130,9 @@ func TestForceFlagIsIgnoredWithDryRunDuringWorkflowMetadataUpdate(t *testing.T) }) t.Run("with --force", func(t *testing.T) { - testNamedEntityUpdate(core.ResourceType_WORKFLOW, + testNamedEntityUpdate( + t, + core.ResourceType_WORKFLOW, /* setup */ func(s *testutils.TestStruct, config *NamedEntityConfig, namedEntity *admin.NamedEntity) { namedEntity.Metadata.State = admin.NamedEntityState_NAMED_ENTITY_ARCHIVED config.Activate = true @@ -133,6 +149,7 @@ func TestForceFlagIsIgnoredWithDryRunDuringWorkflowMetadataUpdate(t *testing.T) func TestWorkflowMetadataUpdateFailsWhenWorkflowDoesNotExist(t *testing.T) { testNamedEntityUpdateWithMockSetup( + t, core.ResourceType_WORKFLOW, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. @@ -154,6 +171,7 @@ func TestWorkflowMetadataUpdateFailsWhenWorkflowDoesNotExist(t *testing.T) { func TestWorkflowMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { testNamedEntityUpdateWithMockSetup( + t, core.ResourceType_WORKFLOW, /* mockSetup */ func(s *testutils.TestStruct, namedEntity *admin.NamedEntity) { s.MockAdminClient. 
@@ -178,7 +196,8 @@ func TestWorkflowMetadataUpdateFailsWhenAdminClientFails(t *testing.T) { } func TestWorkflowMetadataUpdateRequiresWorkflowName(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + config := &NamedEntityConfig{} err := getUpdateWorkflowFunc(config)(s.Ctx, nil, s.CmdCtx) diff --git a/flytectl/cmd/upgrade/upgrade_test.go b/flytectl/cmd/upgrade/upgrade_test.go index d4132f1df4..dd451d13b5 100644 --- a/flytectl/cmd/upgrade/upgrade_test.go +++ b/flytectl/cmd/upgrade/upgrade_test.go @@ -104,7 +104,8 @@ func TestSelfUpgrade(t *testing.T) { github.FlytectlReleaseConfig.OverrideExecutable = tempExt goos = platformutil.Linux t.Run("Successful upgrade", func(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + stdlibversion.Build = "" stdlibversion.BuildTime = "" stdlibversion.Version = version @@ -118,7 +119,8 @@ func TestSelfUpgradeError(t *testing.T) { github.FlytectlReleaseConfig.OverrideExecutable = tempExt goos = platformutil.Linux t.Run("Successful upgrade", func(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + stdlibversion.Build = "" stdlibversion.BuildTime = "" stdlibversion.Version = "v" @@ -133,7 +135,8 @@ func TestSelfUpgradeRollback(t *testing.T) { github.FlytectlReleaseConfig.OverrideExecutable = tempExt goos = platformutil.Linux t.Run("Successful rollback", func(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + var args = []string{rollBackSubCommand} stdlibversion.Build = "" stdlibversion.BuildTime = "" @@ -142,7 +145,8 @@ func TestSelfUpgradeRollback(t *testing.T) { }) t.Run("Successful rollback failed", func(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + var args = []string{rollBackSubCommand} stdlibversion.Build = "" stdlibversion.BuildTime = "" @@ -151,7 +155,8 @@ func TestSelfUpgradeRollback(t *testing.T) { }) t.Run("Successful rollback for windows", func(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + var args = []string{rollBackSubCommand} stdlibversion.Build = "" stdlibversion.BuildTime = "" @@ -161,7 +166,8 @@ func TestSelfUpgradeRollback(t *testing.T) { }) t.Run("Successful rollback for windows", func(t *testing.T) { - s := testutils.Setup() + s := testutils.Setup(t) + var args = []string{rollBackSubCommand} stdlibversion.Build = "" stdlibversion.BuildTime = "" diff --git a/flytectl/cmd/version/version_test.go b/flytectl/cmd/version/version_test.go index 791a895e46..a397ab8199 100644 --- a/flytectl/cmd/version/version_test.go +++ b/flytectl/cmd/version/version_test.go @@ -54,8 +54,7 @@ func TestVersionCommand(t *testing.T) { func TestVersionCommandFunc(t *testing.T) { ctx := context.Background() - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) stdlibversion.Build = "" stdlibversion.BuildTime = "" stdlibversion.Version = testVersion @@ -67,8 +66,7 @@ func TestVersionCommandFunc(t *testing.T) { func TestVersionCommandFuncError(t *testing.T) { ctx := context.Background() - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) stdlibversion.Build = "" stdlibversion.BuildTime = "" stdlibversion.Version = "v" @@ -80,8 +78,7 @@ func TestVersionCommandFuncError(t *testing.T) { func TestVersionCommandFuncErr(t *testing.T) { ctx := context.Background() - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) stdlibversion.Build = "" stdlibversion.BuildTime = "" stdlibversion.Version = testVersion diff --git a/flytectl/pkg/sandbox/status_test.go b/flytectl/pkg/sandbox/status_test.go index 
2bc3a0529c..2f49e4e434 100644 --- a/flytectl/pkg/sandbox/status_test.go +++ b/flytectl/pkg/sandbox/status_test.go @@ -14,15 +14,13 @@ import ( func TestSandboxStatus(t *testing.T) { t.Run("Sandbox status with zero result", func(t *testing.T) { mockDocker := &mocks.Docker{} - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) mockDocker.OnContainerList(s.Ctx, container.ListOptions{All: true}).Return([]types.Container{}, nil) err := PrintStatus(s.Ctx, mockDocker) assert.Nil(t, err) }) t.Run("Sandbox status with running sandbox", func(t *testing.T) { - s := testutils.Setup() - defer s.TearDown() + s := testutils.Setup(t) ctx := s.Ctx mockDocker := &mocks.Docker{} mockDocker.OnContainerList(ctx, container.ListOptions{All: true}).Return([]types.Container{ From a87585ab7cbb6a047c76d994b3f127c4210070fd Mon Sep 17 00:00:00 2001 From: Katrina Rogan Date: Tue, 5 Nov 2024 14:26:46 +0100 Subject: [PATCH 09/18] Reduce where clause fanout when updating workflow, node & task executions (#5953) --- flyteadmin/pkg/repositories/gormimpl/common.go | 4 ++++ flyteadmin/pkg/repositories/gormimpl/execution_repo.go | 2 +- flyteadmin/pkg/repositories/gormimpl/execution_repo_test.go | 5 +---- flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go | 2 +- .../pkg/repositories/gormimpl/node_execution_repo_test.go | 2 +- flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go | 3 ++- .../pkg/repositories/gormimpl/task_execution_repo_test.go | 2 +- 7 files changed, 11 insertions(+), 9 deletions(-) diff --git a/flyteadmin/pkg/repositories/gormimpl/common.go b/flyteadmin/pkg/repositories/gormimpl/common.go index 330555be8f..b103ef0e43 100644 --- a/flyteadmin/pkg/repositories/gormimpl/common.go +++ b/flyteadmin/pkg/repositories/gormimpl/common.go @@ -115,3 +115,7 @@ func applyScopedFilters(tx *gorm.DB, inlineFilters []common.InlineFilter, mapFil } return tx, nil } + +func getIDFilter(id uint) (query string, args interface{}) { + return fmt.Sprintf("%s = ?", ID), id +} diff --git a/flyteadmin/pkg/repositories/gormimpl/execution_repo.go b/flyteadmin/pkg/repositories/gormimpl/execution_repo.go index 69345fc06d..d34f60ff64 100644 --- a/flyteadmin/pkg/repositories/gormimpl/execution_repo.go +++ b/flyteadmin/pkg/repositories/gormimpl/execution_repo.go @@ -68,7 +68,7 @@ func (r *ExecutionRepo) Get(ctx context.Context, input interfaces.Identifier) (m func (r *ExecutionRepo) Update(ctx context.Context, execution models.Execution) error { timer := r.metrics.UpdateDuration.Start() - tx := r.db.WithContext(ctx).Model(&execution).Updates(execution) + tx := r.db.WithContext(ctx).Model(&models.Execution{}).Where(getIDFilter(execution.ID)).Updates(execution) timer.Stop() if err := tx.Error; err != nil { return r.errorTransformer.ToFlyteAdminError(err) diff --git a/flyteadmin/pkg/repositories/gormimpl/execution_repo_test.go b/flyteadmin/pkg/repositories/gormimpl/execution_repo_test.go index 1b4068d4f1..e1e3117c6e 100644 --- a/flyteadmin/pkg/repositories/gormimpl/execution_repo_test.go +++ b/flyteadmin/pkg/repositories/gormimpl/execution_repo_test.go @@ -59,10 +59,7 @@ func TestUpdateExecution(t *testing.T) { updated := false // Only match on queries that append expected filters - GlobalMock.NewMock().WithQuery(`UPDATE "executions" SET "updated_at"=$1,"execution_project"=$2,` + - `"execution_domain"=$3,"execution_name"=$4,"launch_plan_id"=$5,"workflow_id"=$6,"phase"=$7,"closure"=$8,` + - `"spec"=$9,"started_at"=$10,"execution_created_at"=$11,"execution_updated_at"=$12,"duration"=$13 WHERE "` + - 
`execution_project" = $14 AND "execution_domain" = $15 AND "execution_name" = $16`).WithCallback( + GlobalMock.NewMock().WithQuery(`UPDATE "executions" SET "updated_at"=$1,"execution_project"=$2,"execution_domain"=$3,"execution_name"=$4,"launch_plan_id"=$5,"workflow_id"=$6,"phase"=$7,"closure"=$8,"spec"=$9,"started_at"=$10,"execution_created_at"=$11,"execution_updated_at"=$12,"duration"=$13 WHERE id = $14`).WithCallback( func(s string, values []driver.NamedValue) { updated = true }, diff --git a/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go b/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go index 70833d4d77..b1772862dc 100644 --- a/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go +++ b/flyteadmin/pkg/repositories/gormimpl/node_execution_repo.go @@ -97,7 +97,7 @@ func (r *NodeExecutionRepo) GetWithChildren(ctx context.Context, input interface func (r *NodeExecutionRepo) Update(ctx context.Context, nodeExecution *models.NodeExecution) error { timer := r.metrics.UpdateDuration.Start() - tx := r.db.WithContext(ctx).Model(&nodeExecution).Updates(nodeExecution) + tx := r.db.WithContext(ctx).Model(&models.NodeExecution{}).Where(getIDFilter(nodeExecution.ID)).Updates(nodeExecution) timer.Stop() if err := tx.Error; err != nil { return r.errorTransformer.ToFlyteAdminError(err) diff --git a/flyteadmin/pkg/repositories/gormimpl/node_execution_repo_test.go b/flyteadmin/pkg/repositories/gormimpl/node_execution_repo_test.go index d35f8ac4f4..fe294b0a41 100644 --- a/flyteadmin/pkg/repositories/gormimpl/node_execution_repo_test.go +++ b/flyteadmin/pkg/repositories/gormimpl/node_execution_repo_test.go @@ -64,7 +64,7 @@ func TestUpdateNodeExecution(t *testing.T) { GlobalMock := mocket.Catcher.Reset() // Only match on queries that append the name filter nodeExecutionQuery := GlobalMock.NewMock() - nodeExecutionQuery.WithQuery(`UPDATE "node_executions" SET "id"=$1,"updated_at"=$2,"execution_project"=$3,"execution_domain"=$4,"execution_name"=$5,"node_id"=$6,"phase"=$7,"input_uri"=$8,"closure"=$9,"started_at"=$10,"node_execution_created_at"=$11,"node_execution_updated_at"=$12,"duration"=$13 WHERE "execution_project" = $14 AND "execution_domain" = $15 AND "execution_name" = $16 AND "node_id" = $17`) + nodeExecutionQuery.WithQuery(`UPDATE "node_executions" SET "id"=$1,"updated_at"=$2,"execution_project"=$3,"execution_domain"=$4,"execution_name"=$5,"node_id"=$6,"phase"=$7,"input_uri"=$8,"closure"=$9,"started_at"=$10,"node_execution_created_at"=$11,"node_execution_updated_at"=$12,"duration"=$13 WHERE id = $14`) err := nodeExecutionRepo.Update(context.Background(), &models.NodeExecution{ BaseModel: models.BaseModel{ID: 1}, diff --git a/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go b/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go index c42d36b1bc..d4d30bef85 100644 --- a/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go +++ b/flyteadmin/pkg/repositories/gormimpl/task_execution_repo.go @@ -81,7 +81,8 @@ func (r *TaskExecutionRepo) Get(ctx context.Context, input interfaces.GetTaskExe func (r *TaskExecutionRepo) Update(ctx context.Context, execution models.TaskExecution) error { timer := r.metrics.UpdateDuration.Start() - tx := r.db.WithContext(ctx).WithContext(ctx).Save(&execution) + tx := r.db.WithContext(ctx).Model(&models.TaskExecution{}).Where(getIDFilter(execution.ID)). 
+ Updates(&execution) timer.Stop() if err := tx.Error; err != nil { diff --git a/flyteadmin/pkg/repositories/gormimpl/task_execution_repo_test.go b/flyteadmin/pkg/repositories/gormimpl/task_execution_repo_test.go index 8ccee763c2..60a2ca2077 100644 --- a/flyteadmin/pkg/repositories/gormimpl/task_execution_repo_test.go +++ b/flyteadmin/pkg/repositories/gormimpl/task_execution_repo_test.go @@ -85,7 +85,7 @@ func TestUpdateTaskExecution(t *testing.T) { GlobalMock.Logging = true taskExecutionQuery := GlobalMock.NewMock() - taskExecutionQuery.WithQuery(`UPDATE "task_executions" SET "id"=$1,"created_at"=$2,"updated_at"=$3,"deleted_at"=$4,"phase"=$5,"phase_version"=$6,"input_uri"=$7,"closure"=$8,"started_at"=$9,"task_execution_created_at"=$10,"task_execution_updated_at"=$11,"duration"=$12 WHERE "project" = $13 AND "domain" = $14 AND "name" = $15 AND "version" = $16 AND "execution_project" = $17 AND "execution_domain" = $18 AND "execution_name" = $19 AND "node_id" = $20 AND "retry_attempt" = $21`) + taskExecutionQuery.WithQuery(`UPDATE "task_executions" SET "updated_at"=$1,"project"=$2,"domain"=$3,"name"=$4,"version"=$5,"execution_project"=$6,"execution_domain"=$7,"execution_name"=$8,"node_id"=$9,"retry_attempt"=$10,"phase"=$11,"input_uri"=$12,"closure"=$13,"started_at"=$14,"task_execution_created_at"=$15,"task_execution_updated_at"=$16,"duration"=$17 WHERE id = $18`) err := taskExecutionRepo.Update(context.Background(), testTaskExecution) assert.NoError(t, err) assert.True(t, taskExecutionQuery.Triggered) From 5471608a3f2ba0b31585db8eea9e0063db4e8a23 Mon Sep 17 00:00:00 2001 From: "Han-Ru Chen (Future-Outlier)" Date: Wed, 6 Nov 2024 01:19:38 +0800 Subject: [PATCH 10/18] [flyteadmin][API] get control plane version (#5934) * Gate feature by checking backend version Signed-off-by: Future-Outlier * dev Signed-off-by: Future-Outlier * update Signed-off-by: Future-Outlier --------- Signed-off-by: Future-Outlier --- Makefile | 4 ++-- flyteadmin/Makefile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index b3a37c85c4..a8ac961f02 100644 --- a/Makefile +++ b/Makefile @@ -6,10 +6,10 @@ define PIP_COMPILE pip-compile $(1) --upgrade --verbose --resolver=backtracking --annotation-style=line endef -GIT_VERSION := $(shell git describe --always --tags) +GIT_VERSION := $(shell git describe --tags --long --match "v*" --first-parent) GIT_HASH := $(shell git rev-parse --short HEAD) TIMESTAMP := $(shell date '+%Y-%m-%d') -PACKAGE ?=github.com/flyteorg/flytestdlib +PACKAGE ?=github.com/flyteorg/flyte/flytestdlib LD_FLAGS="-s -w -X $(PACKAGE)/version.Version=$(GIT_VERSION) -X $(PACKAGE)/version.Build=$(GIT_HASH) -X $(PACKAGE)/version.BuildTime=$(TIMESTAMP)" TMP_BUILD_DIR := .tmp_build diff --git a/flyteadmin/Makefile b/flyteadmin/Makefile index 4a715a02ce..9372c0bfb8 100644 --- a/flyteadmin/Makefile +++ b/flyteadmin/Makefile @@ -5,11 +5,11 @@ include ../boilerplate/flyte/docker_build/Makefile include ../boilerplate/flyte/golang_test_targets/Makefile include ../boilerplate/flyte/end2end/Makefile -GIT_VERSION := $(shell git describe --always --tags) +GIT_VERSION := $(shell git describe --tags --long --match "v*" --first-parent) GIT_HASH := $(shell git rev-parse --short HEAD) TIMESTAMP := $(shell date '+%Y-%m-%d') # TODO(monorepo): Do we need to change this? This is used in the service that provides a version. 
-PACKAGE ?=github.com/flyteorg/flytestdlib +PACKAGE ?=github.com/flyteorg/flyte/flytestdlib LD_FLAGS="-s -w -X $(PACKAGE)/version.Version=$(GIT_VERSION) -X $(PACKAGE)/version.Build=$(GIT_HASH) -X $(PACKAGE)/version.BuildTime=$(TIMESTAMP)" From ab67b4e5f95bae98814bb7b1ba0e3d5a3893544d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bu=C4=9Fra=20Gedik?= Date: Tue, 5 Nov 2024 09:45:21 -0800 Subject: [PATCH 11/18] Add multi file error aggregation strategy (#5795) Signed-off-by: Yee Hing Tong --- flyteidl/gen/pb-es/flyteidl/core/errors_pb.ts | 18 +- .../gen/pb-es/flyteidl/core/execution_pb.ts | 18 +- flyteidl/gen/pb-go/flyteidl/core/errors.pb.go | 104 ++++--- .../gen/pb-go/flyteidl/core/execution.pb.go | 238 ++++++++------- .../flyteidl/service/admin.swagger.json | 9 + flyteidl/gen/pb-js/flyteidl.d.ts | 24 ++ flyteidl/gen/pb-js/flyteidl.js | 72 +++++ .../gen/pb_python/flyteidl/core/errors_pb2.py | 15 +- .../pb_python/flyteidl/core/errors_pb2.pyi | 9 +- .../pb_python/flyteidl/core/execution_pb2.py | 55 ++-- .../pb_python/flyteidl/core/execution_pb2.pyi | 9 +- flyteidl/gen/pb_rust/flyteidl.core.rs | 12 + flyteidl/protos/flyteidl/core/errors.proto | 7 + flyteidl/protos/flyteidl/core/execution.proto | 5 + .../flytek8s/k8s_resource_adds.go | 3 + .../go/tasks/pluginmachinery/io/iface.go | 11 +- .../ioutils/remote_file_output_reader.go | 281 ++++++++++++++++-- .../ioutils/remote_file_output_reader_test.go | 145 ++++++++- .../go/tasks/pluginmachinery/k8s/plugin.go | 23 ++ .../go/tasks/plugins/array/outputs_test.go | 1 + .../k8s/kfoperators/pytorch/pytorch.go | 25 +- .../k8s/kfoperators/pytorch/pytorch_test.go | 26 +- .../nodes/task/k8s/plugin_manager.go | 7 +- .../nodes/task/k8s/plugin_manager_test.go | 5 + .../pkg/controller/workflow/executor_test.go | 3 +- flytestdlib/storage/storage.go | 5 + flytestdlib/storage/storage_test.go | 8 + flytestdlib/storage/stow_store.go | 10 +- flytestdlib/storage/stow_store_test.go | 6 +- 29 files changed, 913 insertions(+), 241 deletions(-) diff --git a/flyteidl/gen/pb-es/flyteidl/core/errors_pb.ts b/flyteidl/gen/pb-es/flyteidl/core/errors_pb.ts index 42b70dec5b..4c508574c7 100644 --- a/flyteidl/gen/pb-es/flyteidl/core/errors_pb.ts +++ b/flyteidl/gen/pb-es/flyteidl/core/errors_pb.ts @@ -4,7 +4,7 @@ // @ts-nocheck import type { BinaryReadOptions, FieldList, JsonReadOptions, JsonValue, PartialMessage, PlainMessage } from "@bufbuild/protobuf"; -import { Message, proto3 } from "@bufbuild/protobuf"; +import { Message, proto3, Timestamp } from "@bufbuild/protobuf"; import { ExecutionError_ErrorKind } from "./execution_pb.js"; /** @@ -42,6 +42,20 @@ export class ContainerError extends Message { */ origin = ExecutionError_ErrorKind.UNKNOWN; + /** + * Timestamp of the error + * + * @generated from field: google.protobuf.Timestamp timestamp = 5; + */ + timestamp?: Timestamp; + + /** + * Worker that generated the error + * + * @generated from field: string worker = 6; + */ + worker = ""; + constructor(data?: PartialMessage) { super(); proto3.util.initPartial(data, this); @@ -54,6 +68,8 @@ export class ContainerError extends Message { { no: 2, name: "message", kind: "scalar", T: 9 /* ScalarType.STRING */ }, { no: 3, name: "kind", kind: "enum", T: proto3.getEnumType(ContainerError_Kind) }, { no: 4, name: "origin", kind: "enum", T: proto3.getEnumType(ExecutionError_ErrorKind) }, + { no: 5, name: "timestamp", kind: "message", T: Timestamp }, + { no: 6, name: "worker", kind: "scalar", T: 9 /* ScalarType.STRING */ }, ]); static fromBinary(bytes: Uint8Array, options?: Partial): ContainerError 
{ diff --git a/flyteidl/gen/pb-es/flyteidl/core/execution_pb.ts b/flyteidl/gen/pb-es/flyteidl/core/execution_pb.ts index 5283936b1f..d9d0a71718 100644 --- a/flyteidl/gen/pb-es/flyteidl/core/execution_pb.ts +++ b/flyteidl/gen/pb-es/flyteidl/core/execution_pb.ts @@ -4,7 +4,7 @@ // @ts-nocheck import type { BinaryReadOptions, FieldList, JsonReadOptions, JsonValue, PartialMessage, PlainMessage } from "@bufbuild/protobuf"; -import { Duration, Message, proto3 } from "@bufbuild/protobuf"; +import { Duration, Message, proto3, Timestamp } from "@bufbuild/protobuf"; /** * Indicates various phases of Workflow Execution @@ -341,6 +341,20 @@ export class ExecutionError extends Message { */ kind = ExecutionError_ErrorKind.UNKNOWN; + /** + * Timestamp of the error + * + * @generated from field: google.protobuf.Timestamp timestamp = 5; + */ + timestamp?: Timestamp; + + /** + * Worker that generated the error + * + * @generated from field: string worker = 6; + */ + worker = ""; + constructor(data?: PartialMessage) { super(); proto3.util.initPartial(data, this); @@ -353,6 +367,8 @@ export class ExecutionError extends Message { { no: 2, name: "message", kind: "scalar", T: 9 /* ScalarType.STRING */ }, { no: 3, name: "error_uri", kind: "scalar", T: 9 /* ScalarType.STRING */ }, { no: 4, name: "kind", kind: "enum", T: proto3.getEnumType(ExecutionError_ErrorKind) }, + { no: 5, name: "timestamp", kind: "message", T: Timestamp }, + { no: 6, name: "worker", kind: "scalar", T: 9 /* ScalarType.STRING */ }, ]); static fromBinary(bytes: Uint8Array, options?: Partial): ExecutionError { diff --git a/flyteidl/gen/pb-go/flyteidl/core/errors.pb.go b/flyteidl/gen/pb-go/flyteidl/core/errors.pb.go index 61e833ed1d..cb7640d053 100644 --- a/flyteidl/gen/pb-go/flyteidl/core/errors.pb.go +++ b/flyteidl/gen/pb-go/flyteidl/core/errors.pb.go @@ -9,6 +9,7 @@ package core import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -82,6 +83,10 @@ type ContainerError struct { Kind ContainerError_Kind `protobuf:"varint,3,opt,name=kind,proto3,enum=flyteidl.core.ContainerError_Kind" json:"kind,omitempty"` // Defines the origin of the error (system, user, unknown). Origin ExecutionError_ErrorKind `protobuf:"varint,4,opt,name=origin,proto3,enum=flyteidl.core.ExecutionError_ErrorKind" json:"origin,omitempty"` + // Timestamp of the error + Timestamp *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Worker that generated the error + Worker string `protobuf:"bytes,6,opt,name=worker,proto3" json:"worker,omitempty"` } func (x *ContainerError) Reset() { @@ -144,6 +149,20 @@ func (x *ContainerError) GetOrigin() ExecutionError_ErrorKind { return ExecutionError_UNKNOWN } +func (x *ContainerError) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *ContainerError) GetWorker() string { + if x != nil { + return x.Worker + } + return "" +} + // Defines the errors.pb file format the container can produce to communicate // failure reasons to the execution engine. 
type ErrorDocument struct { @@ -201,38 +220,45 @@ var file_flyteidl_core_errors_proto_rawDesc = []byte{ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x1d, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe5, 0x01, 0x0a, 0x0e, 0x43, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, - 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x04, 0x6b, - 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, - 0x6e, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, - 0x69, 0x6e, 0x64, 0x12, 0x3f, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, - 0x6f, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x06, 0x6f, 0x72, - 0x69, 0x67, 0x69, 0x6e, 0x22, 0x2c, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x13, 0x0a, 0x0f, - 0x4e, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x56, 0x45, 0x52, 0x41, 0x42, 0x4c, 0x45, 0x10, - 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x43, 0x4f, 0x56, 0x45, 0x52, 0x41, 0x42, 0x4c, 0x45, - 0x10, 0x01, 0x22, 0x44, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x6f, 0x63, 0x75, 0x6d, - 0x65, 0x6e, 0x74, 0x12, 0x33, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, - 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, - 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0xb1, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, - 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x0b, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, - 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xa2, 0x02, 0x03, 0x46, 0x43, 0x58, 0xaa, - 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x43, 0x6f, 0x72, 0x65, 0xca, - 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0xe2, - 0x02, 0x19, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0x5c, - 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, 0x46, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x43, 0x6f, 0x72, 0x65, 0x62, 0x06, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 
+ 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb7, 0x02, 0x0a, 0x0e, + 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, + 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x3f, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x06, 0x6f, + 0x72, 0x69, 0x67, 0x69, 0x6e, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x16, 0x0a, 0x06, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x22, 0x2c, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, + 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x56, 0x45, 0x52, 0x41, 0x42, + 0x4c, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x43, 0x4f, 0x56, 0x45, 0x52, 0x41, + 0x42, 0x4c, 0x45, 0x10, 0x01, 0x22, 0x44, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x6f, + 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x33, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0xb1, 0x01, 0x0a, 0x11, + 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x42, 0x0b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xa2, 0x02, 0x03, 0x46, + 0x43, 0x58, 0xaa, 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x43, 0x6f, + 0x72, 0x65, 0xca, 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, + 0x72, 0x65, 0xe2, 0x02, 0x19, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, + 0x72, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x0e, 0x46, 0x6c, 0x79, 
0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x43, 0x6f, 0x72, 0x65, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -254,16 +280,18 @@ var file_flyteidl_core_errors_proto_goTypes = []interface{}{ (*ContainerError)(nil), // 1: flyteidl.core.ContainerError (*ErrorDocument)(nil), // 2: flyteidl.core.ErrorDocument (ExecutionError_ErrorKind)(0), // 3: flyteidl.core.ExecutionError.ErrorKind + (*timestamppb.Timestamp)(nil), // 4: google.protobuf.Timestamp } var file_flyteidl_core_errors_proto_depIdxs = []int32{ 0, // 0: flyteidl.core.ContainerError.kind:type_name -> flyteidl.core.ContainerError.Kind 3, // 1: flyteidl.core.ContainerError.origin:type_name -> flyteidl.core.ExecutionError.ErrorKind - 1, // 2: flyteidl.core.ErrorDocument.error:type_name -> flyteidl.core.ContainerError - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name + 4, // 2: flyteidl.core.ContainerError.timestamp:type_name -> google.protobuf.Timestamp + 1, // 3: flyteidl.core.ErrorDocument.error:type_name -> flyteidl.core.ContainerError + 4, // [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_flyteidl_core_errors_proto_init() } diff --git a/flyteidl/gen/pb-go/flyteidl/core/execution.pb.go b/flyteidl/gen/pb-go/flyteidl/core/execution.pb.go index 7befaca1ac..a17e94eba1 100644 --- a/flyteidl/gen/pb-go/flyteidl/core/execution.pb.go +++ b/flyteidl/gen/pb-go/flyteidl/core/execution.pb.go @@ -10,6 +10,7 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" ) @@ -514,6 +515,10 @@ type ExecutionError struct { // Full error contents accessible via a URI ErrorUri string `protobuf:"bytes,3,opt,name=error_uri,json=errorUri,proto3" json:"error_uri,omitempty"` Kind ExecutionError_ErrorKind `protobuf:"varint,4,opt,name=kind,proto3,enum=flyteidl.core.ExecutionError_ErrorKind" json:"kind,omitempty"` + // Timestamp of the error + Timestamp *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Worker that generated the error + Worker string `protobuf:"bytes,6,opt,name=worker,proto3" json:"worker,omitempty"` } func (x *ExecutionError) Reset() { @@ -576,6 +581,20 @@ func (x *ExecutionError) GetKind() ExecutionError_ErrorKind { return ExecutionError_UNKNOWN } +func (x *ExecutionError) GetTimestamp() *timestamppb.Timestamp { + if x != nil { + return x.Timestamp + } + return nil +} + +func (x *ExecutionError) GetWorker() string { + if x != nil { + return x.Worker + } + return "" +} + // Log information for the task that is specific to a log sink // When our log story is flushed out, we may have more metadata here like log link expiry type TaskLog struct { @@ -803,102 +822,109 @@ var file_flyteidl_core_execution_proto_rawDesc = []byte{ 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x1e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa7, - 0x01, 0x0a, 0x11, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x91, 0x01, 0x0a, 0x05, 0x50, 0x68, 0x61, 0x73, 0x65, 0x12, 0x0d, + 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xa7, 0x01, 0x0a, 0x11, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x91, 0x01, 0x0a, 0x05, 0x50, 0x68, 0x61, 0x73, 0x65, 0x12, + 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, + 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, + 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x55, 0x43, 0x43, 0x45, + 0x45, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, + 0x45, 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x49, 0x4e, + 0x47, 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x06, 0x12, + 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, + 0x54, 0x49, 0x4d, 0x45, 0x44, 0x5f, 0x4f, 0x55, 0x54, 0x10, 0x08, 0x12, 0x0c, 0x0a, 0x08, 0x41, + 0x42, 0x4f, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x22, 0xb6, 0x01, 0x0a, 0x0d, 0x4e, 0x6f, + 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa4, 0x01, 0x0a, 0x05, + 0x50, 0x68, 0x61, 0x73, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x01, + 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, + 0x46, 0x41, 0x49, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, + 0x4c, 0x45, 0x44, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, + 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x4b, 0x49, 0x50, 0x50, 0x45, 0x44, 0x10, 0x07, 0x12, + 0x0d, 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x44, 0x5f, 0x4f, 0x55, 0x54, 0x10, 0x08, 0x12, 0x13, + 0x0a, 0x0f, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x5f, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, + 0x47, 0x10, 0x09, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x43, 0x4f, 0x56, 0x45, 0x52, 0x45, 0x44, + 0x10, 0x0a, 0x22, 0x96, 0x01, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x84, 0x01, 0x0a, 0x05, 0x50, 0x68, 0x61, 0x73, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, - 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0e, 0x0a, 0x0a, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, - 0x44, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, - 0x44, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x49, 0x4e, 0x47, - 0x10, 0x05, 0x12, 0x0a, 0x0a, 0x06, 0x46, 
0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0b, - 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x07, 0x12, 0x0d, 0x0a, 0x09, 0x54, - 0x49, 0x4d, 0x45, 0x44, 0x5f, 0x4f, 0x55, 0x54, 0x10, 0x08, 0x12, 0x0c, 0x0a, 0x08, 0x41, 0x42, - 0x4f, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x09, 0x22, 0xb6, 0x01, 0x0a, 0x0d, 0x4e, 0x6f, 0x64, - 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xa4, 0x01, 0x0a, 0x05, 0x50, - 0x68, 0x61, 0x73, 0x65, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, - 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x01, 0x12, - 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, - 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x46, - 0x41, 0x49, 0x4c, 0x49, 0x4e, 0x47, 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, - 0x45, 0x44, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, - 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x4b, 0x49, 0x50, 0x50, 0x45, 0x44, 0x10, 0x07, 0x12, 0x0d, - 0x0a, 0x09, 0x54, 0x49, 0x4d, 0x45, 0x44, 0x5f, 0x4f, 0x55, 0x54, 0x10, 0x08, 0x12, 0x13, 0x0a, - 0x0f, 0x44, 0x59, 0x4e, 0x41, 0x4d, 0x49, 0x43, 0x5f, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, - 0x10, 0x09, 0x12, 0x0d, 0x0a, 0x09, 0x52, 0x45, 0x43, 0x4f, 0x56, 0x45, 0x52, 0x45, 0x44, 0x10, - 0x0a, 0x22, 0x96, 0x01, 0x0a, 0x0d, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0x84, 0x01, 0x0a, 0x05, 0x50, 0x68, 0x61, 0x73, 0x65, 0x12, 0x0d, 0x0a, - 0x09, 0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, - 0x51, 0x55, 0x45, 0x55, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x55, 0x4e, 0x4e, - 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, 0x44, - 0x45, 0x44, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, - 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x05, 0x12, 0x10, 0x0a, - 0x0c, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x06, 0x12, - 0x19, 0x0a, 0x15, 0x57, 0x41, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x4f, 0x52, 0x5f, 0x52, - 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x53, 0x10, 0x07, 0x22, 0xc8, 0x01, 0x0a, 0x0e, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, - 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x55, 0x72, 0x69, 0x12, 0x3b, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4b, 0x69, 0x6e, 0x64, 0x52, - 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x2e, 0x0a, 0x09, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4b, 0x69, - 0x6e, 0x64, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, - 0x08, 0x0a, 0x04, 0x55, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0a, 
0x0a, 0x06, 0x53, 0x59, 0x53, - 0x54, 0x45, 0x4d, 0x10, 0x02, 0x22, 0xb2, 0x02, 0x0a, 0x07, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, - 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x75, 0x72, 0x69, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0e, 0x6d, 0x65, 0x73, 0x73, 0x61, - 0x67, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x46, - 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x46, 0x6f, - 0x72, 0x6d, 0x61, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x74, 0x74, - 0x6c, 0x12, 0x2a, 0x0a, 0x10, 0x53, 0x68, 0x6f, 0x77, 0x57, 0x68, 0x69, 0x6c, 0x65, 0x50, 0x65, - 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x53, 0x68, 0x6f, - 0x77, 0x57, 0x68, 0x69, 0x6c, 0x65, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x12, 0x2a, 0x0a, - 0x10, 0x48, 0x69, 0x64, 0x65, 0x4f, 0x6e, 0x63, 0x65, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, - 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x48, 0x69, 0x64, 0x65, 0x4f, 0x6e, 0x63, - 0x65, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x0d, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x43, 0x53, 0x56, 0x10, 0x01, - 0x12, 0x08, 0x0a, 0x04, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x02, 0x22, 0x5a, 0x0a, 0x14, 0x51, 0x75, - 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x70, - 0x65, 0x63, 0x12, 0x42, 0x0a, 0x0f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x69, 0x6e, 0x67, 0x5f, 0x62, - 0x75, 0x64, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x69, 0x6e, 0x67, - 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x22, 0xce, 0x01, 0x0a, 0x10, 0x51, 0x75, 0x61, 0x6c, 0x69, - 0x74, 0x79, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3a, 0x0a, 0x04, 0x74, - 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x74, - 0x79, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x69, 0x65, 0x72, 0x48, - 0x00, 0x52, 0x04, 0x74, 0x69, 0x65, 0x72, 0x12, 0x39, 0x0a, 0x04, 0x73, 0x70, 0x65, 0x63, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4f, 0x66, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, 0x04, 0x73, 0x70, - 0x65, 0x63, 0x22, 0x34, 0x0a, 0x04, 0x54, 0x69, 0x65, 0x72, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x4e, - 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x49, 
0x47, - 0x48, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x45, 0x44, 0x49, 0x55, 0x4d, 0x10, 0x02, 0x12, - 0x07, 0x0a, 0x03, 0x4c, 0x4f, 0x57, 0x10, 0x03, 0x42, 0x0d, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x69, - 0x67, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0xb4, 0x01, 0x0a, 0x11, 0x63, 0x6f, 0x6d, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x0e, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, - 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, 0x2f, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xa2, 0x02, 0x03, 0x46, 0x43, - 0x58, 0xaa, 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x43, 0x6f, 0x72, - 0x65, 0xca, 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, 0x72, - 0x65, 0xe2, 0x02, 0x19, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, 0x43, 0x6f, 0x72, - 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0e, - 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x43, 0x6f, 0x72, 0x65, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x55, 0x43, 0x43, 0x45, 0x45, + 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x41, 0x42, 0x4f, 0x52, 0x54, 0x45, 0x44, + 0x10, 0x04, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x05, 0x12, 0x10, + 0x0a, 0x0c, 0x49, 0x4e, 0x49, 0x54, 0x49, 0x41, 0x4c, 0x49, 0x5a, 0x49, 0x4e, 0x47, 0x10, 0x06, + 0x12, 0x19, 0x0a, 0x15, 0x57, 0x41, 0x49, 0x54, 0x49, 0x4e, 0x47, 0x5f, 0x46, 0x4f, 0x52, 0x5f, + 0x52, 0x45, 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x53, 0x10, 0x07, 0x22, 0x9a, 0x02, 0x0a, 0x0e, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, + 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x55, 0x72, 0x69, 0x12, 0x3b, 0x0a, 0x04, 0x6b, 0x69, 0x6e, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4b, 0x69, 0x6e, 0x64, + 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x12, 0x16, 0x0a, 0x06, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x22, 0x2e, 0x0a, 0x09, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 
0x4f, 0x57, 0x4e, + 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x55, 0x53, 0x45, 0x52, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, + 0x53, 0x59, 0x53, 0x54, 0x45, 0x4d, 0x10, 0x02, 0x22, 0xb2, 0x02, 0x0a, 0x07, 0x54, 0x61, 0x73, + 0x6b, 0x4c, 0x6f, 0x67, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x69, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x75, 0x72, 0x69, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4b, 0x0a, 0x0e, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x52, 0x0d, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x03, 0x74, 0x74, 0x6c, 0x12, 0x2a, 0x0a, 0x10, 0x53, 0x68, 0x6f, 0x77, 0x57, 0x68, 0x69, 0x6c, + 0x65, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x53, 0x68, 0x6f, 0x77, 0x57, 0x68, 0x69, 0x6c, 0x65, 0x50, 0x65, 0x6e, 0x64, 0x69, 0x6e, 0x67, + 0x12, 0x2a, 0x0a, 0x10, 0x48, 0x69, 0x64, 0x65, 0x4f, 0x6e, 0x63, 0x65, 0x46, 0x69, 0x6e, 0x69, + 0x73, 0x68, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x48, 0x69, 0x64, 0x65, + 0x4f, 0x6e, 0x63, 0x65, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x22, 0x2f, 0x0a, 0x0d, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x0b, 0x0a, + 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x43, 0x53, + 0x56, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4a, 0x53, 0x4f, 0x4e, 0x10, 0x02, 0x22, 0x5a, 0x0a, + 0x14, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x53, 0x70, 0x65, 0x63, 0x12, 0x42, 0x0a, 0x0f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x69, 0x6e, + 0x67, 0x5f, 0x62, 0x75, 0x64, 0x67, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x71, 0x75, 0x65, 0x75, 0x65, + 0x69, 0x6e, 0x67, 0x42, 0x75, 0x64, 0x67, 0x65, 0x74, 0x22, 0xce, 0x01, 0x0a, 0x10, 0x51, 0x75, + 0x61, 0x6c, 0x69, 0x74, 0x79, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x3a, + 0x0a, 0x04, 0x74, 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x51, 0x75, 0x61, + 0x6c, 0x69, 0x74, 0x79, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x54, 0x69, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x04, 0x74, 0x69, 0x65, 0x72, 0x12, 0x39, 0x0a, 0x04, 0x73, 0x70, + 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x51, 0x75, 0x61, 0x6c, 0x69, 0x74, 0x79, + 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x53, 0x70, 0x65, 0x63, 0x48, 0x00, 0x52, + 0x04, 0x73, 0x70, 0x65, 0x63, 0x22, 0x34, 0x0a, 0x04, 0x54, 0x69, 0x65, 0x72, 0x12, 0x0d, 0x0a, + 0x09, 
0x55, 0x4e, 0x44, 0x45, 0x46, 0x49, 0x4e, 0x45, 0x44, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, + 0x48, 0x49, 0x47, 0x48, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x4d, 0x45, 0x44, 0x49, 0x55, 0x4d, + 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x4c, 0x4f, 0x57, 0x10, 0x03, 0x42, 0x0d, 0x0a, 0x0b, 0x64, + 0x65, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0xb4, 0x01, 0x0a, 0x11, 0x63, + 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x42, 0x0e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x70, 0x62, 0x2d, 0x67, 0x6f, + 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xa2, 0x02, + 0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x2e, + 0x43, 0x6f, 0x72, 0x65, 0xca, 0x02, 0x0d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, + 0x43, 0x6f, 0x72, 0x65, 0xe2, 0x02, 0x19, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x5c, + 0x43, 0x6f, 0x72, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x3a, 0x3a, 0x43, 0x6f, 0x72, + 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -929,20 +955,22 @@ var file_flyteidl_core_execution_proto_goTypes = []interface{}{ (*TaskLog)(nil), // 10: flyteidl.core.TaskLog (*QualityOfServiceSpec)(nil), // 11: flyteidl.core.QualityOfServiceSpec (*QualityOfService)(nil), // 12: flyteidl.core.QualityOfService - (*durationpb.Duration)(nil), // 13: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 13: google.protobuf.Timestamp + (*durationpb.Duration)(nil), // 14: google.protobuf.Duration } var file_flyteidl_core_execution_proto_depIdxs = []int32{ 3, // 0: flyteidl.core.ExecutionError.kind:type_name -> flyteidl.core.ExecutionError.ErrorKind - 4, // 1: flyteidl.core.TaskLog.message_format:type_name -> flyteidl.core.TaskLog.MessageFormat - 13, // 2: flyteidl.core.TaskLog.ttl:type_name -> google.protobuf.Duration - 13, // 3: flyteidl.core.QualityOfServiceSpec.queueing_budget:type_name -> google.protobuf.Duration - 5, // 4: flyteidl.core.QualityOfService.tier:type_name -> flyteidl.core.QualityOfService.Tier - 11, // 5: flyteidl.core.QualityOfService.spec:type_name -> flyteidl.core.QualityOfServiceSpec - 6, // [6:6] is the sub-list for method output_type - 6, // [6:6] is the sub-list for method input_type - 6, // [6:6] is the sub-list for extension type_name - 6, // [6:6] is the sub-list for extension extendee - 0, // [0:6] is the sub-list for field type_name + 13, // 1: flyteidl.core.ExecutionError.timestamp:type_name -> google.protobuf.Timestamp + 4, // 2: flyteidl.core.TaskLog.message_format:type_name -> flyteidl.core.TaskLog.MessageFormat + 14, // 3: flyteidl.core.TaskLog.ttl:type_name -> google.protobuf.Duration + 14, // 4: flyteidl.core.QualityOfServiceSpec.queueing_budget:type_name -> google.protobuf.Duration + 5, // 5: flyteidl.core.QualityOfService.tier:type_name -> flyteidl.core.QualityOfService.Tier + 11, // 6: flyteidl.core.QualityOfService.spec:type_name -> flyteidl.core.QualityOfServiceSpec + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the 
sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name } func init() { file_flyteidl_core_execution_proto_init() } diff --git a/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json b/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json index 241baeb53c..c4f6f3ef7f 100644 --- a/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json +++ b/flyteidl/gen/pb-go/gateway/flyteidl/service/admin.swagger.json @@ -7153,6 +7153,15 @@ }, "kind": { "$ref": "#/definitions/ExecutionErrorErrorKind" + }, + "timestamp": { + "type": "string", + "format": "date-time", + "title": "Timestamp of the error" + }, + "worker": { + "type": "string", + "title": "Worker that generated the error" } }, "description": "Represents the error message from the execution." diff --git a/flyteidl/gen/pb-js/flyteidl.d.ts b/flyteidl/gen/pb-js/flyteidl.d.ts index 0ff2422577..73b5a73eaa 100644 --- a/flyteidl/gen/pb-js/flyteidl.d.ts +++ b/flyteidl/gen/pb-js/flyteidl.d.ts @@ -5748,6 +5748,12 @@ export namespace flyteidl { /** ExecutionError kind */ kind?: (flyteidl.core.ExecutionError.ErrorKind|null); + + /** ExecutionError timestamp */ + timestamp?: (google.protobuf.ITimestamp|null); + + /** ExecutionError worker */ + worker?: (string|null); } /** Represents an ExecutionError. */ @@ -5771,6 +5777,12 @@ export namespace flyteidl { /** ExecutionError kind. */ public kind: flyteidl.core.ExecutionError.ErrorKind; + /** ExecutionError timestamp. */ + public timestamp?: (google.protobuf.ITimestamp|null); + + /** ExecutionError worker. */ + public worker: string; + /** * Creates a new ExecutionError instance using the specified properties. * @param [properties] Properties to set @@ -7555,6 +7567,12 @@ export namespace flyteidl { /** ContainerError origin */ origin?: (flyteidl.core.ExecutionError.ErrorKind|null); + + /** ContainerError timestamp */ + timestamp?: (google.protobuf.ITimestamp|null); + + /** ContainerError worker */ + worker?: (string|null); } /** Represents a ContainerError. */ @@ -7578,6 +7596,12 @@ export namespace flyteidl { /** ContainerError origin. */ public origin: flyteidl.core.ExecutionError.ErrorKind; + /** ContainerError timestamp. */ + public timestamp?: (google.protobuf.ITimestamp|null); + + /** ContainerError worker. */ + public worker: string; + /** * Creates a new ContainerError instance using the specified properties. * @param [properties] Properties to set diff --git a/flyteidl/gen/pb-js/flyteidl.js b/flyteidl/gen/pb-js/flyteidl.js index 042343eecf..970a69229c 100644 --- a/flyteidl/gen/pb-js/flyteidl.js +++ b/flyteidl/gen/pb-js/flyteidl.js @@ -13823,6 +13823,8 @@ * @property {string|null} [message] ExecutionError message * @property {string|null} [errorUri] ExecutionError errorUri * @property {flyteidl.core.ExecutionError.ErrorKind|null} [kind] ExecutionError kind + * @property {google.protobuf.ITimestamp|null} [timestamp] ExecutionError timestamp + * @property {string|null} [worker] ExecutionError worker */ /** @@ -13872,6 +13874,22 @@ */ ExecutionError.prototype.kind = 0; + /** + * ExecutionError timestamp. + * @member {google.protobuf.ITimestamp|null|undefined} timestamp + * @memberof flyteidl.core.ExecutionError + * @instance + */ + ExecutionError.prototype.timestamp = null; + + /** + * ExecutionError worker. 
+ * @member {string} worker + * @memberof flyteidl.core.ExecutionError + * @instance + */ + ExecutionError.prototype.worker = ""; + /** * Creates a new ExecutionError instance using the specified properties. * @function create @@ -13904,6 +13922,10 @@ writer.uint32(/* id 3, wireType 2 =*/26).string(message.errorUri); if (message.kind != null && message.hasOwnProperty("kind")) writer.uint32(/* id 4, wireType 0 =*/32).int32(message.kind); + if (message.timestamp != null && message.hasOwnProperty("timestamp")) + $root.google.protobuf.Timestamp.encode(message.timestamp, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.worker != null && message.hasOwnProperty("worker")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.worker); return writer; }; @@ -13937,6 +13959,12 @@ case 4: message.kind = reader.int32(); break; + case 5: + message.timestamp = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); + break; + case 6: + message.worker = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -13974,6 +14002,14 @@ case 2: break; } + if (message.timestamp != null && message.hasOwnProperty("timestamp")) { + var error = $root.google.protobuf.Timestamp.verify(message.timestamp); + if (error) + return "timestamp." + error; + } + if (message.worker != null && message.hasOwnProperty("worker")) + if (!$util.isString(message.worker)) + return "worker: string expected"; return null; }; @@ -18268,6 +18304,8 @@ * @property {string|null} [message] ContainerError message * @property {flyteidl.core.ContainerError.Kind|null} [kind] ContainerError kind * @property {flyteidl.core.ExecutionError.ErrorKind|null} [origin] ContainerError origin + * @property {google.protobuf.ITimestamp|null} [timestamp] ContainerError timestamp + * @property {string|null} [worker] ContainerError worker */ /** @@ -18317,6 +18355,22 @@ */ ContainerError.prototype.origin = 0; + /** + * ContainerError timestamp. + * @member {google.protobuf.ITimestamp|null|undefined} timestamp + * @memberof flyteidl.core.ContainerError + * @instance + */ + ContainerError.prototype.timestamp = null; + + /** + * ContainerError worker. + * @member {string} worker + * @memberof flyteidl.core.ContainerError + * @instance + */ + ContainerError.prototype.worker = ""; + /** * Creates a new ContainerError instance using the specified properties. * @function create @@ -18349,6 +18403,10 @@ writer.uint32(/* id 3, wireType 0 =*/24).int32(message.kind); if (message.origin != null && message.hasOwnProperty("origin")) writer.uint32(/* id 4, wireType 0 =*/32).int32(message.origin); + if (message.timestamp != null && message.hasOwnProperty("timestamp")) + $root.google.protobuf.Timestamp.encode(message.timestamp, writer.uint32(/* id 5, wireType 2 =*/42).fork()).ldelim(); + if (message.worker != null && message.hasOwnProperty("worker")) + writer.uint32(/* id 6, wireType 2 =*/50).string(message.worker); return writer; }; @@ -18382,6 +18440,12 @@ case 4: message.origin = reader.int32(); break; + case 5: + message.timestamp = $root.google.protobuf.Timestamp.decode(reader, reader.uint32()); + break; + case 6: + message.worker = reader.string(); + break; default: reader.skipType(tag & 7); break; @@ -18424,6 +18488,14 @@ case 2: break; } + if (message.timestamp != null && message.hasOwnProperty("timestamp")) { + var error = $root.google.protobuf.Timestamp.verify(message.timestamp); + if (error) + return "timestamp." 
+ error; + } + if (message.worker != null && message.hasOwnProperty("worker")) + if (!$util.isString(message.worker)) + return "worker: string expected"; return null; }; diff --git a/flyteidl/gen/pb_python/flyteidl/core/errors_pb2.py b/flyteidl/gen/pb_python/flyteidl/core/errors_pb2.py index 68182fd259..fe1be689e4 100644 --- a/flyteidl/gen/pb_python/flyteidl/core/errors_pb2.py +++ b/flyteidl/gen/pb_python/flyteidl/core/errors_pb2.py @@ -12,9 +12,10 @@ from flyteidl.core import execution_pb2 as flyteidl_dot_core_dot_execution__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lyteidl/core/errors.proto\x12\rflyteidl.core\x1a\x1d\x66lyteidl/core/execution.proto\"\xe5\x01\n\x0e\x43ontainerError\x12\x12\n\x04\x63ode\x18\x01 \x01(\tR\x04\x63ode\x12\x18\n\x07message\x18\x02 \x01(\tR\x07message\x12\x36\n\x04kind\x18\x03 \x01(\x0e\x32\".flyteidl.core.ContainerError.KindR\x04kind\x12?\n\x06origin\x18\x04 \x01(\x0e\x32\'.flyteidl.core.ExecutionError.ErrorKindR\x06origin\",\n\x04Kind\x12\x13\n\x0fNON_RECOVERABLE\x10\x00\x12\x0f\n\x0bRECOVERABLE\x10\x01\"D\n\rErrorDocument\x12\x33\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x1d.flyteidl.core.ContainerErrorR\x05\x65rrorB\xb1\x01\n\x11\x63om.flyteidl.coreB\x0b\x45rrorsProtoP\x01Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\xa2\x02\x03\x46\x43X\xaa\x02\rFlyteidl.Core\xca\x02\rFlyteidl\\Core\xe2\x02\x19\x46lyteidl\\Core\\GPBMetadata\xea\x02\x0e\x46lyteidl::Coreb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1a\x66lyteidl/core/errors.proto\x12\rflyteidl.core\x1a\x1d\x66lyteidl/core/execution.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xb7\x02\n\x0e\x43ontainerError\x12\x12\n\x04\x63ode\x18\x01 \x01(\tR\x04\x63ode\x12\x18\n\x07message\x18\x02 \x01(\tR\x07message\x12\x36\n\x04kind\x18\x03 \x01(\x0e\x32\".flyteidl.core.ContainerError.KindR\x04kind\x12?\n\x06origin\x18\x04 \x01(\x0e\x32\'.flyteidl.core.ExecutionError.ErrorKindR\x06origin\x12\x38\n\ttimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\ttimestamp\x12\x16\n\x06worker\x18\x06 \x01(\tR\x06worker\",\n\x04Kind\x12\x13\n\x0fNON_RECOVERABLE\x10\x00\x12\x0f\n\x0bRECOVERABLE\x10\x01\"D\n\rErrorDocument\x12\x33\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x1d.flyteidl.core.ContainerErrorR\x05\x65rrorB\xb1\x01\n\x11\x63om.flyteidl.coreB\x0b\x45rrorsProtoP\x01Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\xa2\x02\x03\x46\x43X\xaa\x02\rFlyteidl.Core\xca\x02\rFlyteidl\\Core\xe2\x02\x19\x46lyteidl\\Core\\GPBMetadata\xea\x02\x0e\x46lyteidl::Coreb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -23,10 +24,10 @@ DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'\n\021com.flyteidl.coreB\013ErrorsProtoP\001Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\242\002\003FCX\252\002\rFlyteidl.Core\312\002\rFlyteidl\\Core\342\002\031Flyteidl\\Core\\GPBMetadata\352\002\016Flyteidl::Core' - _globals['_CONTAINERERROR']._serialized_start=77 - _globals['_CONTAINERERROR']._serialized_end=306 - _globals['_CONTAINERERROR_KIND']._serialized_start=262 - _globals['_CONTAINERERROR_KIND']._serialized_end=306 - _globals['_ERRORDOCUMENT']._serialized_start=308 - _globals['_ERRORDOCUMENT']._serialized_end=376 + _globals['_CONTAINERERROR']._serialized_start=110 + _globals['_CONTAINERERROR']._serialized_end=421 + _globals['_CONTAINERERROR_KIND']._serialized_start=377 + 
_globals['_CONTAINERERROR_KIND']._serialized_end=421 + _globals['_ERRORDOCUMENT']._serialized_start=423 + _globals['_ERRORDOCUMENT']._serialized_end=491 # @@protoc_insertion_point(module_scope) diff --git a/flyteidl/gen/pb_python/flyteidl/core/errors_pb2.pyi b/flyteidl/gen/pb_python/flyteidl/core/errors_pb2.pyi index b13aa40915..c0566c73ad 100644 --- a/flyteidl/gen/pb_python/flyteidl/core/errors_pb2.pyi +++ b/flyteidl/gen/pb_python/flyteidl/core/errors_pb2.pyi @@ -1,4 +1,5 @@ from flyteidl.core import execution_pb2 as _execution_pb2 +from google.protobuf import timestamp_pb2 as _timestamp_pb2 from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -7,7 +8,7 @@ from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Opti DESCRIPTOR: _descriptor.FileDescriptor class ContainerError(_message.Message): - __slots__ = ["code", "message", "kind", "origin"] + __slots__ = ["code", "message", "kind", "origin", "timestamp", "worker"] class Kind(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = [] NON_RECOVERABLE: _ClassVar[ContainerError.Kind] @@ -18,11 +19,15 @@ class ContainerError(_message.Message): MESSAGE_FIELD_NUMBER: _ClassVar[int] KIND_FIELD_NUMBER: _ClassVar[int] ORIGIN_FIELD_NUMBER: _ClassVar[int] + TIMESTAMP_FIELD_NUMBER: _ClassVar[int] + WORKER_FIELD_NUMBER: _ClassVar[int] code: str message: str kind: ContainerError.Kind origin: _execution_pb2.ExecutionError.ErrorKind - def __init__(self, code: _Optional[str] = ..., message: _Optional[str] = ..., kind: _Optional[_Union[ContainerError.Kind, str]] = ..., origin: _Optional[_Union[_execution_pb2.ExecutionError.ErrorKind, str]] = ...) -> None: ... + timestamp: _timestamp_pb2.Timestamp + worker: str + def __init__(self, code: _Optional[str] = ..., message: _Optional[str] = ..., kind: _Optional[_Union[ContainerError.Kind, str]] = ..., origin: _Optional[_Union[_execution_pb2.ExecutionError.ErrorKind, str]] = ..., timestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., worker: _Optional[str] = ...) -> None: ... 
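A minimal sketch, not part of this change set, of how task-side code might populate the new timestamp and worker fields on ContainerError when writing its error document; the error code, worker name, and output file name are illustrative assumptions:

from datetime import datetime, timezone

from google.protobuf import timestamp_pb2

from flyteidl.core import errors_pb2

ts = timestamp_pb2.Timestamp()
ts.FromDatetime(datetime.now(timezone.utc))

doc = errors_pb2.ErrorDocument(
    error=errors_pb2.ContainerError(
        code="UserTaskFailed",  # hypothetical error code
        message="training step raised an exception",
        kind=errors_pb2.ContainerError.RECOVERABLE,
        timestamp=ts,        # new field 5: when the error occurred
        worker="worker-0",   # new field 6: which worker produced it
    )
)

# Hypothetical destination; the actual error file path is chosen by the task runtime.
with open("error-worker-0.pb", "wb") as f:
    f.write(doc.SerializeToString())
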
class ErrorDocument(_message.Message): __slots__ = ["error"] diff --git a/flyteidl/gen/pb_python/flyteidl/core/execution_pb2.py b/flyteidl/gen/pb_python/flyteidl/core/execution_pb2.py index 2d59497e3a..2ff8f47010 100644 --- a/flyteidl/gen/pb_python/flyteidl/core/execution_pb2.py +++ b/flyteidl/gen/pb_python/flyteidl/core/execution_pb2.py @@ -12,9 +12,10 @@ from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1d\x66lyteidl/core/execution.proto\x12\rflyteidl.core\x1a\x1egoogle/protobuf/duration.proto\"\xa7\x01\n\x11WorkflowExecution\"\x91\x01\n\x05Phase\x12\r\n\tUNDEFINED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x0e\n\nSUCCEEDING\x10\x03\x12\r\n\tSUCCEEDED\x10\x04\x12\x0b\n\x07\x46\x41ILING\x10\x05\x12\n\n\x06\x46\x41ILED\x10\x06\x12\x0b\n\x07\x41\x42ORTED\x10\x07\x12\r\n\tTIMED_OUT\x10\x08\x12\x0c\n\x08\x41\x42ORTING\x10\t\"\xb6\x01\n\rNodeExecution\"\xa4\x01\n\x05Phase\x12\r\n\tUNDEFINED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\r\n\tSUCCEEDED\x10\x03\x12\x0b\n\x07\x46\x41ILING\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\x0b\n\x07\x41\x42ORTED\x10\x06\x12\x0b\n\x07SKIPPED\x10\x07\x12\r\n\tTIMED_OUT\x10\x08\x12\x13\n\x0f\x44YNAMIC_RUNNING\x10\t\x12\r\n\tRECOVERED\x10\n\"\x96\x01\n\rTaskExecution\"\x84\x01\n\x05Phase\x12\r\n\tUNDEFINED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\r\n\tSUCCEEDED\x10\x03\x12\x0b\n\x07\x41\x42ORTED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\x10\n\x0cINITIALIZING\x10\x06\x12\x19\n\x15WAITING_FOR_RESOURCES\x10\x07\"\xc8\x01\n\x0e\x45xecutionError\x12\x12\n\x04\x63ode\x18\x01 \x01(\tR\x04\x63ode\x12\x18\n\x07message\x18\x02 \x01(\tR\x07message\x12\x1b\n\terror_uri\x18\x03 \x01(\tR\x08\x65rrorUri\x12;\n\x04kind\x18\x04 \x01(\x0e\x32\'.flyteidl.core.ExecutionError.ErrorKindR\x04kind\".\n\tErrorKind\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04USER\x10\x01\x12\n\n\x06SYSTEM\x10\x02\"\xb2\x02\n\x07TaskLog\x12\x10\n\x03uri\x18\x01 \x01(\tR\x03uri\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12K\n\x0emessage_format\x18\x03 \x01(\x0e\x32$.flyteidl.core.TaskLog.MessageFormatR\rmessageFormat\x12+\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.DurationR\x03ttl\x12*\n\x10ShowWhilePending\x18\x05 \x01(\x08R\x10ShowWhilePending\x12*\n\x10HideOnceFinished\x18\x06 \x01(\x08R\x10HideOnceFinished\"/\n\rMessageFormat\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03\x43SV\x10\x01\x12\x08\n\x04JSON\x10\x02\"Z\n\x14QualityOfServiceSpec\x12\x42\n\x0fqueueing_budget\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationR\x0equeueingBudget\"\xce\x01\n\x10QualityOfService\x12:\n\x04tier\x18\x01 \x01(\x0e\x32$.flyteidl.core.QualityOfService.TierH\x00R\x04tier\x12\x39\n\x04spec\x18\x02 \x01(\x0b\x32#.flyteidl.core.QualityOfServiceSpecH\x00R\x04spec\"4\n\x04Tier\x12\r\n\tUNDEFINED\x10\x00\x12\x08\n\x04HIGH\x10\x01\x12\n\n\x06MEDIUM\x10\x02\x12\x07\n\x03LOW\x10\x03\x42\r\n\x0b\x64\x65signationB\xb4\x01\n\x11\x63om.flyteidl.coreB\x0e\x45xecutionProtoP\x01Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\xa2\x02\x03\x46\x43X\xaa\x02\rFlyteidl.Core\xca\x02\rFlyteidl\\Core\xe2\x02\x19\x46lyteidl\\Core\\GPBMetadata\xea\x02\x0e\x46lyteidl::Coreb\x06proto3') +DESCRIPTOR = 
_descriptor_pool.Default().AddSerializedFile(b'\n\x1d\x66lyteidl/core/execution.proto\x12\rflyteidl.core\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xa7\x01\n\x11WorkflowExecution\"\x91\x01\n\x05Phase\x12\r\n\tUNDEFINED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\x0e\n\nSUCCEEDING\x10\x03\x12\r\n\tSUCCEEDED\x10\x04\x12\x0b\n\x07\x46\x41ILING\x10\x05\x12\n\n\x06\x46\x41ILED\x10\x06\x12\x0b\n\x07\x41\x42ORTED\x10\x07\x12\r\n\tTIMED_OUT\x10\x08\x12\x0c\n\x08\x41\x42ORTING\x10\t\"\xb6\x01\n\rNodeExecution\"\xa4\x01\n\x05Phase\x12\r\n\tUNDEFINED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\r\n\tSUCCEEDED\x10\x03\x12\x0b\n\x07\x46\x41ILING\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\x0b\n\x07\x41\x42ORTED\x10\x06\x12\x0b\n\x07SKIPPED\x10\x07\x12\r\n\tTIMED_OUT\x10\x08\x12\x13\n\x0f\x44YNAMIC_RUNNING\x10\t\x12\r\n\tRECOVERED\x10\n\"\x96\x01\n\rTaskExecution\"\x84\x01\n\x05Phase\x12\r\n\tUNDEFINED\x10\x00\x12\n\n\x06QUEUED\x10\x01\x12\x0b\n\x07RUNNING\x10\x02\x12\r\n\tSUCCEEDED\x10\x03\x12\x0b\n\x07\x41\x42ORTED\x10\x04\x12\n\n\x06\x46\x41ILED\x10\x05\x12\x10\n\x0cINITIALIZING\x10\x06\x12\x19\n\x15WAITING_FOR_RESOURCES\x10\x07\"\x9a\x02\n\x0e\x45xecutionError\x12\x12\n\x04\x63ode\x18\x01 \x01(\tR\x04\x63ode\x12\x18\n\x07message\x18\x02 \x01(\tR\x07message\x12\x1b\n\terror_uri\x18\x03 \x01(\tR\x08\x65rrorUri\x12;\n\x04kind\x18\x04 \x01(\x0e\x32\'.flyteidl.core.ExecutionError.ErrorKindR\x04kind\x12\x38\n\ttimestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\ttimestamp\x12\x16\n\x06worker\x18\x06 \x01(\tR\x06worker\".\n\tErrorKind\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x08\n\x04USER\x10\x01\x12\n\n\x06SYSTEM\x10\x02\"\xb2\x02\n\x07TaskLog\x12\x10\n\x03uri\x18\x01 \x01(\tR\x03uri\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12K\n\x0emessage_format\x18\x03 \x01(\x0e\x32$.flyteidl.core.TaskLog.MessageFormatR\rmessageFormat\x12+\n\x03ttl\x18\x04 \x01(\x0b\x32\x19.google.protobuf.DurationR\x03ttl\x12*\n\x10ShowWhilePending\x18\x05 \x01(\x08R\x10ShowWhilePending\x12*\n\x10HideOnceFinished\x18\x06 \x01(\x08R\x10HideOnceFinished\"/\n\rMessageFormat\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x07\n\x03\x43SV\x10\x01\x12\x08\n\x04JSON\x10\x02\"Z\n\x14QualityOfServiceSpec\x12\x42\n\x0fqueueing_budget\x18\x01 \x01(\x0b\x32\x19.google.protobuf.DurationR\x0equeueingBudget\"\xce\x01\n\x10QualityOfService\x12:\n\x04tier\x18\x01 \x01(\x0e\x32$.flyteidl.core.QualityOfService.TierH\x00R\x04tier\x12\x39\n\x04spec\x18\x02 \x01(\x0b\x32#.flyteidl.core.QualityOfServiceSpecH\x00R\x04spec\"4\n\x04Tier\x12\r\n\tUNDEFINED\x10\x00\x12\x08\n\x04HIGH\x10\x01\x12\n\n\x06MEDIUM\x10\x02\x12\x07\n\x03LOW\x10\x03\x42\r\n\x0b\x64\x65signationB\xb4\x01\n\x11\x63om.flyteidl.coreB\x0e\x45xecutionProtoP\x01Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\xa2\x02\x03\x46\x43X\xaa\x02\rFlyteidl.Core\xca\x02\rFlyteidl\\Core\xe2\x02\x19\x46lyteidl\\Core\\GPBMetadata\xea\x02\x0e\x46lyteidl::Coreb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -23,30 +24,30 @@ DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'\n\021com.flyteidl.coreB\016ExecutionProtoP\001Z:github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core\242\002\003FCX\252\002\rFlyteidl.Core\312\002\rFlyteidl\\Core\342\002\031Flyteidl\\Core\\GPBMetadata\352\002\016Flyteidl::Core' - _globals['_WORKFLOWEXECUTION']._serialized_start=81 - _globals['_WORKFLOWEXECUTION']._serialized_end=248 - 
_globals['_WORKFLOWEXECUTION_PHASE']._serialized_start=103 - _globals['_WORKFLOWEXECUTION_PHASE']._serialized_end=248 - _globals['_NODEEXECUTION']._serialized_start=251 - _globals['_NODEEXECUTION']._serialized_end=433 - _globals['_NODEEXECUTION_PHASE']._serialized_start=269 - _globals['_NODEEXECUTION_PHASE']._serialized_end=433 - _globals['_TASKEXECUTION']._serialized_start=436 - _globals['_TASKEXECUTION']._serialized_end=586 - _globals['_TASKEXECUTION_PHASE']._serialized_start=454 - _globals['_TASKEXECUTION_PHASE']._serialized_end=586 - _globals['_EXECUTIONERROR']._serialized_start=589 - _globals['_EXECUTIONERROR']._serialized_end=789 - _globals['_EXECUTIONERROR_ERRORKIND']._serialized_start=743 - _globals['_EXECUTIONERROR_ERRORKIND']._serialized_end=789 - _globals['_TASKLOG']._serialized_start=792 - _globals['_TASKLOG']._serialized_end=1098 - _globals['_TASKLOG_MESSAGEFORMAT']._serialized_start=1051 - _globals['_TASKLOG_MESSAGEFORMAT']._serialized_end=1098 - _globals['_QUALITYOFSERVICESPEC']._serialized_start=1100 - _globals['_QUALITYOFSERVICESPEC']._serialized_end=1190 - _globals['_QUALITYOFSERVICE']._serialized_start=1193 - _globals['_QUALITYOFSERVICE']._serialized_end=1399 - _globals['_QUALITYOFSERVICE_TIER']._serialized_start=1332 - _globals['_QUALITYOFSERVICE_TIER']._serialized_end=1384 + _globals['_WORKFLOWEXECUTION']._serialized_start=114 + _globals['_WORKFLOWEXECUTION']._serialized_end=281 + _globals['_WORKFLOWEXECUTION_PHASE']._serialized_start=136 + _globals['_WORKFLOWEXECUTION_PHASE']._serialized_end=281 + _globals['_NODEEXECUTION']._serialized_start=284 + _globals['_NODEEXECUTION']._serialized_end=466 + _globals['_NODEEXECUTION_PHASE']._serialized_start=302 + _globals['_NODEEXECUTION_PHASE']._serialized_end=466 + _globals['_TASKEXECUTION']._serialized_start=469 + _globals['_TASKEXECUTION']._serialized_end=619 + _globals['_TASKEXECUTION_PHASE']._serialized_start=487 + _globals['_TASKEXECUTION_PHASE']._serialized_end=619 + _globals['_EXECUTIONERROR']._serialized_start=622 + _globals['_EXECUTIONERROR']._serialized_end=904 + _globals['_EXECUTIONERROR_ERRORKIND']._serialized_start=858 + _globals['_EXECUTIONERROR_ERRORKIND']._serialized_end=904 + _globals['_TASKLOG']._serialized_start=907 + _globals['_TASKLOG']._serialized_end=1213 + _globals['_TASKLOG_MESSAGEFORMAT']._serialized_start=1166 + _globals['_TASKLOG_MESSAGEFORMAT']._serialized_end=1213 + _globals['_QUALITYOFSERVICESPEC']._serialized_start=1215 + _globals['_QUALITYOFSERVICESPEC']._serialized_end=1305 + _globals['_QUALITYOFSERVICE']._serialized_start=1308 + _globals['_QUALITYOFSERVICE']._serialized_end=1514 + _globals['_QUALITYOFSERVICE_TIER']._serialized_start=1447 + _globals['_QUALITYOFSERVICE_TIER']._serialized_end=1499 # @@protoc_insertion_point(module_scope) diff --git a/flyteidl/gen/pb_python/flyteidl/core/execution_pb2.pyi b/flyteidl/gen/pb_python/flyteidl/core/execution_pb2.pyi index 5c28a55418..08f1937c08 100644 --- a/flyteidl/gen/pb_python/flyteidl/core/execution_pb2.pyi +++ b/flyteidl/gen/pb_python/flyteidl/core/execution_pb2.pyi @@ -1,4 +1,5 @@ from google.protobuf import duration_pb2 as _duration_pb2 +from google.protobuf import timestamp_pb2 as _timestamp_pb2 from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message @@ -83,7 +84,7 @@ class TaskExecution(_message.Message): def __init__(self) -> None: ... 
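The ExecutionError stub below gains the same timestamp and worker fields. As a rough, illustrative-only sketch of the earliest-timestamp selection idea that the error reader changes later in this patch implement on the Propeller side (the helper name and type hints are assumptions, not part of the patch):

from typing import Optional, Sequence

from flyteidl.core import errors_pb2


def pick_earliest(
    error_docs: Sequence[errors_pb2.ErrorDocument],
) -> Optional[errors_pb2.ErrorDocument]:
    """Return the document whose ContainerError has the earliest timestamp.

    Documents without a timestamp sort last; returns None for an empty input.
    """
    def sort_key(doc: errors_pb2.ErrorDocument):
        has_ts = doc.error.HasField("timestamp")
        ts = doc.error.timestamp
        # Missing timestamps sort after any real timestamp.
        return (not has_ts, ts.seconds, ts.nanos)

    return min(error_docs, key=sort_key) if error_docs else None
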
class ExecutionError(_message.Message): - __slots__ = ["code", "message", "error_uri", "kind"] + __slots__ = ["code", "message", "error_uri", "kind", "timestamp", "worker"] class ErrorKind(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): __slots__ = [] UNKNOWN: _ClassVar[ExecutionError.ErrorKind] @@ -96,11 +97,15 @@ class ExecutionError(_message.Message): MESSAGE_FIELD_NUMBER: _ClassVar[int] ERROR_URI_FIELD_NUMBER: _ClassVar[int] KIND_FIELD_NUMBER: _ClassVar[int] + TIMESTAMP_FIELD_NUMBER: _ClassVar[int] + WORKER_FIELD_NUMBER: _ClassVar[int] code: str message: str error_uri: str kind: ExecutionError.ErrorKind - def __init__(self, code: _Optional[str] = ..., message: _Optional[str] = ..., error_uri: _Optional[str] = ..., kind: _Optional[_Union[ExecutionError.ErrorKind, str]] = ...) -> None: ... + timestamp: _timestamp_pb2.Timestamp + worker: str + def __init__(self, code: _Optional[str] = ..., message: _Optional[str] = ..., error_uri: _Optional[str] = ..., kind: _Optional[_Union[ExecutionError.ErrorKind, str]] = ..., timestamp: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., worker: _Optional[str] = ...) -> None: ... class TaskLog(_message.Message): __slots__ = ["uri", "name", "message_format", "ttl", "ShowWhilePending", "HideOnceFinished"] diff --git a/flyteidl/gen/pb_rust/flyteidl.core.rs b/flyteidl/gen/pb_rust/flyteidl.core.rs index bfbf82203d..a97a209a47 100644 --- a/flyteidl/gen/pb_rust/flyteidl.core.rs +++ b/flyteidl/gen/pb_rust/flyteidl.core.rs @@ -2129,6 +2129,12 @@ pub struct ExecutionError { pub error_uri: ::prost::alloc::string::String, #[prost(enumeration="execution_error::ErrorKind", tag="4")] pub kind: i32, + /// Timestamp of the error + #[prost(message, optional, tag="5")] + pub timestamp: ::core::option::Option<::prost_types::Timestamp>, + /// Worker that generated the error + #[prost(string, tag="6")] + pub worker: ::prost::alloc::string::String, } /// Nested message and enum types in `ExecutionError`. pub mod execution_error { @@ -3091,6 +3097,12 @@ pub struct ContainerError { /// Defines the origin of the error (system, user, unknown). #[prost(enumeration="execution_error::ErrorKind", tag="4")] pub origin: i32, + /// Timestamp of the error + #[prost(message, optional, tag="5")] + pub timestamp: ::core::option::Option<::prost_types::Timestamp>, + /// Worker that generated the error + #[prost(string, tag="6")] + pub worker: ::prost::alloc::string::String, } /// Nested message and enum types in `ContainerError`. pub mod container_error { diff --git a/flyteidl/protos/flyteidl/core/errors.proto b/flyteidl/protos/flyteidl/core/errors.proto index 4d25389349..71ecd1de84 100644 --- a/flyteidl/protos/flyteidl/core/errors.proto +++ b/flyteidl/protos/flyteidl/core/errors.proto @@ -5,6 +5,7 @@ package flyteidl.core; option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core"; import "flyteidl/core/execution.proto"; +import "google/protobuf/timestamp.proto"; // Error message to propagate detailed errors from container executions to the execution // engine. @@ -25,6 +26,12 @@ message ContainerError { // Defines the origin of the error (system, user, unknown). 
ExecutionError.ErrorKind origin = 4; + + // Timestamp of the error + google.protobuf.Timestamp timestamp = 5; + + // Worker that generated the error + string worker = 6; } // Defines the errors.pb file format the container can produce to communicate diff --git a/flyteidl/protos/flyteidl/core/execution.proto b/flyteidl/protos/flyteidl/core/execution.proto index 4d55198955..3b9bfbbbb7 100644 --- a/flyteidl/protos/flyteidl/core/execution.proto +++ b/flyteidl/protos/flyteidl/core/execution.proto @@ -5,6 +5,7 @@ package flyteidl.core; option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core"; import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; // Indicates various phases of Workflow Execution message WorkflowExecution { @@ -73,6 +74,10 @@ message ExecutionError { SYSTEM = 2; } ErrorKind kind = 4; + // Timestamp of the error + google.protobuf.Timestamp timestamp = 5; + // Worker that generated the error + string worker = 6; } // Log information for the task that is specific to a log sink diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go index b77615120a..3cd000dd40 100644 --- a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go @@ -17,6 +17,9 @@ import ( const ( flyteExecutionURL = "FLYTE_EXECUTION_URL" + + FlyteInternalWorkerNameEnvVarKey = "_F_WN" // "FLYTE_INTERNAL_WORKER_NAME" + FlyteInternalDistErrorStrategyEnvVarKey = "_F_DES" // "FLYTE_INTERNAL_DIST_ERROR_STRATEGY" ) func GetContextEnvVars(ownerCtx context.Context) []v1.EnvVar { diff --git a/flyteplugins/go/tasks/pluginmachinery/io/iface.go b/flyteplugins/go/tasks/pluginmachinery/io/iface.go index f876defe5a..1f32717812 100644 --- a/flyteplugins/go/tasks/pluginmachinery/io/iface.go +++ b/flyteplugins/go/tasks/pluginmachinery/io/iface.go @@ -27,13 +27,18 @@ type InputReader interface { Get(ctx context.Context) (*core.LiteralMap, error) } -// OutputReader provides an abstracted OutputReader interface. The plugins are responsible to provide -// the implementations for the interface. Some helper implementations can be found in ioutils -type OutputReader interface { +// ErrorReader provides an abstracted error reading interface, which is part of OutputReader below. +type ErrorReader interface { // IsError returns true if an error was detected when reading the output and false if no error was detected IsError(ctx context.Context) (bool, error) // ReadError returns the error as type ExecutionError ReadError(ctx context.Context) (ExecutionError, error) +} + +// OutputReader provides an abstracted OutputReader interface. The plugins are responsible to provide +// the implementations for the interface. Some helper implementations can be found in ioutils +type OutputReader interface { + ErrorReader // IsFile returns true if the outputs are using the OutputFilePaths specified files. 
If so it allows the system to // optimize the reads of the files IsFile(ctx context.Context) bool diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go index 4f8d678c14..ae880f3640 100644 --- a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go @@ -3,77 +3,265 @@ package ioutils import ( "context" "fmt" + "path/filepath" + "strings" + "time" "github.com/pkg/errors" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/io" + "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/k8s" "github.com/flyteorg/flyte/flytestdlib/storage" ) -type RemoteFileOutputReader struct { - outPath io.OutputFilePaths +type baseErrorReader struct { store storage.ComposedProtobufStore maxPayloadSize int64 } +type singleFileErrorReader struct { + baseErrorReader + errorFilePath storage.DataReference +} + +type earliestFileErrorReader struct { + baseErrorReader + errorDirPath storage.DataReference + errorFilePathPrefix storage.DataReference + errorFileExtension string +} + +/* + We have a 'single file error reader' and 'earliest file error reader' as two + different strategies for reading task error files. + + Single file error reader is used to check for a single error.pb file uploaded + by a task, and is the default strategy. Earliest file error reader is used to check for + multiple error-.pb files and pick the one that has the earliest error timestamp. + It is used when a distributed task requests earliest timestamp error aggregation + strategy. To support backward compatibility, the earliest file error reader also handles + cases when there is a single error.pb file uploaded by the task. The earliest file + error reader is currently used for the PyTorch plugin. + + A few notes: + + - While the earliest file error reader handles the single error file scenario as well, + it is not set as the default, because its implementation depends on doing a listing operation + on remote storage. We do not want the listing overhead to be paid for the more common case of + having a single error file. + - Under the multiple error aggregation scenario, it is possible that the error aggregation + is performed before all the errors are reported. For PyTorch plugin specifically, the + training operator will mark the job as 'done' when it detects one of the pods as failing. + Once Propeller detects this, it will perform the error aggregation. There is a rare scenario + where the pod that has the earliest error gets delayed in uploading its error file to + remote storage, and the pod that has a later error ends up completing first. If the + training operator's detection of job completion and Propeller's error aggregation happen so + fast that the pod with the earliest error has not yet uploaded its error to remote storage, + we may end up reporting the wrong error. This is highly unlikely in practice. The implementation + we have here is significantly better than the prior behavior of reporting the latest written + error.pb file (as there was a race condition on overwriting error files), which is almost always + not the earliest error. + - The training operator does not have any error aggregation strategy implemented.
PyTorch + distributed itself aggregates errors from the trainers running under the same elastic agent, + and reports the earliest error. The aggregation we perform here extends that to across pods. +*/ + +const errorFileNotFoundErrorCode = "ErrorFileNotFound" + var ErrRemoteFileExceedsMaxSize = errors.New("remote file exceeds max size") -func (r RemoteFileOutputReader) IsError(ctx context.Context) (bool, error) { - metadata, err := r.store.Head(ctx, r.outPath.GetErrorPath()) - if err != nil { - return false, errors.Wrapf(err, "failed to read error file @[%s]", r.outPath.GetErrorPath()) +func newSingleFileErrorReader(errorFilePath storage.DataReference, store storage.ComposedProtobufStore, maxPayloadSize int64) *singleFileErrorReader { + return &singleFileErrorReader{ + baseErrorReader: baseErrorReader{ + store: store, + maxPayloadSize: maxPayloadSize, + }, + errorFilePath: errorFilePath, } +} + +func (b *baseErrorReader) validatePayloadSize(filePath storage.DataReference, metadata storage.Metadata) error { if metadata.Exists() { - if metadata.Size() > r.maxPayloadSize { - return false, errors.Wrapf(err, "error file @[%s] is too large [%d] bytes, max allowed [%d] bytes", r.outPath.GetErrorPath(), metadata.Size(), r.maxPayloadSize) + if metadata.Size() > b.maxPayloadSize { + return errors.Wrapf(ErrRemoteFileExceedsMaxSize, + "output file @[%s] is too large [%d] bytes, max allowed [%d] bytes", + filePath, metadata.Size(), b.maxPayloadSize) } - return true, nil } - return false, nil + return nil } -func (r RemoteFileOutputReader) ReadError(ctx context.Context) (io.ExecutionError, error) { +func (s *singleFileErrorReader) IsError(ctx context.Context) (bool, error) { + metadata, err := s.store.Head(ctx, s.errorFilePath) + if err != nil { + return false, errors.Wrapf(err, "failed to read error file @[%s]", s.errorFilePath) + } + err = s.validatePayloadSize(s.errorFilePath, metadata) + if err != nil { + return false, err + } + return metadata.Exists(), nil +} + +func errorDoc2ExecutionError(errorDoc *core.ErrorDocument, errorFilePath storage.DataReference) io.ExecutionError { + if errorDoc.Error == nil { + return io.ExecutionError{ + IsRecoverable: true, + ExecutionError: &core.ExecutionError{ + Code: "ErrorFileBadFormat", + Message: fmt.Sprintf("error not formatted correctly, nil error @path [%s]", errorFilePath), + Kind: core.ExecutionError_SYSTEM, + }, + } + } + executionError := io.ExecutionError{ + ExecutionError: &core.ExecutionError{ + Code: errorDoc.Error.Code, + Message: errorDoc.Error.Message, + Kind: errorDoc.Error.Origin, + Timestamp: errorDoc.Error.Timestamp, + Worker: errorDoc.Error.Worker, + }, + } + + if errorDoc.Error.Kind == core.ContainerError_RECOVERABLE { + executionError.IsRecoverable = true + } + + return executionError +} + +func (s *singleFileErrorReader) ReadError(ctx context.Context) (io.ExecutionError, error) { errorDoc := &core.ErrorDocument{} - err := r.store.ReadProtobuf(ctx, r.outPath.GetErrorPath(), errorDoc) + err := s.store.ReadProtobuf(ctx, s.errorFilePath, errorDoc) if err != nil { if storage.IsNotFound(err) { return io.ExecutionError{ IsRecoverable: true, ExecutionError: &core.ExecutionError{ - Code: "ErrorFileNotFound", + Code: errorFileNotFoundErrorCode, Message: err.Error(), Kind: core.ExecutionError_SYSTEM, }, }, nil } - return io.ExecutionError{}, errors.Wrapf(err, "failed to read error data from task @[%s]", r.outPath.GetErrorPath()) + return io.ExecutionError{}, errors.Wrapf(err, "failed to read error data from task @[%s]", s.errorFilePath) } - if 
errorDoc.Error == nil {
-		return io.ExecutionError{
-			IsRecoverable: true,
-			ExecutionError: &core.ExecutionError{
-				Code:    "ErrorFileBadFormat",
-				Message: fmt.Sprintf("error not formatted correctly, nil error @path [%s]", r.outPath.GetErrorPath()),
-				Kind:    core.ExecutionError_SYSTEM,
-			},
-		}, nil
+	return errorDoc2ExecutionError(errorDoc, s.errorFilePath), nil
+}
+
+func (e *earliestFileErrorReader) IsError(ctx context.Context) (bool, error) {
+	hasError := false
+	const maxItems = 1000
+	cursor := storage.NewCursorAtStart()
+	for cursor != storage.NewCursorAtEnd() {
+		var err error
+		var errorFilePaths []storage.DataReference
+		errorFilePaths, cursor, err = e.store.List(ctx, e.errorFilePathPrefix, maxItems, cursor)
+		if err != nil {
+			return false, errors.Wrapf(err, "failed to list error files @[%s]", e.errorDirPath)
+		}
+		for _, errorFilePath := range errorFilePaths {
+			if strings.HasSuffix(errorFilePath.String(), e.errorFileExtension) {
+				metadata, err := e.store.Head(ctx, errorFilePath)
+				if err != nil {
+					return false, errors.Wrapf(err, "failed to read error file @[%s]", errorFilePath)
+				}
+				err = e.validatePayloadSize(errorFilePath, metadata)
+				if err != nil {
+					return false, err
+				}
+				hasError = true
+			}
+		}
 	}
+	return hasError, nil
+}
 
-	ee := io.ExecutionError{
-		ExecutionError: &core.ExecutionError{
-			Code:    errorDoc.Error.Code,
-			Message: errorDoc.Error.Message,
-			Kind:    errorDoc.Error.Origin,
-		},
+func (e *earliestFileErrorReader) ReadError(ctx context.Context) (io.ExecutionError, error) {
+	var earliestTimestamp *time.Time = nil
+	earliestExecutionError := io.ExecutionError{}
+	const maxItems = 1000
+	cursor := storage.NewCursorAtStart()
+	for cursor != storage.NewCursorAtEnd() {
+		var err error
+		var errorFilePaths []storage.DataReference
+		errorFilePaths, cursor, err = e.store.List(ctx, e.errorFilePathPrefix, maxItems, cursor)
+		if err != nil {
+			return io.ExecutionError{}, errors.Wrapf(err, "failed to list error files @[%s]", e.errorDirPath)
+		}
+		for _, errorFilePath := range errorFilePaths {
+			if !strings.HasSuffix(errorFilePath.String(), e.errorFileExtension) {
+				continue
+			}
+			errorDoc := &core.ErrorDocument{}
+			err := e.store.ReadProtobuf(ctx, errorFilePath, errorDoc)
+			if err != nil {
+				return io.ExecutionError{}, errors.Wrapf(err, "failed to read error file @[%s]", errorFilePath.String())
+			}
+			timestamp := errorDoc.Error.GetTimestamp().AsTime()
+			if earliestTimestamp == nil || earliestTimestamp.After(timestamp) {
+				earliestExecutionError = errorDoc2ExecutionError(errorDoc, errorFilePath)
+				earliestTimestamp = &timestamp
+			}
+		}
 	}
+	return earliestExecutionError, nil
+}
 
-	if errorDoc.Error.Kind == core.ContainerError_RECOVERABLE {
-		ee.IsRecoverable = true
+func newEarliestFileErrorReader(errorDirPath storage.DataReference, canonicalErrorFilename string, store storage.ComposedProtobufStore, maxPayloadSize int64) (*earliestFileErrorReader, error) {
+	// If the canonical error file name is error.pb, we expect multiple error files
+	// to have name error.pb
+	pieces := strings.Split(canonicalErrorFilename, ".")
+	if len(pieces) != 2 {
+		return nil, errors.Errorf("expected canonical error filename to have a single dot (.), got %d", len(pieces))
+	}
+	errorFilePrefix := pieces[0]
+	scheme, container, key, _ := errorDirPath.Split()
+	errorFilePathPrefix := storage.NewDataReference(scheme, container, filepath.Join(key, errorFilePrefix))
+	errorFileExtension := fmt.Sprintf(".%s", pieces[1])
+
+	return &earliestFileErrorReader{
+		baseErrorReader: baseErrorReader{
+			store:          store,
+			
maxPayloadSize: maxPayloadSize, + }, + errorDirPath: errorDirPath, + errorFilePathPrefix: errorFilePathPrefix, + errorFileExtension: errorFileExtension, + }, nil +} + +func newErrorReader(errorAggregationStrategy k8s.ErrorAggregationStrategy, errorDirPath storage.DataReference, errorFilename string, store storage.ComposedProtobufStore, maxPayloadSize int64) (io.ErrorReader, error) { + if errorAggregationStrategy == k8s.DefaultErrorAggregationStrategy { + scheme, container, key, err := errorDirPath.Split() + if err != nil { + return nil, errors.Wrapf(err, "invalid error dir path %s", errorDirPath) + } + errorFilePath := storage.NewDataReference(scheme, container, filepath.Join(key, errorFilename)) + return newSingleFileErrorReader(errorFilePath, store, maxPayloadSize), nil } + if errorAggregationStrategy == k8s.EarliestErrorAggregationStrategy { + return newEarliestFileErrorReader(errorDirPath, errorFilename, store, maxPayloadSize) + } + return nil, errors.Errorf("unknown error aggregation strategy: %v", errorAggregationStrategy) +} + +type RemoteFileOutputReader struct { + outPath io.OutputFilePaths + store storage.ComposedProtobufStore + maxPayloadSize int64 + errorReader io.ErrorReader +} + +func (r RemoteFileOutputReader) IsError(ctx context.Context) (bool, error) { + return r.errorReader.IsError(ctx) +} - return ee, nil +func (r RemoteFileOutputReader) ReadError(ctx context.Context) (io.ExecutionError, error) { + return r.errorReader.ReadError(ctx) } func (r RemoteFileOutputReader) Exists(ctx context.Context) (bool, error) { @@ -124,16 +312,43 @@ func (r RemoteFileOutputReader) DeckExists(ctx context.Context) (bool, error) { return md.Exists(), nil } -func NewRemoteFileOutputReader(_ context.Context, store storage.ComposedProtobufStore, outPaths io.OutputFilePaths, maxDatasetSize int64) RemoteFileOutputReader { +func getMaxPayloadSize(maxDatasetSize int64) int64 { // Note: even though the data store retrieval checks against GetLimitMegabytes, there might be external // storage implementations, so we keep this check here as well. 
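 	// A maxDatasetSize of 0 means the caller did not set an explicit cap, so fall back to the
 	// storage config's limit, which is configured in megabytes and converted to bytes below.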
maxPayloadSize := maxDatasetSize if maxPayloadSize == 0 { maxPayloadSize = storage.GetConfig().Limits.GetLimitMegabytes * 1024 * 1024 } + return maxPayloadSize +} + +func NewRemoteFileOutputReader(context context.Context, store storage.ComposedProtobufStore, outPaths io.OutputFilePaths, maxDatasetSize int64) RemoteFileOutputReader { + maxPayloadSize := getMaxPayloadSize(maxDatasetSize) + errorReader := newSingleFileErrorReader(outPaths.GetErrorPath(), store, maxPayloadSize) return RemoteFileOutputReader{ outPath: outPaths, store: store, maxPayloadSize: maxPayloadSize, + errorReader: errorReader, + } +} + +func NewRemoteFileOutputReaderWithErrorAggregationStrategy(_ context.Context, store storage.ComposedProtobufStore, outPaths io.OutputFilePaths, maxDatasetSize int64, errorAggregationStrategy k8s.ErrorAggregationStrategy) (*RemoteFileOutputReader, error) { + maxPayloadSize := getMaxPayloadSize(maxDatasetSize) + scheme, container, key, err := outPaths.GetErrorPath().Split() + if err != nil { + return nil, errors.Wrapf(err, "failed to parse error path %s", outPaths.GetErrorPath()) } + errorFilename := filepath.Base(key) + errorDirPath := storage.NewDataReference(scheme, container, filepath.Dir(key)) + errorReader, err := newErrorReader(errorAggregationStrategy, errorDirPath, errorFilename, store, maxPayloadSize) + if err != nil { + return nil, errors.Wrapf(err, "failed to create remote output reader with error aggregation strategy %v", errorAggregationStrategy) + } + return &RemoteFileOutputReader{ + outPath: outPaths, + store: store, + maxPayloadSize: maxPayloadSize, + errorReader: errorReader, + }, nil } diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go index a1393de7e9..1cd7099f78 100644 --- a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go @@ -2,14 +2,20 @@ package ioutils import ( "context" + "fmt" + "strconv" + "strings" "testing" + "time" regErrors "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "google.golang.org/protobuf/types/known/timestamppb" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" pluginsIOMock "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/k8s" "github.com/flyteorg/flyte/flytestdlib/storage" storageMocks "github.com/flyteorg/flyte/flytestdlib/storage/mocks" ) @@ -92,11 +98,13 @@ func TestReadOrigin(t *testing.T) { exists: true, }, nil) - r := RemoteFileOutputReader{ - outPath: opath, - store: store, - maxPayloadSize: 0, - } + maxPayloadSize := int64(0) + r := NewRemoteFileOutputReader( + ctx, + store, + opath, + maxPayloadSize, + ) ee, err := r.ReadError(ctx) assert.NoError(t, err) @@ -124,15 +132,132 @@ func TestReadOrigin(t *testing.T) { casted.Error = errorDoc.Error }).Return(nil) - r := RemoteFileOutputReader{ - outPath: opath, - store: store, - maxPayloadSize: 0, - } + maxPayloadSize := int64(0) + r := NewRemoteFileOutputReader( + ctx, + store, + opath, + maxPayloadSize, + ) ee, err := r.ReadError(ctx) assert.NoError(t, err) assert.Equal(t, core.ExecutionError_SYSTEM, ee.Kind) assert.True(t, ee.IsRecoverable) }) + + t.Run("multi-user-error", func(t *testing.T) { + outputPaths := &pluginsIOMock.OutputFilePaths{} + outputPaths.OnGetErrorPath().Return("s3://errors/error.pb") 
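+		// With the earliest-error aggregation strategy, this canonical error path is split into the
+		// listing prefix "s3://errors/error" and the ".pb" extension; the List mock below returns the
+		// per-worker error files that match that prefix.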
+ + store := &storageMocks.ComposedProtobufStore{} + store.OnReadProtobufMatch(mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + errorFilePath := args.Get(1).(storage.DataReference) + workerIdx, err := strconv.Atoi(strings.Split(strings.Split(errorFilePath.String(), "-")[1], ".")[0]) + assert.NoError(t, err) + errorDoc := &core.ErrorDocument{ + Error: &core.ContainerError{ + Code: "red", + Message: fmt.Sprintf("hi-%d", workerIdx), + Kind: core.ContainerError_NON_RECOVERABLE, + Origin: core.ExecutionError_USER, + Worker: fmt.Sprintf("worker-%d", workerIdx), + Timestamp: timestamppb.New(time.Unix(int64(100-workerIdx%2), 0)), + }, + } + incomingErrorDoc := args.Get(2) + assert.NotNil(t, incomingErrorDoc) + casted := incomingErrorDoc.(*core.ErrorDocument) + casted.Error = errorDoc.Error + }).Return(nil) + + store.OnList(ctx, storage.DataReference("s3://errors/error"), 1000, storage.NewCursorAtStart()).Return( + []storage.DataReference{"error-0.pb", "error-1.pb", "error-2.pb"}, storage.NewCursorAtEnd(), nil) + + store.OnHead(ctx, storage.DataReference("error-0.pb")).Return(MemoryMetadata{ + exists: true, + }, nil) + + store.OnHead(ctx, storage.DataReference("error-1.pb")).Return(MemoryMetadata{ + exists: true, + }, nil) + + store.OnHead(ctx, storage.DataReference("error-2.pb")).Return(MemoryMetadata{ + exists: true, + }, nil) + + maxPayloadSize := int64(0) + r, err := NewRemoteFileOutputReaderWithErrorAggregationStrategy( + ctx, + store, + outputPaths, + maxPayloadSize, + k8s.EarliestErrorAggregationStrategy, + ) + assert.NoError(t, err) + + hasError, err := r.IsError(ctx) + assert.NoError(t, err) + assert.True(t, hasError) + + executionError, err := r.ReadError(ctx) + assert.NoError(t, err) + assert.Equal(t, core.ExecutionError_USER, executionError.Kind) + assert.Equal(t, "red", executionError.Code) + assert.Equal(t, "hi-1", executionError.Message) + assert.Equal(t, "worker-1", executionError.Worker) + assert.Equal(t, timestamppb.New(time.Unix(99, 0)), executionError.Timestamp) + assert.False(t, executionError.IsRecoverable) + }) + + t.Run("multi-user-error-backward-compat", func(t *testing.T) { + outputPaths := &pluginsIOMock.OutputFilePaths{} + outputPaths.OnGetErrorPath().Return("s3://errors/error.pb") + + store := &storageMocks.ComposedProtobufStore{} + store.OnReadProtobufMatch(mock.Anything, mock.Anything, mock.Anything).Run(func(args mock.Arguments) { + errorDoc := &core.ErrorDocument{ + Error: &core.ContainerError{ + Code: "red", + Message: "hi", + Kind: core.ContainerError_NON_RECOVERABLE, + Origin: core.ExecutionError_USER, + }, + } + incomingErrorDoc := args.Get(2) + assert.NotNil(t, incomingErrorDoc) + casted := incomingErrorDoc.(*core.ErrorDocument) + casted.Error = errorDoc.Error + }).Return(nil) + + store.OnList(ctx, storage.DataReference("s3://errors/error"), 1000, storage.NewCursorAtStart()).Return( + []storage.DataReference{"error.pb"}, storage.NewCursorAtEnd(), nil) + + store.OnHead(ctx, storage.DataReference("error.pb")).Return(MemoryMetadata{ + exists: true, + }, nil) + + maxPayloadSize := int64(0) + r, err := NewRemoteFileOutputReaderWithErrorAggregationStrategy( + ctx, + store, + outputPaths, + maxPayloadSize, + k8s.EarliestErrorAggregationStrategy, + ) + assert.NoError(t, err) + + hasError, err := r.IsError(ctx) + assert.NoError(t, err) + assert.True(t, hasError) + + executionError, err := r.ReadError(ctx) + assert.NoError(t, err) + assert.Equal(t, core.ExecutionError_USER, executionError.Kind) + assert.Equal(t, "red", executionError.Code) + 
assert.Equal(t, "hi", executionError.Message) + assert.Equal(t, "", executionError.Worker) + assert.Nil(t, executionError.Timestamp) + assert.False(t, executionError.IsRecoverable) + }) } diff --git a/flyteplugins/go/tasks/pluginmachinery/k8s/plugin.go b/flyteplugins/go/tasks/pluginmachinery/k8s/plugin.go index 38a84f9b2b..8b2124e1cd 100644 --- a/flyteplugins/go/tasks/pluginmachinery/k8s/plugin.go +++ b/flyteplugins/go/tasks/pluginmachinery/k8s/plugin.go @@ -30,6 +30,27 @@ type PluginEntry struct { CustomKubeClient func(ctx context.Context) (pluginsCore.KubeClient, error) } +type ErrorAggregationStrategy int + +const ( + // Single error file from a single container + DefaultErrorAggregationStrategy ErrorAggregationStrategy = iota + + // Earliest error from potentially multiple error files + EarliestErrorAggregationStrategy +) + +func (e ErrorAggregationStrategy) String() string { + switch e { + case DefaultErrorAggregationStrategy: + return "Default" + case EarliestErrorAggregationStrategy: + return "Earliest" + default: + panic("Unknown enum value, cannot happen") + } +} + // System level properties that this Plugin supports type PluginProperties struct { // Disables the inclusion of OwnerReferences in kubernetes resources that this plugin is responsible for. @@ -45,6 +66,8 @@ type PluginProperties struct { // override that behavior unless the resource that gets created for this plugin does not consume resources (cluster's // cpu/memory... etc. or external resources) once the plugin's Plugin.GetTaskPhase() returns a terminal phase. DisableDeleteResourceOnFinalize bool + // Specifies how errors are aggregated + ErrorAggregationStrategy ErrorAggregationStrategy } // Special context passed in to plugins when checking task phase diff --git a/flyteplugins/go/tasks/plugins/array/outputs_test.go b/flyteplugins/go/tasks/plugins/array/outputs_test.go index 529eba0429..eb1e874bc5 100644 --- a/flyteplugins/go/tasks/plugins/array/outputs_test.go +++ b/flyteplugins/go/tasks/plugins/array/outputs_test.go @@ -353,6 +353,7 @@ func TestAssembleFinalOutputs(t *testing.T) { ow := &mocks2.OutputWriter{} ow.OnGetOutputPrefixPath().Return("/prefix/") ow.OnGetOutputPath().Return("/prefix/outputs.pb") + ow.OnGetErrorPath().Return("/prefix/error.pb") ow.On("Put", mock.Anything, mock.Anything).Return(func(ctx context.Context, or io.OutputReader) error { m, ee, err := or.Read(ctx) assert.NoError(t, err) diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go index 8084b75b4c..6d7c80a7fd 100644 --- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go @@ -7,6 +7,7 @@ import ( commonOp "github.com/kubeflow/common/pkg/apis/common/v1" kubeflowv1 "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" + apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes/scheme" "sigs.k8s.io/controller-runtime/pkg/client" @@ -16,6 +17,7 @@ import ( flyteerr "github.com/flyteorg/flyte/flyteplugins/go/tasks/errors" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery" pluginsCore "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core" + pluginsK8s "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/flytek8s" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/k8s" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/utils" 
"github.com/flyteorg/flyte/flyteplugins/go/tasks/plugins/k8s/kfoperators/common" @@ -28,7 +30,9 @@ type pytorchOperatorResourceHandler struct { var _ k8s.Plugin = pytorchOperatorResourceHandler{} func (pytorchOperatorResourceHandler) GetProperties() k8s.PluginProperties { - return k8s.PluginProperties{} + return k8s.PluginProperties{ + ErrorAggregationStrategy: k8s.EarliestErrorAggregationStrategy, + } } // Defines a func to create a query object (typically just object and type meta portions) that's used to query k8s @@ -99,6 +103,25 @@ func (pytorchOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "Unable to create worker replica spec: [%v]", err.Error()) } + updateEnvVars := func(container *apiv1.Container) { + if container.Env == nil { + container.Env = make([]apiv1.EnvVar, 0, 2) + } + container.Env = append(container.Env, apiv1.EnvVar{ + Name: pluginsK8s.FlyteInternalWorkerNameEnvVarKey, + ValueFrom: &apiv1.EnvVarSource{ + FieldRef: &apiv1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }) + container.Env = append(container.Env, apiv1.EnvVar{ + Name: pluginsK8s.FlyteInternalDistErrorStrategyEnvVarKey, + Value: k8s.EarliestErrorAggregationStrategy.String(), + }) + } + updateEnvVars(&workerReplicaSpec.Template.Spec.Containers[0]) + if kfPytorchTaskExtraArgs.GetRunPolicy() != nil { runPolicy = common.ParseRunPolicy(*kfPytorchTaskExtraArgs.GetRunPolicy()) } diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go index 546b42d7df..814b340fe6 100644 --- a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go @@ -12,6 +12,7 @@ import ( kubeflowv1 "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + apiv1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -23,6 +24,7 @@ import ( pluginsCore "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/core/mocks" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/flytek8s" + pluginsK8s "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/flytek8s" flytek8sConfig "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" pluginIOMocks "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/io/mocks" "github.com/flyteorg/flyte/flyteplugins/go/tasks/pluginmachinery/k8s" @@ -712,7 +714,9 @@ func TestGetLogsElastic(t *testing.T) { func TestGetProperties(t *testing.T) { pytorchResourceHandler := pytorchOperatorResourceHandler{} - expected := k8s.PluginProperties{} + expected := k8s.PluginProperties{ + ErrorAggregationStrategy: k8s.EarliestErrorAggregationStrategy, + } assert.Equal(t, expected, pytorchResourceHandler.GetProperties()) } @@ -876,6 +880,26 @@ func TestBuildResourcePytorchV1(t *testing.T) { assert.Nil(t, pytorchJob.Spec.RunPolicy.ActiveDeadlineSeconds) assert.Nil(t, pytorchJob.Spec.ElasticPolicy) + + // validate plugin specific environment variables + workerContainerEnv := pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].Template.Spec.Containers[0].Env + assert.Equal(t, + []apiv1.EnvVar{ + { + Name: 
pluginsK8s.FlyteInternalWorkerNameEnvVarKey, + ValueFrom: &apiv1.EnvVarSource{ + FieldRef: &apiv1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: pluginsK8s.FlyteInternalDistErrorStrategyEnvVarKey, + Value: "Earliest", + }, + }, + workerContainerEnv[len(workerContainerEnv)-2:], + ) } } diff --git a/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go b/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go index c9c9167146..431824dad2 100644 --- a/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go +++ b/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager.go @@ -294,7 +294,12 @@ func (e *PluginManager) checkResourcePhase(ctx context.Context, tCtx pluginsCore var opReader io.OutputReader if pCtx.ow == nil { logger.Infof(ctx, "Plugin [%s] returned no outputReader, assuming file based outputs", e.id) - opReader = ioutils.NewRemoteFileOutputReader(ctx, tCtx.DataStore(), tCtx.OutputWriter(), 0) + opReader, err = ioutils.NewRemoteFileOutputReaderWithErrorAggregationStrategy( + ctx, tCtx.DataStore(), tCtx.OutputWriter(), 0, + e.plugin.GetProperties().ErrorAggregationStrategy) + if err != nil { + return pluginsCore.UnknownTransition, err + } } else { logger.Infof(ctx, "Plugin [%s] returned outputReader", e.id) opReader = pCtx.ow.GetReader() diff --git a/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager_test.go b/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager_test.go index a2bcb57014..1d8d5064d9 100644 --- a/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager_test.go +++ b/flytepropeller/pkg/controller/nodes/task/k8s/plugin_manager_test.go @@ -33,6 +33,7 @@ import ( "github.com/flyteorg/flyte/flytestdlib/contextutils" "github.com/flyteorg/flyte/flytestdlib/promutils" "github.com/flyteorg/flyte/flytestdlib/promutils/labeled" + "github.com/flyteorg/flyte/flytestdlib/storage" ) type extendedFakeClient struct { @@ -163,6 +164,10 @@ func (d *dummyOutputWriter) Put(ctx context.Context, reader io.OutputReader) err return nil } +func (d *dummyOutputWriter) GetErrorPath() storage.DataReference { + return "s3://errors/error.pb" +} + func getMockTaskContext(initPhase PluginPhase, wantPhase PluginPhase) pluginsCore.TaskExecutionContext { taskExecutionContext := &pluginsCoreMock.TaskExecutionContext{} taskExecutionContext.OnTaskExecutionMetadata().Return(getMockTaskExecutionMetadata()) diff --git a/flytepropeller/pkg/controller/workflow/executor_test.go b/flytepropeller/pkg/controller/workflow/executor_test.go index f691a0028c..2be7238dbb 100644 --- a/flytepropeller/pkg/controller/workflow/executor_test.go +++ b/flytepropeller/pkg/controller/workflow/executor_test.go @@ -100,7 +100,8 @@ func (f fakeRemoteWritePlugin) Handle(ctx context.Context, tCtx pluginCore.TaskE o.Literals[k] = l } assert.NoError(f.t, tCtx.DataStore().WriteProtobuf(ctx, tCtx.OutputWriter().GetOutputPath(), storage.Options{}, o)) - assert.NoError(f.t, tCtx.OutputWriter().Put(ctx, ioutils.NewRemoteFileOutputReader(ctx, tCtx.DataStore(), tCtx.OutputWriter(), 0))) + reader := ioutils.NewRemoteFileOutputReader(ctx, tCtx.DataStore(), tCtx.OutputWriter(), 0) + assert.NoError(f.t, tCtx.OutputWriter().Put(ctx, reader)) } return trns, err } diff --git a/flytestdlib/storage/storage.go b/flytestdlib/storage/storage.go index 52e6905513..3d53a4d25f 100644 --- a/flytestdlib/storage/storage.go +++ b/flytestdlib/storage/storage.go @@ -8,6 +8,7 @@ package storage import ( "context" + "fmt" "io" "net/url" "strings" @@ -171,3 +172,7 @@ func (r DataReference) 
Split() (scheme, container, key string, err error) { func (r DataReference) String() string { return string(r) } + +func NewDataReference(scheme string, container string, key string) DataReference { + return DataReference(fmt.Sprintf("%s://%s/%s", scheme, container, key)) +} diff --git a/flytestdlib/storage/storage_test.go b/flytestdlib/storage/storage_test.go index d4896f274b..3f369bab55 100644 --- a/flytestdlib/storage/storage_test.go +++ b/flytestdlib/storage/storage_test.go @@ -11,6 +11,14 @@ import ( "github.com/flyteorg/flyte/flytestdlib/promutils" ) +func TestDataReference_New(t *testing.T) { + scheme := "s3" + container := "container" + key := "path/to/file" + dataReference := NewDataReference(scheme, container, key) + assert.Equal(t, DataReference("s3://container/path/to/file"), dataReference) +} + func TestDataReference_Split(t *testing.T) { input := DataReference("s3://container/path/to/file") scheme, container, key, err := input.Split() diff --git a/flytestdlib/storage/stow_store.go b/flytestdlib/storage/stow_store.go index 4b8089b502..e86a199bda 100644 --- a/flytestdlib/storage/stow_store.go +++ b/flytestdlib/storage/stow_store.go @@ -263,13 +263,13 @@ func (s *StowStore) Head(ctx context.Context, reference DataReference) (Metadata } func (s *StowStore) List(ctx context.Context, reference DataReference, maxItems int, cursor Cursor) ([]DataReference, Cursor, error) { - _, c, k, err := reference.Split() + scheme, containerName, key, err := reference.Split() if err != nil { s.metrics.BadReference.Inc(ctx) return nil, NewCursorAtEnd(), err } - container, err := s.getContainer(ctx, locationIDMain, c) + container, err := s.getContainer(ctx, locationIDMain, containerName) if err != nil { return nil, NewCursorAtEnd(), err } @@ -284,14 +284,14 @@ func (s *StowStore) List(ctx context.Context, reference DataReference, maxItems } else { stowCursor = cursor.customPosition } - items, stowCursor, err := container.Items(k, stowCursor, maxItems) + items, stowCursor, err := container.Items(key, stowCursor, maxItems) t1.Stop() t2.Stop() if err == nil { results := make([]DataReference, len(items)) for index, item := range items { - results[index] = DataReference(item.URL().String()) + results[index] = DataReference(fmt.Sprintf("%s://%s/%s", scheme, containerName, item.URL().String())) } if stow.IsCursorEnd(stowCursor) { cursor = NewCursorAtEnd() @@ -302,7 +302,7 @@ func (s *StowStore) List(ctx context.Context, reference DataReference, maxItems } incFailureCounterForError(ctx, s.metrics.ListFailure, err) - return nil, NewCursorAtEnd(), errs.Wrapf(err, "path:%v", k) + return nil, NewCursorAtEnd(), errs.Wrapf(err, "path:%v", key) } func (s *StowStore) ReadRaw(ctx context.Context, reference DataReference) (io.ReadCloser, error) { diff --git a/flytestdlib/storage/stow_store_test.go b/flytestdlib/storage/stow_store_test.go index 4de273dd93..aec59051f3 100644 --- a/flytestdlib/storage/stow_store_test.go +++ b/flytestdlib/storage/stow_store_test.go @@ -419,7 +419,7 @@ func TestStowStore_List(t *testing.T) { items, cursor, err := s.List(ctx, dataReference, maxResults, NewCursorAtStart()) assert.NoError(t, err) assert.Equal(t, NewCursorAtEnd(), cursor) - assert.Equal(t, []DataReference{"a/1", "a/2"}, items) + assert.Equal(t, []DataReference{"s3://container/a/1", "s3://container/a/2"}, items) }) t.Run("Listing with pagination", func(t *testing.T) { @@ -446,10 +446,10 @@ func TestStowStore_List(t *testing.T) { var dataReference DataReference = "s3://container/a" items, cursor, err := s.List(ctx, 
dataReference, maxResults, NewCursorAtStart()) assert.NoError(t, err) - assert.Equal(t, []DataReference{"a/1"}, items) + assert.Equal(t, []DataReference{"s3://container/a/1"}, items) items, _, err = s.List(ctx, dataReference, maxResults, cursor) assert.NoError(t, err) - assert.Equal(t, []DataReference{"a/2"}, items) + assert.Equal(t, []DataReference{"s3://container/a/2"}, items) }) } From 2fa77a5eb1a9e0a1f315b40241314751de06c2ef Mon Sep 17 00:00:00 2001 From: "Fabio M. Graetz, Ph.D." Date: Tue, 5 Nov 2024 19:41:08 +0100 Subject: [PATCH 12/18] Fix: avoid log spam for log links generated during the pod's pending phase (#5945) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Fabio Grätz Co-authored-by: Fabio Grätz --- flyteplugins/go/tasks/logs/logging_utils.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/flyteplugins/go/tasks/logs/logging_utils.go b/flyteplugins/go/tasks/logs/logging_utils.go index 3322cc37d8..4bfff0dd17 100644 --- a/flyteplugins/go/tasks/logs/logging_utils.go +++ b/flyteplugins/go/tasks/logs/logging_utils.go @@ -31,7 +31,14 @@ func GetLogsForContainerInPod(ctx context.Context, logPlugin tasklog.Plugin, tas containerID := v1.ContainerStatus{}.ContainerID if uint32(len(pod.Status.ContainerStatuses)) <= index { - logger.Errorf(ctx, "containerStatus IndexOutOfBound, requested [%d], but total containerStatuses [%d] in pod phase [%v]", index, len(pod.Status.ContainerStatuses), pod.Status.Phase) + msg := fmt.Sprintf("containerStatus IndexOutOfBound, requested [%d], but total containerStatuses [%d] in pod phase [%v]", index, len(pod.Status.ContainerStatuses), pod.Status.Phase) + if pod.Status.Phase == v1.PodPending { + // If the pod is pending, the container status may not be available yet. Log as debug. + logger.Debugf(ctx, msg) + } else { + // In other phases, this is unexpected. Log as error. + logger.Errorf(ctx, msg) + } } else { containerID = pod.Status.ContainerStatuses[index].ContainerID } From d8afbcc6fd2ea400c34973520a51a3e57f412f28 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E6=B1=9F=E5=AE=B6=E7=91=8B?= <36886416+JiangJiaWei1103@users.noreply.github.com> Date: Tue, 5 Nov 2024 20:45:46 +0100 Subject: [PATCH 13/18] docs: Align note with the output naming convention (#5919) Signed-off-by: JiaWei Jiang --- docs/user_guide/basics/tasks.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/user_guide/basics/tasks.md b/docs/user_guide/basics/tasks.md index acedcad51e..b76e61f5dc 100644 --- a/docs/user_guide/basics/tasks.md +++ b/docs/user_guide/basics/tasks.md @@ -51,9 +51,9 @@ We create a task that computes the slope of a regression line: ``` :::{note} -Flytekit will assign a default name to the output variable like `out0`. +Flytekit will assign a default name to the output variable like `o0`. In case of multiple outputs, each output will be numbered in the order -starting with 0, e.g., -> `out0, out1, out2, ...`. +starting with 0, e.g., `o0`, `o1`, `o2`, etc. 
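+
+For instance (a minimal sketch; the `min_max` task below is only illustrative), a task returning a plain tuple with no named outputs surfaces its two values as `o0` and `o1`:
+
+```python
+from typing import List, Tuple
+
+from flytekit import task
+
+
+@task
+def min_max(values: List[int]) -> Tuple[int, int]:
+    # With no explicitly named outputs, Flytekit exposes these as o0 and o1
+    return min(values), max(values)
+```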
::: You can execute a Flyte task just like any regular Python function: From 96c467e14691eb3e956d3b21c05fb724c240fe97 Mon Sep 17 00:00:00 2001 From: Vincent Chen <62143443+mao3267@users.noreply.github.com> Date: Wed, 6 Nov 2024 04:20:55 +0800 Subject: [PATCH 14/18] docs: add copy command examples and description (#5782) Signed-off-by: mao3267 --- .../customizing_dependencies/imagespec.md | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/docs/user_guide/customizing_dependencies/imagespec.md b/docs/user_guide/customizing_dependencies/imagespec.md index 0e66eadd95..6d992ddb72 100644 --- a/docs/user_guide/customizing_dependencies/imagespec.md +++ b/docs/user_guide/customizing_dependencies/imagespec.md @@ -175,6 +175,26 @@ image_spec = ImageSpec( ) ``` +## Copy additional files or directories +You can specify files or directories to be copied into the container `/root`, allowing users to access the required files. The directory structure will match the relative path. Since Docker only supports relative paths, absolute paths and paths outside the current working directory (e.g., paths with "../") are not allowed. + +```py +from flytekit.image_spec import ImageSpec +from flytekit import task, workflow + +image_spec = ImageSpec( + name="image_with_copy", + registry="localhost:30000", + builder="default", + copy=["files/input.txt"], +) + +@task(container_image=image_spec) +def my_task() -> str: + with open("/root/files/input.txt", "r") as f: + return f.read() +``` + ## Define ImageSpec in a YAML File You can override the container image by providing an ImageSpec YAML file to the `pyflyte run` or `pyflyte register` command. From f14348165ccdfb26f8509c0f1ef380a360e59c4d Mon Sep 17 00:00:00 2001 From: Troy Chiu <114708546+troychiu@users.noreply.github.com> Date: Wed, 6 Nov 2024 12:28:18 -0800 Subject: [PATCH 15/18] Hide generated launch plans starting with .flytegen in the UI (#5949) Signed-off-by: troychiu --- flyteadmin/pkg/common/filters.go | 13 ++++++++++ flyteadmin/pkg/common/filters_test.go | 16 ++++++++++++ .../pkg/manager/impl/execution_manager.go | 2 +- .../pkg/manager/impl/named_entity_manager.go | 21 +++++---------- .../manager/impl/named_entity_manager_test.go | 10 +++---- .../impl/util/single_task_execution.go | 15 ++++++++++- .../impl/util/single_task_execution_test.go | 17 +++++++++++- .../impl/validation/named_entity_validator.go | 2 +- .../validation/named_entity_validator_test.go | 26 ++++++++++++++++++- 9 files changed, 96 insertions(+), 26 deletions(-) diff --git a/flyteadmin/pkg/common/filters.go b/flyteadmin/pkg/common/filters.go index 57756e7820..697bb75179 100644 --- a/flyteadmin/pkg/common/filters.go +++ b/flyteadmin/pkg/common/filters.go @@ -22,6 +22,7 @@ type GormQueryExpr struct { // Complete set of filters available for database queries. const ( Contains FilterExpression = iota + NotLike GreaterThan GreaterThanOrEqual LessThan @@ -37,6 +38,7 @@ const ( joinArgsFormat = "%s.%s" containsQuery = "%s LIKE ?" containsArgs = "%%%s%%" + notLikeQuery = "%s NOT LIKE ?" greaterThanQuery = "%s > ?" greaterThanOrEqualQuery = "%s >= ?" lessThanQuery = "%s < ?" @@ -50,6 +52,7 @@ const ( // Set of available filters which exclusively accept a single argument value. 
var singleValueFilters = map[FilterExpression]bool{ Contains: true, + NotLike: true, GreaterThan: true, GreaterThanOrEqual: true, LessThan: true, @@ -68,6 +71,7 @@ const EqualExpression = "eq" var filterNameMappings = map[string]FilterExpression{ "contains": Contains, + "not_like": NotLike, "gt": GreaterThan, "gte": GreaterThanOrEqual, "lt": LessThan, @@ -80,6 +84,7 @@ var filterNameMappings = map[string]FilterExpression{ var filterQueryMappings = map[FilterExpression]string{ Contains: containsQuery, + NotLike: notLikeQuery, GreaterThan: greaterThanQuery, GreaterThanOrEqual: greaterThanOrEqualQuery, LessThan: lessThanQuery, @@ -117,6 +122,8 @@ func getFilterExpressionName(expression FilterExpression) string { switch expression { case Contains: return "contains" + case NotLike: + return "not like" case GreaterThan: return "greater than" case GreaterThanOrEqual: @@ -208,6 +215,12 @@ func (f *inlineFilterImpl) getGormQueryExpr(formattedField string) (GormQueryExp // args renders to something like: "%value%" Args: fmt.Sprintf(containsArgs, f.value), }, nil + case NotLike: + return GormQueryExpr{ + // WHERE field NOT LIKE value + Query: fmt.Sprintf(notLikeQuery, formattedField), + Args: f.value, + }, nil case GreaterThan: return GormQueryExpr{ // WHERE field > value diff --git a/flyteadmin/pkg/common/filters_test.go b/flyteadmin/pkg/common/filters_test.go index 87ba5ac2ac..85092e36b6 100644 --- a/flyteadmin/pkg/common/filters_test.go +++ b/flyteadmin/pkg/common/filters_test.go @@ -107,6 +107,7 @@ func TestGetGormJoinTableQueryExpr(t *testing.T) { var expectedArgsForFilters = map[FilterExpression]string{ Contains: "%value%", + NotLike: "value", GreaterThan: "value", GreaterThanOrEqual: "value", LessThan: "value", @@ -169,3 +170,18 @@ func TestWithDefaultValueFilter(t *testing.T) { assert.Equal(t, "COALESCE(named_entity_metadata.state, 0) = ?", queryExpression.Query) assert.Equal(t, 1, queryExpression.Args) } + +func TestNotLikeFilter(t *testing.T) { + filter, err := NewSingleValueFilter(NamedEntityMetadata, NotLike, "name", ".flytegen%") + assert.NoError(t, err) + + queryExpression, err := filter.GetGormQueryExpr() + assert.NoError(t, err) + assert.Equal(t, "name NOT LIKE ?", queryExpression.Query) + assert.Equal(t, ".flytegen%", queryExpression.Args) + + queryExpression, err = filter.GetGormJoinTableQueryExpr("named_entity_metadata") + assert.NoError(t, err) + assert.Equal(t, "named_entity_metadata.name NOT LIKE ?", queryExpression.Query) + assert.Equal(t, ".flytegen%", queryExpression.Args) +} diff --git a/flyteadmin/pkg/manager/impl/execution_manager.go b/flyteadmin/pkg/manager/impl/execution_manager.go index 27acf152ec..e700a744d8 100644 --- a/flyteadmin/pkg/manager/impl/execution_manager.go +++ b/flyteadmin/pkg/manager/impl/execution_manager.go @@ -480,7 +480,7 @@ func (m *ExecutionManager) launchSingleTaskExecution( return nil, nil, err } - launchPlan, err := util.CreateOrGetLaunchPlan(ctx, m.db, m.config, taskIdentifier, + launchPlan, err := util.CreateOrGetLaunchPlan(ctx, m.db, m.config, m.namedEntityManager, taskIdentifier, workflow.Closure.CompiledWorkflow.Primary.Template.Interface, workflowModel.ID, request.Spec) if err != nil { return nil, nil, err diff --git a/flyteadmin/pkg/manager/impl/named_entity_manager.go b/flyteadmin/pkg/manager/impl/named_entity_manager.go index 329061c4d3..883948318a 100644 --- a/flyteadmin/pkg/manager/impl/named_entity_manager.go +++ b/flyteadmin/pkg/manager/impl/named_entity_manager.go @@ -2,6 +2,7 @@ package impl import ( "context" + "fmt" "strconv" 
"strings" @@ -17,7 +18,6 @@ import ( "github.com/flyteorg/flyte/flyteadmin/pkg/repositories/transformers" runtimeInterfaces "github.com/flyteorg/flyte/flyteadmin/pkg/runtime/interfaces" "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin" - "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" "github.com/flyteorg/flyte/flytestdlib/contextutils" "github.com/flyteorg/flyte/flytestdlib/logger" "github.com/flyteorg/flyte/flytestdlib/promutils" @@ -25,13 +25,6 @@ import ( const state = "state" -// System-generated workflows are meant to be hidden from the user by default. Therefore we always only show -// workflow-type named entities that have been user generated only. -var nonSystemGeneratedWorkflowsFilter, _ = common.NewSingleValueFilter( - common.NamedEntityMetadata, common.NotEqual, state, admin.NamedEntityState_SYSTEM_GENERATED) -var defaultWorkflowsFilter, _ = common.NewWithDefaultValueFilter( - strconv.Itoa(int(admin.NamedEntityState_NAMED_ENTITY_ACTIVE)), nonSystemGeneratedWorkflowsFilter) - type NamedEntityMetrics struct { Scope promutils.Scope } @@ -75,12 +68,8 @@ func (m *NamedEntityManager) GetNamedEntity(ctx context.Context, request *admin. return util.GetNamedEntity(ctx, m.db, request.ResourceType, request.Id) } -func (m *NamedEntityManager) getQueryFilters(referenceEntity core.ResourceType, requestFilters string) ([]common.InlineFilter, error) { +func (m *NamedEntityManager) getQueryFilters(requestFilters string) ([]common.InlineFilter, error) { filters := make([]common.InlineFilter, 0) - if referenceEntity == core.ResourceType_WORKFLOW { - filters = append(filters, defaultWorkflowsFilter) - } - if len(requestFilters) == 0 { return filters, nil } @@ -111,10 +100,14 @@ func (m *NamedEntityManager) ListNamedEntities(ctx context.Context, request *adm } ctx = contextutils.WithProjectDomain(ctx, request.Project, request.Domain) + if len(request.Filters) == 0 { + // Add implicit filter to exclude system generated workflows + request.Filters = fmt.Sprintf("not_like(name,%s)", ".flytegen%") + } // HACK: In order to filter by state (if requested) - we need to amend the filter to use COALESCE // e.g. eq(state, 1) becomes 'WHERE (COALESCE(state, 0) = '1')' since not every NamedEntity necessarily // has an entry, and therefore the default state value '0' (active), should be assumed. 
- filters, err := m.getQueryFilters(request.ResourceType, request.Filters) + filters, err := m.getQueryFilters(request.Filters) if err != nil { return nil, err } diff --git a/flyteadmin/pkg/manager/impl/named_entity_manager_test.go b/flyteadmin/pkg/manager/impl/named_entity_manager_test.go index 6bdd5620c5..0d009753c8 100644 --- a/flyteadmin/pkg/manager/impl/named_entity_manager_test.go +++ b/flyteadmin/pkg/manager/impl/named_entity_manager_test.go @@ -87,7 +87,7 @@ func TestNamedEntityManager_Get_BadRequest(t *testing.T) { func TestNamedEntityManager_getQueryFilters(t *testing.T) { repository := getMockRepositoryForNETest() manager := NewNamedEntityManager(repository, getMockConfigForNETest(), mockScope.NewTestScope()) - updatedFilters, err := manager.(*NamedEntityManager).getQueryFilters(core.ResourceType_TASK, "eq(state, 0)") + updatedFilters, err := manager.(*NamedEntityManager).getQueryFilters("eq(state, 0)") assert.NoError(t, err) assert.Len(t, updatedFilters, 1) @@ -97,13 +97,9 @@ func TestNamedEntityManager_getQueryFilters(t *testing.T) { assert.Equal(t, "COALESCE(state, 0) = ?", queryExp.Query) assert.Equal(t, "0", queryExp.Args) - updatedFilters, err = manager.(*NamedEntityManager).getQueryFilters(core.ResourceType_WORKFLOW, "") + updatedFilters, err = manager.(*NamedEntityManager).getQueryFilters("") assert.NoError(t, err) - assert.Len(t, updatedFilters, 1) - queryExp, err = updatedFilters[0].GetGormQueryExpr() - assert.NoError(t, err) - assert.Equal(t, "COALESCE(state, 0) <> ?", queryExp.Query) - assert.Equal(t, admin.NamedEntityState_SYSTEM_GENERATED, queryExp.Args) + assert.Len(t, updatedFilters, 0) } func TestNamedEntityManager_Update(t *testing.T) { diff --git a/flyteadmin/pkg/manager/impl/util/single_task_execution.go b/flyteadmin/pkg/manager/impl/util/single_task_execution.go index a82750f733..036610a9ec 100644 --- a/flyteadmin/pkg/manager/impl/util/single_task_execution.go +++ b/flyteadmin/pkg/manager/impl/util/single_task_execution.go @@ -165,7 +165,7 @@ func CreateOrGetWorkflowModel( } func CreateOrGetLaunchPlan(ctx context.Context, - db repositoryInterfaces.Repository, config runtimeInterfaces.Configuration, taskIdentifier *core.Identifier, + db repositoryInterfaces.Repository, config runtimeInterfaces.Configuration, namedEntityManager interfaces.NamedEntityInterface, taskIdentifier *core.Identifier, workflowInterface *core.TypedInterface, workflowID uint, spec *admin.ExecutionSpec) (*admin.LaunchPlan, error) { var launchPlan *admin.LaunchPlan var err error @@ -226,6 +226,19 @@ func CreateOrGetLaunchPlan(ctx context.Context, logger.Errorf(ctx, "Failed to save launch plan model [%+v] with err: %v", launchPlanIdentifier, err) return nil, err } + _, err = namedEntityManager.UpdateNamedEntity(ctx, &admin.NamedEntityUpdateRequest{ + ResourceType: core.ResourceType_LAUNCH_PLAN, + Id: &admin.NamedEntityIdentifier{ + Project: launchPlan.GetId().GetProject(), + Domain: launchPlan.GetId().GetDomain(), + Name: launchPlan.GetId().GetName(), + }, + Metadata: &admin.NamedEntityMetadata{State: admin.NamedEntityState_SYSTEM_GENERATED}, + }) + if err != nil { + logger.Warningf(ctx, "Failed to set launch plan state to system-generated: %v", err) + return nil, err + } } return launchPlan, nil diff --git a/flyteadmin/pkg/manager/impl/util/single_task_execution_test.go b/flyteadmin/pkg/manager/impl/util/single_task_execution_test.go index 7b64f142f9..13ed4a945d 100644 --- a/flyteadmin/pkg/manager/impl/util/single_task_execution_test.go +++ 
b/flyteadmin/pkg/manager/impl/util/single_task_execution_test.go @@ -217,6 +217,21 @@ func TestCreateOrGetLaunchPlan(t *testing.T) { }, } workflowID := uint(12) + + mockNamedEntityManager := managerMocks.NamedEntityManager{} + mockNamedEntityManager.UpdateNamedEntityFunc = func(ctx context.Context, request *admin.NamedEntityUpdateRequest) (*admin.NamedEntityUpdateResponse, error) { + assert.Equal(t, request.ResourceType, core.ResourceType_LAUNCH_PLAN) + assert.True(t, proto.Equal(request.Id, &admin.NamedEntityIdentifier{ + Project: "flytekit", + Domain: "production", + Name: ".flytegen.app.workflows.MyWorkflow.my_task", + }), fmt.Sprintf("%+v", request.Id)) + assert.True(t, proto.Equal(request.Metadata, &admin.NamedEntityMetadata{ + State: admin.NamedEntityState_SYSTEM_GENERATED, + })) + return &admin.NamedEntityUpdateResponse{}, nil + } + taskIdentifier := &core.Identifier{ ResourceType: core.ResourceType_TASK, Project: "flytekit", @@ -233,7 +248,7 @@ func TestCreateOrGetLaunchPlan(t *testing.T) { }, } launchPlan, err := CreateOrGetLaunchPlan( - context.Background(), repository, config, taskIdentifier, workflowInterface, workflowID, &spec) + context.Background(), repository, config, &mockNamedEntityManager, taskIdentifier, workflowInterface, workflowID, &spec) assert.NoError(t, err) assert.True(t, proto.Equal(&core.Identifier{ ResourceType: core.ResourceType_LAUNCH_PLAN, diff --git a/flyteadmin/pkg/manager/impl/validation/named_entity_validator.go b/flyteadmin/pkg/manager/impl/validation/named_entity_validator.go index 685bdfe4b5..e9af05f527 100644 --- a/flyteadmin/pkg/manager/impl/validation/named_entity_validator.go +++ b/flyteadmin/pkg/manager/impl/validation/named_entity_validator.go @@ -10,7 +10,7 @@ import ( "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" ) -var archivableResourceTypes = sets.NewInt32(int32(core.ResourceType_WORKFLOW), int32(core.ResourceType_TASK)) +var archivableResourceTypes = sets.NewInt32(int32(core.ResourceType_WORKFLOW), int32(core.ResourceType_TASK), int32(core.ResourceType_LAUNCH_PLAN)) func ValidateNamedEntityGetRequest(request *admin.NamedEntityGetRequest) error { if err := ValidateResourceType(request.ResourceType); err != nil { diff --git a/flyteadmin/pkg/manager/impl/validation/named_entity_validator_test.go b/flyteadmin/pkg/manager/impl/validation/named_entity_validator_test.go index cec2af94ee..025d4eca0c 100644 --- a/flyteadmin/pkg/manager/impl/validation/named_entity_validator_test.go +++ b/flyteadmin/pkg/manager/impl/validation/named_entity_validator_test.go @@ -109,7 +109,7 @@ func TestValidateNamedEntityUpdateRequest(t *testing.T) { }, })) assert.Equal(t, codes.InvalidArgument, ValidateNamedEntityUpdateRequest(&admin.NamedEntityUpdateRequest{ - ResourceType: core.ResourceType_LAUNCH_PLAN, + ResourceType: core.ResourceType_DATASET, Id: &admin.NamedEntityIdentifier{ Project: "project", Domain: "domain", @@ -141,6 +141,30 @@ func TestValidateNamedEntityUpdateRequest(t *testing.T) { State: admin.NamedEntityState_NAMED_ENTITY_ARCHIVED, }, })) + + assert.Nil(t, ValidateNamedEntityUpdateRequest(&admin.NamedEntityUpdateRequest{ + ResourceType: core.ResourceType_LAUNCH_PLAN, + Id: &admin.NamedEntityIdentifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + Metadata: &admin.NamedEntityMetadata{ + Description: "description", + }, + })) + + assert.Nil(t, ValidateNamedEntityUpdateRequest(&admin.NamedEntityUpdateRequest{ + ResourceType: core.ResourceType_LAUNCH_PLAN, + Id: &admin.NamedEntityIdentifier{ + Project: "project", + 
Domain: "domain", + Name: "name", + }, + Metadata: &admin.NamedEntityMetadata{ + State: admin.NamedEntityState_NAMED_ENTITY_ARCHIVED, + }, + })) } func TestValidateNamedEntityListRequest(t *testing.T) { From 94861720a0114569cf749dea7b86938e7b92d956 Mon Sep 17 00:00:00 2001 From: Akinori Mitani Date: Wed, 6 Nov 2024 16:35:09 -0800 Subject: [PATCH 16/18] Fix link in README.md (#5957) Signed-off-by: Akinori Mitani --- flyteidl/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flyteidl/README.md b/flyteidl/README.md index 25a5badebc..e17472a3cf 100644 --- a/flyteidl/README.md +++ b/flyteidl/README.md @@ -11,4 +11,4 @@ This is one of the core components of Flyte. It contains the Specification of th ## Contributing to Flyteidl -See the [contributing docs](protocs/contributing.md) for more information. +See the [contributing docs](protos/docs/contributing.md) for more information. From fef67b8d54171835831bd5b1b23059acb4c13fe1 Mon Sep 17 00:00:00 2001 From: jkh <1220444+jkhales@users.noreply.github.com> Date: Thu, 7 Nov 2024 10:57:06 -0800 Subject: [PATCH 17/18] Fix indentation for security block in auth_setup.rst (#5968) I believe `security` belongs under `server`. Without making this change, the auth didn't work for me. I further checked this by noting that the example helm file listed on https://artifacthub.io/packages/helm/flyte/flyte-core also places `security` under `server` `configmap.adminServer.server.security.useAuth` Signed-off-by: jkh <1220444+jkhales@users.noreply.github.com> --- docs/deployment/configuration/auth_setup.rst | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/deployment/configuration/auth_setup.rst b/docs/deployment/configuration/auth_setup.rst index 9d628f5790..77681d71ba 100644 --- a/docs/deployment/configuration/auth_setup.rst +++ b/docs/deployment/configuration/auth_setup.rst @@ -254,15 +254,15 @@ Apply OIDC Configuration httpPort: 8088 grpc: port: 8089 - security: - secure: false - useAuth: true - allowCors: true - allowedOrigins: - # Accepting all domains for Sandbox installation - - "*" - allowedHeaders: - - "Content-Type" + security: + secure: false + useAuth: true + allowCors: true + allowedOrigins: + # Accepting all domains for Sandbox installation + - "*" + allowedHeaders: + - "Content-Type" auth: appAuth: thirdPartyConfig: From b5f23a693c62570dc92473e5e550e7be50ddad3a Mon Sep 17 00:00:00 2001 From: Wei-Yu Kao <115421902+wayner0628@users.noreply.github.com> Date: Thu, 7 Nov 2024 21:48:02 -0800 Subject: [PATCH 18/18] [copilot][flytedirectory] multipart blob download (#5715) * add download multipart blob Signed-off-by: wayner0628 * recursively process subparts Signed-off-by: wayner0628 * implement GetItems function Signed-off-by: wayner0628 * add unit testing Signed-off-by: wayner0628 * Parallelly handle blob items Signed-off-by: wayner0628 * fix lint error Signed-off-by: wayner0628 * implement GetItems function Signed-off-by: wayner0628 * add mutex avoid racing Signed-off-by: wayner0628 * avoid infinite call Signed-off-by: wayner0628 * protect critical variables Signed-off-by: wayner0628 * avoid infinite call Signed-off-by: wayner0628 * lint Signed-off-by: wayner0628 * add more unit tests Signed-off-by: wayner0628 * add more unit tests Signed-off-by: wayner0628 * fix mock Signed-off-by: wayner0628 * Accept incoming changes Signed-off-by: wayner0628 * multipart blob download based on new api Signed-off-by: wayner0628 * cache store stop listing at end cursor Signed-off-by: wayner0628 * lint 
Signed-off-by: wayner0628 * remove old api mock Signed-off-by: wayner0628 * remove old api mock Signed-off-by: wayner0628 * remove old api mock Signed-off-by: wayner0628 * update mem_store List to return global path Signed-off-by: wayner0628 * change mkdir perm Signed-off-by: wayner0628 * add comments and handle more errors Signed-off-by: wayner0628 * lint Co-authored-by: Han-Ru Chen (Future-Outlier) Signed-off-by: Wei-Yu Kao <115421902+wayner0628@users.noreply.github.com> * address race condition and aggregate errors Signed-off-by: wayner0628 * fix tests Signed-off-by: Future-Outlier * err msg enhancement Signed-off-by: Future-Outlier --------- Signed-off-by: wayner0628 Signed-off-by: Wei-Yu Kao <115421902+wayner0628@users.noreply.github.com> Signed-off-by: Future-Outlier Co-authored-by: Han-Ru Chen (Future-Outlier) --- flytecopilot/data/download.go | 204 ++++++++++++++++++++++++----- flytecopilot/data/download_test.go | 151 +++++++++++++++++++++ flytestdlib/storage/mem_store.go | 16 ++- flytestdlib/storage/storage.go | 6 +- 4 files changed, 339 insertions(+), 38 deletions(-) create mode 100644 flytecopilot/data/download_test.go diff --git a/flytecopilot/data/download.go b/flytecopilot/data/download.go index 0fd1f10bd9..e4efa22222 100644 --- a/flytecopilot/data/download.go +++ b/flytecopilot/data/download.go @@ -8,8 +8,10 @@ import ( "io/ioutil" "os" "path" + "path/filepath" "reflect" "strconv" + "sync" "github.com/ghodss/yaml" "github.com/golang/protobuf/jsonpb" @@ -31,57 +33,187 @@ type Downloader struct { mode core.IOStrategy_DownloadMode } -// TODO add support for multipart blobs -func (d Downloader) handleBlob(ctx context.Context, blob *core.Blob, toFilePath string) (interface{}, error) { - ref := storage.DataReference(blob.Uri) - scheme, _, _, err := ref.Split() +// TODO add timeout and rate limit +// TODO use chunk to download +func (d Downloader) handleBlob(ctx context.Context, blob *core.Blob, toPath string) (interface{}, error) { + /* + handleBlob handles the retrieval and local storage of blob data, including support for both single and multipart blob types. + For multipart blobs, it lists all parts recursively and spawns concurrent goroutines to download each part while managing file I/O in parallel. + + - The function begins by validating the blob URI and categorizing the blob type (single or multipart). + - In the multipart case, it recursively lists all blob parts and launches goroutines to download and save each part. + Goroutine closure and I/O success tracking are managed to avoid resource leaks. + - For single-part blobs, it directly downloads and writes the data to the specified path. + + Life Cycle: + 1. Blob URI -> Blob Metadata Type check -> Recursive List parts if Multipart -> Launch goroutines to download parts + (input blob object) (determine multipart/single) (List API, handles recursive case) (each part handled in parallel) + 2. 
Download part or full blob -> Save locally with error checks -> Handle reader/writer closures -> Return local path or error + (download each part) (error on write or directory) (close streams safely, track success) (completion or report missing closures) + */ + + blobRef := storage.DataReference(blob.Uri) + scheme, _, _, err := blobRef.Split() if err != nil { return nil, errors.Wrapf(err, "Blob uri incorrectly formatted") } - var reader io.ReadCloser - if scheme == "http" || scheme == "https" { - reader, err = DownloadFileFromHTTP(ctx, ref) - } else { - if blob.GetMetadata().GetType().Dimensionality == core.BlobType_MULTIPART { - logger.Warnf(ctx, "Currently only single part blobs are supported, we will force multipart to be 'path/00000'") - ref, err = d.store.ConstructReference(ctx, ref, "000000") - if err != nil { + + if blob.GetMetadata().GetType().Dimensionality == core.BlobType_MULTIPART { + // Collect all parts of the multipart blob recursively (List API handles nested directories) + // Set maxItems to 100 as a parameter for the List API, enabling batch retrieval of items until all are downloaded + maxItems := 100 + cursor := storage.NewCursorAtStart() + var items []storage.DataReference + var absPaths []string + for { + items, cursor, err = d.store.List(ctx, blobRef, maxItems, cursor) + if err != nil || len(items) == 0 { + logger.Errorf(ctx, "failed to collect items from multipart blob [%s]", blobRef) return nil, err } + for _, item := range items { + absPaths = append(absPaths, item.String()) + } + if storage.IsCursorEnd(cursor) { + break + } + } + + // Track the count of successful downloads and the total number of items + downloadSuccess := 0 + itemCount := len(absPaths) + // Track successful closures of readers and writers in deferred functions + readerCloseSuccessCount := 0 + writerCloseSuccessCount := 0 + // We use Mutex to avoid race conditions when updating counters and creating directories + var mu sync.Mutex + var wg sync.WaitGroup + for _, absPath := range absPaths { + absPath := absPath + + wg.Add(1) + go func() { + defer wg.Done() + defer func() { + if err := recover(); err != nil { + logger.Errorf(ctx, "recover receives error: [%s]", err) + } + }() + + ref := storage.DataReference(absPath) + reader, err := DownloadFileFromStorage(ctx, ref, d.store) + if err != nil { + logger.Errorf(ctx, "Failed to download from ref [%s]", ref) + return + } + defer func() { + err := reader.Close() + if err != nil { + logger.Errorf(ctx, "failed to close Blob read stream @ref [%s].\n"+ + "Error: %s", ref, err) + } + mu.Lock() + readerCloseSuccessCount++ + mu.Unlock() + }() + + _, _, k, err := ref.Split() + if err != nil { + logger.Errorf(ctx, "Failed to parse ref [%s]", ref) + return + } + newPath := filepath.Join(toPath, k) + dir := filepath.Dir(newPath) + + mu.Lock() + // os.MkdirAll creates the specified directory structure if it doesn’t already exist + // 0777: the directory can be read and written by anyone + err = os.MkdirAll(dir, 0777) + mu.Unlock() + if err != nil { + logger.Errorf(ctx, "failed to make dir at path [%s]", dir) + return + } + + writer, err := os.Create(newPath) + if err != nil { + logger.Errorf(ctx, "failed to open file at path [%s]", newPath) + return + } + defer func() { + err := writer.Close() + if err != nil { + logger.Errorf(ctx, "failed to close File write stream.\n"+ + "Error: [%s]", err) + } + mu.Lock() + writerCloseSuccessCount++ + mu.Unlock() + }() + + _, err = io.Copy(writer, reader) + if err != nil { + logger.Errorf(ctx, "failed to write remote data to 
local filesystem") + return + } + mu.Lock() + downloadSuccess++ + mu.Unlock() + }() + } + // Go routines are synchronized with a WaitGroup to prevent goroutine leaks. + wg.Wait() + if downloadSuccess != itemCount || readerCloseSuccessCount != itemCount || writerCloseSuccessCount != itemCount { + return nil, errors.Errorf( + "Failed to copy %d out of %d remote files from [%s] to local [%s].\n"+ + "Failed to close %d readers\n"+ + "Failed to close %d writers.", + itemCount-downloadSuccess, itemCount, blobRef, toPath, itemCount-readerCloseSuccessCount, itemCount-writerCloseSuccessCount, + ) + } + logger.Infof(ctx, "successfully copied %d remote files from [%s] to local [%s]", downloadSuccess, blobRef, toPath) + return toPath, nil + } else if blob.GetMetadata().GetType().Dimensionality == core.BlobType_SINGLE { + // reader should be declared here (avoid being shared across all goroutines) + var reader io.ReadCloser + if scheme == "http" || scheme == "https" { + reader, err = DownloadFileFromHTTP(ctx, blobRef) + } else { + reader, err = DownloadFileFromStorage(ctx, blobRef, d.store) } - reader, err = DownloadFileFromStorage(ctx, ref, d.store) - } - if err != nil { - logger.Errorf(ctx, "Failed to download from ref [%s]", ref) - return nil, err - } - defer func() { - err := reader.Close() if err != nil { - logger.Errorf(ctx, "failed to close Blob read stream @ref [%s]. Error: %s", ref, err) + logger.Errorf(ctx, "Failed to download from ref [%s]", blobRef) + return nil, err } - }() + defer func() { + err := reader.Close() + if err != nil { + logger.Errorf(ctx, "failed to close Blob read stream @ref [%s]. Error: %s", blobRef, err) + } + }() - writer, err := os.Create(toFilePath) - if err != nil { - return nil, errors.Wrapf(err, "failed to open file at path %s", toFilePath) - } - defer func() { - err := writer.Close() + writer, err := os.Create(toPath) if err != nil { - logger.Errorf(ctx, "failed to close File write stream. Error: %s", err) + return nil, errors.Wrapf(err, "failed to open file at path %s", toPath) } - }() - v, err := io.Copy(writer, reader) - if err != nil { - return nil, errors.Wrapf(err, "failed to write remote data to local filesystem") + defer func() { + err := writer.Close() + if err != nil { + logger.Errorf(ctx, "failed to close File write stream. 
Error: %s", err) + } + }() + v, err := io.Copy(writer, reader) + if err != nil { + return nil, errors.Wrapf(err, "failed to write remote data to local filesystem") + } + logger.Infof(ctx, "Successfully copied [%d] bytes remote data from [%s] to local [%s]", v, blobRef, toPath) + return toPath, nil } - logger.Infof(ctx, "Successfully copied [%d] bytes remote data from [%s] to local [%s]", v, ref, toFilePath) - return toFilePath, nil + + return nil, errors.Errorf("unexpected Blob type encountered") } func (d Downloader) handleSchema(ctx context.Context, schema *core.Schema, toFilePath string) (interface{}, error) { - // TODO Handle schema type return d.handleBlob(ctx, &core.Blob{Uri: schema.Uri, Metadata: &core.BlobMetadata{Type: &core.BlobType{Dimensionality: core.BlobType_MULTIPART}}}, toFilePath) } diff --git a/flytecopilot/data/download_test.go b/flytecopilot/data/download_test.go new file mode 100644 index 0000000000..1f3b3a7be6 --- /dev/null +++ b/flytecopilot/data/download_test.go @@ -0,0 +1,151 @@ +package data + +import ( + "bytes" + "context" + "os" + "path/filepath" + "testing" + + "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core" + "github.com/flyteorg/flyte/flytestdlib/promutils" + "github.com/flyteorg/flyte/flytestdlib/storage" + + "github.com/stretchr/testify/assert" +) + +func TestHandleBlobMultipart(t *testing.T) { + t.Run("Successful Query", func(t *testing.T) { + s, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) + assert.NoError(t, err) + ref := storage.DataReference("s3://container/folder/file1") + s.WriteRaw(context.Background(), ref, 0, storage.Options{}, bytes.NewReader([]byte{})) + ref = storage.DataReference("s3://container/folder/file2") + s.WriteRaw(context.Background(), ref, 0, storage.Options{}, bytes.NewReader([]byte{})) + + d := Downloader{store: s} + + blob := &core.Blob{ + Uri: "s3://container/folder", + Metadata: &core.BlobMetadata{ + Type: &core.BlobType{ + Dimensionality: core.BlobType_MULTIPART, + }, + }, + } + + toPath := "./inputs" + defer func() { + err := os.RemoveAll(toPath) + if err != nil { + t.Errorf("Failed to delete directory: %v", err) + } + }() + + result, err := d.handleBlob(context.Background(), blob, toPath) + assert.NoError(t, err) + assert.Equal(t, toPath, result) + + // Check if files were created and data written + for _, file := range []string{"file1", "file2"} { + if _, err := os.Stat(filepath.Join(toPath, "folder", file)); os.IsNotExist(err) { + t.Errorf("expected file %s to exist", file) + } + } + }) + + t.Run("No Items", func(t *testing.T) { + s, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) + assert.NoError(t, err) + + d := Downloader{store: s} + + blob := &core.Blob{ + Uri: "s3://container/folder", + Metadata: &core.BlobMetadata{ + Type: &core.BlobType{ + Dimensionality: core.BlobType_MULTIPART, + }, + }, + } + + toPath := "./inputs" + defer func() { + err := os.RemoveAll(toPath) + if err != nil { + t.Errorf("Failed to delete directory: %v", err) + } + }() + + result, err := d.handleBlob(context.Background(), blob, toPath) + assert.Error(t, err) + assert.Nil(t, result) + }) +} + +func TestHandleBlobSinglePart(t *testing.T) { + s, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) + assert.NoError(t, err) + ref := storage.DataReference("s3://container/file") + s.WriteRaw(context.Background(), ref, 0, storage.Options{}, bytes.NewReader([]byte{})) + + d := Downloader{store: s} 
+ + blob := &core.Blob{ + Uri: "s3://container/file", + Metadata: &core.BlobMetadata{ + Type: &core.BlobType{ + Dimensionality: core.BlobType_SINGLE, + }, + }, + } + + toPath := "./input" + defer func() { + err := os.RemoveAll(toPath) + if err != nil { + t.Errorf("Failed to delete file: %v", err) + } + }() + + result, err := d.handleBlob(context.Background(), blob, toPath) + assert.NoError(t, err) + assert.Equal(t, toPath, result) + + // Check if files were created and data written + if _, err := os.Stat(toPath); os.IsNotExist(err) { + t.Errorf("expected file %s to exist", toPath) + } +} + +func TestHandleBlobHTTP(t *testing.T) { + s, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) + assert.NoError(t, err) + d := Downloader{store: s} + + blob := &core.Blob{ + Uri: "https://raw.githubusercontent.com/flyteorg/flyte/master/README.md", + Metadata: &core.BlobMetadata{ + Type: &core.BlobType{ + Dimensionality: core.BlobType_SINGLE, + }, + }, + } + + toPath := "./input" + defer func() { + err := os.RemoveAll(toPath) + if err != nil { + t.Errorf("Failed to delete file: %v", err) + } + }() + + result, err := d.handleBlob(context.Background(), blob, toPath) + assert.NoError(t, err) + assert.Equal(t, toPath, result) + + // Check if files were created and data written + if _, err := os.Stat(toPath); os.IsNotExist(err) { + t.Errorf("expected file %s to exist", toPath) + } +} diff --git a/flytestdlib/storage/mem_store.go b/flytestdlib/storage/mem_store.go index d9da9b5b1e..540423a2a0 100644 --- a/flytestdlib/storage/mem_store.go +++ b/flytestdlib/storage/mem_store.go @@ -9,6 +9,7 @@ import ( "io" "io/ioutil" "os" + "strings" "sync" ) @@ -60,7 +61,20 @@ func (s *InMemoryStore) Head(ctx context.Context, reference DataReference) (Meta } func (s *InMemoryStore) List(ctx context.Context, reference DataReference, maxItems int, cursor Cursor) ([]DataReference, Cursor, error) { - return nil, NewCursorAtEnd(), fmt.Errorf("Not implemented yet") + var items []DataReference + prefix := strings.TrimSuffix(string(reference), "/") + "/" + + for ref := range s.cache { + if strings.HasPrefix(ref.String(), prefix) { + items = append(items, ref) + } + } + + if len(items) == 0 { + return nil, NewCursorAtEnd(), os.ErrNotExist + } + + return items, NewCursorAtEnd(), nil } func (s *InMemoryStore) ReadRaw(ctx context.Context, reference DataReference) (io.ReadCloser, error) { diff --git a/flytestdlib/storage/storage.go b/flytestdlib/storage/storage.go index 3d53a4d25f..e365816ff0 100644 --- a/flytestdlib/storage/storage.go +++ b/flytestdlib/storage/storage.go @@ -76,6 +76,10 @@ func NewCursorFromCustomPosition(customPosition string) Cursor { } } +func IsCursorEnd(cursor Cursor) bool { + return cursor.cursorState == AtEndCursorState +} + // DataStore is a simplified interface for accessing and storing data in one of the Cloud stores. // Today we rely on Stow for multi-cloud support, but this interface abstracts that part type DataStore struct { @@ -114,7 +118,7 @@ type RawStore interface { // Head gets metadata about the reference. This should generally be a light weight operation. 
Head(ctx context.Context, reference DataReference) (Metadata, error) - // List gets a list of items given a prefix, using a paginated API + // List gets a list of items (as relative paths to the reference input) given a prefix, using a paginated API List(ctx context.Context, reference DataReference, maxItems int, cursor Cursor) ([]DataReference, Cursor, error) // ReadRaw retrieves a byte array from the Blob store or an error
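For context on how the new List pagination and IsCursorEnd are meant to be driven (the same pattern flytecopilot's handleBlob uses above), here is a minimal sketch. It assumes a *storage.DataStore built with storage.NewDataStore; the listAllParts helper and the package name are illustrative only and are not part of this change.

    package example

    import (
    	"context"

    	"github.com/flyteorg/flyte/flytestdlib/storage"
    )

    // listAllParts pages through every item under prefix using the paginated List
    // API and the new IsCursorEnd helper. Illustrative sketch only.
    func listAllParts(ctx context.Context, store *storage.DataStore, prefix storage.DataReference) ([]storage.DataReference, error) {
    	const maxItems = 100
    	cursor := storage.NewCursorAtStart()
    	var all []storage.DataReference
    	for {
    		items, next, err := store.List(ctx, prefix, maxItems, cursor)
    		if err != nil {
    			return nil, err
    		}
    		all = append(all, items...)
    		// Stop once the store reports there are no further pages.
    		if storage.IsCursorEnd(next) {
    			return all, nil
    		}
    		cursor = next
    	}
    }

Note that the in-memory store's List added in this patch returns all matching keys in a single page with a cursor already at the end, so the loop above finishes after one iteration there; other backends may return multiple pages before IsCursorEnd reports true.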