From 771158446e46dcbf01b673c8ecec45aa44c43ef4 Mon Sep 17 00:00:00 2001 From: Austin Liu Date: Thu, 4 Apr 2024 19:51:01 +0800 Subject: [PATCH 01/16] [wip]refactor: add rust grpc client Signed-off-by: Austin Liu [wip]refactor: add rust grpc client Signed-off-by: Austin Liu [wip]refactor: add rust grpc client Signed-off-by: Austin Liu [wip]refactor: add rust grpc client Signed-off-by: Austin Liu [wip]refactor: add rust grpc client Signed-off-by: Austin Liu [wip]refactor: add rust grpc client Signed-off-by: Austin Liu [wip]refactor: add rust grpc client Signed-off-by: Austin Liu [wip]refactor: add rust grpc client Signed-off-by: Austin Liu --- flyrs/.gitignore | 12 + flyrs/Cargo.toml | 23 + flyrs/build.rs | 41 + flyrs/build.sh | 1 + flyrs/protos/flyteidl/admin/agent.proto | 254 ++++ .../flyteidl/admin/cluster_assignment.proto | 11 + flyrs/protos/flyteidl/admin/common.proto | 327 ++++ .../flyteidl/admin/description_entity.proto | 95 ++ flyrs/protos/flyteidl/admin/event.proto | 70 + flyrs/protos/flyteidl/admin/execution.proto | 424 ++++++ flyrs/protos/flyteidl/admin/launch_plan.proto | 222 +++ .../flyteidl/admin/matchable_resource.proto | 190 +++ .../flyteidl/admin/node_execution.proto | 245 +++ .../protos/flyteidl/admin/notification.proto | 27 + flyrs/protos/flyteidl/admin/project.proto | 110 ++ .../flyteidl/admin/project_attributes.proto | 69 + .../admin/project_domain_attributes.proto | 80 + flyrs/protos/flyteidl/admin/schedule.proto | 43 + flyrs/protos/flyteidl/admin/signal.proto | 86 ++ flyrs/protos/flyteidl/admin/task.proto | 71 + .../flyteidl/admin/task_execution.proto | 168 +++ flyrs/protos/flyteidl/admin/version.proto | 27 + flyrs/protos/flyteidl/admin/workflow.proto | 92 ++ .../flyteidl/admin/workflow_attributes.proto | 89 ++ .../flyteidl/cacheservice/cacheservice.proto | 143 ++ flyrs/protos/flyteidl/core/artifact_id.proto | 112 ++ flyrs/protos/flyteidl/core/catalog.proto | 63 + flyrs/protos/flyteidl/core/compiler.proto | 64 + flyrs/protos/flyteidl/core/condition.proto | 63 + flyrs/protos/flyteidl/core/dynamic_job.proto | 32 + flyrs/protos/flyteidl/core/errors.proto | 35 + flyrs/protos/flyteidl/core/execution.proto | 116 ++ flyrs/protos/flyteidl/core/identifier.proto | 80 + flyrs/protos/flyteidl/core/interface.proto | 64 + flyrs/protos/flyteidl/core/literals.proto | 183 +++ flyrs/protos/flyteidl/core/metrics.proto | 50 + flyrs/protos/flyteidl/core/security.proto | 130 ++ flyrs/protos/flyteidl/core/tasks.proto | 351 +++++ flyrs/protos/flyteidl/core/types.proto | 208 +++ flyrs/protos/flyteidl/core/workflow.proto | 315 ++++ .../flyteidl/core/workflow_closure.proto | 18 + .../flyteidl/datacatalog/datacatalog.proto | 420 ++++++ flyrs/protos/flyteidl/event/cloudevents.proto | 73 + flyrs/protos/flyteidl/event/event.proto | 315 ++++ flyrs/protos/flyteidl/plugins/array_job.proto | 30 + flyrs/protos/flyteidl/plugins/dask.proto | 41 + .../flyteidl/plugins/kubeflow/common.proto | 33 + .../flyteidl/plugins/kubeflow/mpi.proto | 43 + .../flyteidl/plugins/kubeflow/pytorch.proto | 49 + .../plugins/kubeflow/tensorflow.proto | 42 + flyrs/protos/flyteidl/plugins/mpi.proto | 20 + flyrs/protos/flyteidl/plugins/presto.proto | 14 + flyrs/protos/flyteidl/plugins/pytorch.proto | 25 + flyrs/protos/flyteidl/plugins/qubole.proto | 26 + flyrs/protos/flyteidl/plugins/ray.proto | 50 + flyrs/protos/flyteidl/plugins/spark.proto | 34 + .../protos/flyteidl/plugins/tensorflow.proto | 18 + flyrs/protos/flyteidl/plugins/waitable.proto | 15 + flyrs/protos/flyteidl/service/admin.proto | 659 +++++++++ 
flyrs/protos/flyteidl/service/agent.proto | 79 + flyrs/protos/flyteidl/service/auth.proto | 94 ++ flyrs/protos/flyteidl/service/dataproxy.proto | 205 +++ .../service/external_plugin_service.proto | 79 + flyrs/protos/flyteidl/service/identity.proto | 51 + flyrs/protos/flyteidl/service/signal.proto | 55 + flyrs/protos/google/api/annotations.proto | 31 + flyrs/protos/google/api/client.proto | 99 ++ flyrs/protos/google/api/field_behavior.proto | 84 ++ flyrs/protos/google/api/http.proto | 375 +++++ flyrs/protos/google/api/resource.proto | 299 ++++ flyrs/protos/google/pubsub/v1/pubsub.proto | 1316 +++++++++++++++++ flyrs/protos/google/pubsub/v1/schema.proto | 289 ++++ .../options/annotations.proto | 44 + .../options/openapiv2.proto | 720 +++++++++ flyrs/setup.sh | 1 + flyrs/src/lib.rs | 144 ++ flyrs/test_flytekit_remote.py | 38 + flytekit/clients/friendly_rs.py | 1045 +++++++++++++ flytekit/remote/remote.py | 10 +- 79 files changed, 11767 insertions(+), 2 deletions(-) create mode 100644 flyrs/.gitignore create mode 100644 flyrs/Cargo.toml create mode 100644 flyrs/build.rs create mode 100644 flyrs/build.sh create mode 100644 flyrs/protos/flyteidl/admin/agent.proto create mode 100644 flyrs/protos/flyteidl/admin/cluster_assignment.proto create mode 100644 flyrs/protos/flyteidl/admin/common.proto create mode 100644 flyrs/protos/flyteidl/admin/description_entity.proto create mode 100644 flyrs/protos/flyteidl/admin/event.proto create mode 100644 flyrs/protos/flyteidl/admin/execution.proto create mode 100644 flyrs/protos/flyteidl/admin/launch_plan.proto create mode 100644 flyrs/protos/flyteidl/admin/matchable_resource.proto create mode 100644 flyrs/protos/flyteidl/admin/node_execution.proto create mode 100644 flyrs/protos/flyteidl/admin/notification.proto create mode 100644 flyrs/protos/flyteidl/admin/project.proto create mode 100644 flyrs/protos/flyteidl/admin/project_attributes.proto create mode 100644 flyrs/protos/flyteidl/admin/project_domain_attributes.proto create mode 100644 flyrs/protos/flyteidl/admin/schedule.proto create mode 100644 flyrs/protos/flyteidl/admin/signal.proto create mode 100644 flyrs/protos/flyteidl/admin/task.proto create mode 100644 flyrs/protos/flyteidl/admin/task_execution.proto create mode 100644 flyrs/protos/flyteidl/admin/version.proto create mode 100644 flyrs/protos/flyteidl/admin/workflow.proto create mode 100644 flyrs/protos/flyteidl/admin/workflow_attributes.proto create mode 100644 flyrs/protos/flyteidl/cacheservice/cacheservice.proto create mode 100644 flyrs/protos/flyteidl/core/artifact_id.proto create mode 100644 flyrs/protos/flyteidl/core/catalog.proto create mode 100644 flyrs/protos/flyteidl/core/compiler.proto create mode 100644 flyrs/protos/flyteidl/core/condition.proto create mode 100644 flyrs/protos/flyteidl/core/dynamic_job.proto create mode 100644 flyrs/protos/flyteidl/core/errors.proto create mode 100644 flyrs/protos/flyteidl/core/execution.proto create mode 100644 flyrs/protos/flyteidl/core/identifier.proto create mode 100644 flyrs/protos/flyteidl/core/interface.proto create mode 100644 flyrs/protos/flyteidl/core/literals.proto create mode 100644 flyrs/protos/flyteidl/core/metrics.proto create mode 100644 flyrs/protos/flyteidl/core/security.proto create mode 100644 flyrs/protos/flyteidl/core/tasks.proto create mode 100644 flyrs/protos/flyteidl/core/types.proto create mode 100644 flyrs/protos/flyteidl/core/workflow.proto create mode 100644 flyrs/protos/flyteidl/core/workflow_closure.proto create mode 100644 
flyrs/protos/flyteidl/datacatalog/datacatalog.proto create mode 100644 flyrs/protos/flyteidl/event/cloudevents.proto create mode 100644 flyrs/protos/flyteidl/event/event.proto create mode 100644 flyrs/protos/flyteidl/plugins/array_job.proto create mode 100644 flyrs/protos/flyteidl/plugins/dask.proto create mode 100644 flyrs/protos/flyteidl/plugins/kubeflow/common.proto create mode 100644 flyrs/protos/flyteidl/plugins/kubeflow/mpi.proto create mode 100644 flyrs/protos/flyteidl/plugins/kubeflow/pytorch.proto create mode 100644 flyrs/protos/flyteidl/plugins/kubeflow/tensorflow.proto create mode 100644 flyrs/protos/flyteidl/plugins/mpi.proto create mode 100644 flyrs/protos/flyteidl/plugins/presto.proto create mode 100644 flyrs/protos/flyteidl/plugins/pytorch.proto create mode 100644 flyrs/protos/flyteidl/plugins/qubole.proto create mode 100644 flyrs/protos/flyteidl/plugins/ray.proto create mode 100644 flyrs/protos/flyteidl/plugins/spark.proto create mode 100644 flyrs/protos/flyteidl/plugins/tensorflow.proto create mode 100644 flyrs/protos/flyteidl/plugins/waitable.proto create mode 100644 flyrs/protos/flyteidl/service/admin.proto create mode 100644 flyrs/protos/flyteidl/service/agent.proto create mode 100644 flyrs/protos/flyteidl/service/auth.proto create mode 100644 flyrs/protos/flyteidl/service/dataproxy.proto create mode 100644 flyrs/protos/flyteidl/service/external_plugin_service.proto create mode 100644 flyrs/protos/flyteidl/service/identity.proto create mode 100644 flyrs/protos/flyteidl/service/signal.proto create mode 100644 flyrs/protos/google/api/annotations.proto create mode 100644 flyrs/protos/google/api/client.proto create mode 100644 flyrs/protos/google/api/field_behavior.proto create mode 100644 flyrs/protos/google/api/http.proto create mode 100644 flyrs/protos/google/api/resource.proto create mode 100644 flyrs/protos/google/pubsub/v1/pubsub.proto create mode 100644 flyrs/protos/google/pubsub/v1/schema.proto create mode 100644 flyrs/protos/protoc-gen-openapiv2/options/annotations.proto create mode 100644 flyrs/protos/protoc-gen-openapiv2/options/openapiv2.proto create mode 100644 flyrs/setup.sh create mode 100644 flyrs/src/lib.rs create mode 100644 flyrs/test_flytekit_remote.py create mode 100644 flytekit/clients/friendly_rs.py diff --git a/flyrs/.gitignore b/flyrs/.gitignore new file mode 100644 index 0000000000..95b531ed13 --- /dev/null +++ b/flyrs/.gitignore @@ -0,0 +1,12 @@ +/target + +Cargo.lock + +pyproject.toml + + +# Added by cargo +# +# already existing elements were commented out + +#/target diff --git a/flyrs/Cargo.toml b/flyrs/Cargo.toml new file mode 100644 index 0000000000..e4ca144ba9 --- /dev/null +++ b/flyrs/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "flyrs" +version = "0.2.0" +edition = "2021" + +[lib] +# The name of the native library. This is the name which will be used in Python to import the +# library (i.e. `import string_sum`). If you change this, you must also change the name of the +# `#[pymodule]` in `src/lib.rs`. +name = "flyrs" +# "cdylib" is necessary to produce a shared library for Python to import from. 
+crate-type = ["cdylib"] + +[dependencies] +prost = "0.12.3" +tonic = "0.11.0" +tokio = { version = "1.9", features = ["full"] } +pyo3 = { version = "0.21", features = ["extension-module", "experimental-async"] } +pyo3-asyncio = { version = "0.14", features = ["tokio-runtime"] } +prost-types = "0.12.3" + +[build-dependencies] +tonic-build = "0.11.0" diff --git a/flyrs/build.rs b/flyrs/build.rs new file mode 100644 index 0000000000..a4cf7ec170 --- /dev/null +++ b/flyrs/build.rs @@ -0,0 +1,41 @@ +use std::fs; +use std::path::{PathBuf}; + +fn main() -> Result<(), Box> { + let proto_package_dirs:Vec<&str> = ["protos/flyteidl/admin/", "protos/flyteidl/cacheservice/", "protos/flyteidl/core/", "protos/flyteidl/datacatalog/", "protos/flyteidl/event/", "protos/flyteidl/plugins/", "protos/flyteidl/service/"].to_vec(); + let out_dir = concat!("src/", env!("PB_OUT_DIR")); // Avoid using `OUT_DIR`. It's already used by tonic_build and will have side effects in the target build folder. + for package_dir in proto_package_dirs.iter() { + let proto_files = find_proto_files(package_dir)?; + let proto_files_paths: Vec<&str> =proto_files.iter().map(|path| path.to_str().unwrap()).collect(); + println!("{}", format!("{:?}", proto_files_paths)); + + tonic_build::configure() + .build_server(false) + // .compile_well_known_types(true) // Defaults to false. Enable it if you don't want tonic_build to handle Well-known types by adding the `prost-types` crate automatically. + .out_dir(out_dir) + .compile( + &proto_files_paths, + &["protos/"], // same as arg `-I`` in `protoc`, it's the root folder when impoting other *.proto files. + )?; + } + Ok(()) +} + +fn find_proto_files(dir: &str) -> Result, std::io::Error> { + let mut proto_files = Vec::new(); + if let Ok(entries) = fs::read_dir(dir) { + for entry in entries { + if let Ok(entry) = entry { + let path = entry.path(); + if path.is_file() && path.extension().map_or(false, |ext| ext == "proto") { + proto_files.push(path); + } else if path.is_dir() { + if let Ok(mut nested_proto_files) = find_proto_files(&path.to_str().unwrap()) { + proto_files.append(&mut nested_proto_files); + } + } + } + } + } + Ok(proto_files) +} diff --git a/flyrs/build.sh b/flyrs/build.sh new file mode 100644 index 0000000000..516f59fc7d --- /dev/null +++ b/flyrs/build.sh @@ -0,0 +1 @@ +maturin develop --release \ No newline at end of file diff --git a/flyrs/protos/flyteidl/admin/agent.proto b/flyrs/protos/flyteidl/admin/agent.proto new file mode 100644 index 0000000000..b84171c8d6 --- /dev/null +++ b/flyrs/protos/flyteidl/admin/agent.proto @@ -0,0 +1,254 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +import "flyteidl/core/literals.proto"; +import "flyteidl/core/tasks.proto"; +import "flyteidl/core/workflow.proto"; +import "flyteidl/core/identifier.proto"; +import "flyteidl/core/execution.proto"; +import "flyteidl/core/metrics.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/struct.proto"; + +// The state of the execution is used to control its visibility in the UI/CLI. +enum State { + option deprecated = true; + RETRYABLE_FAILURE = 0; + PERMANENT_FAILURE = 1; + PENDING = 2; + RUNNING = 3; + SUCCEEDED = 4; +} + +// Represents a subset of runtime task execution metadata that are relevant to external plugins. 
+ +// Represents a subset of runtime task execution metadata that is relevant to external plugins. +message TaskExecutionMetadata { + // ID of the task execution + core.TaskExecutionIdentifier task_execution_id = 1; + // k8s namespace where the task is executed in + string namespace = 2; + // Labels attached to the task execution + map<string, string> labels = 3; + // Annotations attached to the task execution + map<string, string> annotations = 4; + // k8s service account associated with the task execution + string k8s_service_account = 5; + // Environment variables attached to the task execution + map<string, string> environment_variables = 6; + // Represents the maximum number of attempts allowed for a task. + // If a task fails, it can be retried up to this maximum number of attempts. + int32 max_attempts = 7; + // Indicates whether the task execution can be interrupted. + // If set to true, the task can be stopped before completion. + bool interruptible = 8; + // Specifies the threshold for failure count at which the interruptible property + // will take effect. If the number of consecutive task failures exceeds this threshold, + // interruptible behavior will be activated. + int32 interruptible_failure_threshold = 9; + // Overrides for specific properties of the task node. + // These overrides can be used to customize the behavior of the task node. + core.TaskNodeOverrides overrides = 10; +} + +// Represents a request structure to create a task. +message CreateTaskRequest { + // The inputs required to start the execution. All required inputs must be + // included in this map. If not required and not provided, defaults apply. + // +optional + core.LiteralMap inputs = 1; + // Template of the task that encapsulates all the metadata of the task. + core.TaskTemplate template = 2; + // Prefix for where task output data will be written. (e.g. s3://my-bucket/randomstring) + string output_prefix = 3; + // subset of runtime task execution metadata. + TaskExecutionMetadata task_execution_metadata = 4; +} + +// Represents a create response structure. +message CreateTaskResponse { + // ResourceMeta is created by the agent. It could be a string (jobId) or a dict (more complex metadata). + bytes resource_meta = 1; +} + +message CreateRequestHeader { + // Template of the task that encapsulates all the metadata of the task. + core.TaskTemplate template = 1; + // Prefix for where task output data will be written. (e.g. s3://my-bucket/randomstring) + string output_prefix = 2; + // subset of runtime task execution metadata. + TaskExecutionMetadata task_execution_metadata = 3; + // MaxDatasetSizeBytes is the maximum size of the dataset that can be generated by the task. + int64 max_dataset_size_bytes = 4; +} + + +message ExecuteTaskSyncRequest { + oneof part { + CreateRequestHeader header = 1; + core.LiteralMap inputs = 2; + } +} + +message ExecuteTaskSyncResponseHeader { + Resource resource = 1; +} + +message ExecuteTaskSyncResponse { + // Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata). + // Resource is for synchronous task execution. + oneof res { + ExecuteTaskSyncResponseHeader header = 1; + core.LiteralMap outputs = 2; + } +} + +// A message used to fetch a job resource from the flyte agent server. +message GetTaskRequest { + // A predefined yet extensible Task type identifier. + string task_type = 1 [deprecated = true]; + // Metadata about the resource to be passed to the agent. + bytes resource_meta = 2; + // A predefined yet extensible Task type identifier. + TaskCategory task_category = 3; +}
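The `ExecuteTaskSyncRequest` oneof above encodes a two-frame streaming pattern: the first message on the stream carries the header, subsequent messages carry inputs. A hedged sketch of how the prost-generated types would express this, assuming the module layout from the earlier sketch (prost models each oneof as an `Option` of an enum generated in a submodule named after the message):

```rust
use flyteidl::admin::{
    execute_task_sync_request, CreateRequestHeader, ExecuteTaskSyncRequest,
};
use flyteidl::core::LiteralMap;

// Build the frames for a sync-task call: header first, then the inputs.
// Default::default() stands in for real template/inputs values here.
fn sync_task_frames() -> Vec<ExecuteTaskSyncRequest> {
    vec![
        ExecuteTaskSyncRequest {
            part: Some(execute_task_sync_request::Part::Header(
                CreateRequestHeader::default(),
            )),
        },
        ExecuteTaskSyncRequest {
            part: Some(execute_task_sync_request::Part::Inputs(
                LiteralMap::default(),
            )),
        },
    ]
}
```

A tonic client-streaming call would then typically wrap such a vector in a stream (for example with `tokio_stream::iter`, a crate this patch does not declare) before invoking the RPC.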
+ +// Response to get an individual task resource. +message GetTaskResponse { + Resource resource = 1; +} + +message Resource { + // DEPRECATED. The state of the execution is used to control its visibility in the UI/CLI. + State state = 1 [deprecated = true]; + // The outputs of the execution. It's typically used by SQL tasks. Agent service will create a + // Structured dataset pointing to the query result table. + // +optional + core.LiteralMap outputs = 2; + // A descriptive message for the current state. e.g. waiting for cluster. + string message = 3; + // Log information for the task execution. + repeated core.TaskLog log_links = 4; + // The phase of the execution is used to determine the phase of the plugin's execution. + core.TaskExecution.Phase phase = 5; + // Custom data specific to the agent. + google.protobuf.Struct custom_info = 6; +} + +// A message used to delete a task. +message DeleteTaskRequest { + // A predefined yet extensible Task type identifier. + string task_type = 1 [deprecated = true]; + // Metadata about the resource to be passed to the agent. + bytes resource_meta = 2; + // A predefined yet extensible Task type identifier. + TaskCategory task_category = 3; +} + +// Response to delete a task. +message DeleteTaskResponse {} + +// A message containing the agent metadata. +message Agent { + // Name is the developer-assigned name of the agent. + string name = 1; + + // SupportedTaskTypes are the types of the tasks that the agent can handle. + repeated string supported_task_types = 2 [deprecated = true]; + + // IsSync indicates whether this agent is a sync agent. Sync agents are expected to return their + // results synchronously when called by propeller. Given that sync agents can affect the performance + // of the system, it's important to enforce strict timeout policies. + // An Async agent, on the other hand, is required to be able to identify jobs by an + // identifier and query for job statuses as jobs progress. + bool is_sync = 3; + + // Supported_task_categories are the categories of the tasks that the agent can handle. + repeated TaskCategory supported_task_categories = 4; +} + +message TaskCategory { + // The name of the task type. + string name = 1; + // The version of the task type. + int32 version = 2; +} + +// A request to get an agent. +message GetAgentRequest { + // The name of the agent. + string name = 1; +} + +// A response containing an agent. +message GetAgentResponse { + Agent agent = 1; +} + +// A request to list all agents. +message ListAgentsRequest {} + +// A response containing a list of agents. +message ListAgentsResponse { + repeated Agent agents = 1; +} + +// A request to get the metrics from a task execution. +message GetTaskMetricsRequest { + // A predefined yet extensible Task type identifier. + string task_type = 1 [deprecated = true]; + // Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata). + bytes resource_meta = 2; + // The metrics to query. If empty, will return a default set of metrics. + // e.g. EXECUTION_METRIC_USED_CPU_AVG or EXECUTION_METRIC_USED_MEMORY_BYTES_AVG + repeated string queries = 3; + // Start timestamp, inclusive. + google.protobuf.Timestamp start_time = 4; + // End timestamp, inclusive. + google.protobuf.Timestamp end_time = 5; + // Query resolution step width in duration format or float number of seconds. + google.protobuf.Duration step = 6; + // A predefined yet extensible Task type identifier. + TaskCategory task_category = 7; +} + +// A response containing a list of metrics for a task execution.
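The metrics request just defined mixes plain fields with well-known types. A hedged sketch of filling it in from Rust: prost maps `google.protobuf.Timestamp` and `Duration` to `prost_types::Timestamp` and `prost_types::Duration` (hence the `prost-types` dependency in Cargo.toml); the module path and the literal values below are assumptions.

```rust
use flyteidl::admin::GetTaskMetricsRequest;

// Query one hour of average-CPU metrics at a 60 s resolution for the job
// identified by the agent-minted resource_meta bytes.
fn metrics_request(meta: Vec<u8>) -> GetTaskMetricsRequest {
    GetTaskMetricsRequest {
        task_type: String::new(), // deprecated field, left empty
        resource_meta: meta,
        queries: vec!["EXECUTION_METRIC_USED_CPU_AVG".to_string()],
        start_time: Some(prost_types::Timestamp { seconds: 1_700_000_000, nanos: 0 }),
        end_time: Some(prost_types::Timestamp { seconds: 1_700_003_600, nanos: 0 }),
        step: Some(prost_types::Duration { seconds: 60, nanos: 0 }),
        task_category: None,
    }
}
```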
+message GetTaskMetricsResponse { + // The execution metric results. + repeated core.ExecutionMetricResult results = 1; +} + +// A request to get the log from a task execution. +message GetTaskLogsRequest { + // A predefined yet extensible Task type identifier. + string task_type = 1 [deprecated = true]; + // Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata). + bytes resource_meta = 2; + // Number of lines to return. + uint64 lines = 3; + // In the case of multiple pages of results, the server-provided token can be used to fetch the next page + // in a query. If there are no more results, this value will be empty. + string token = 4; + // A predefined yet extensible Task type identifier. + TaskCategory task_category = 5; +} + +message GetTaskLogsResponseHeader { + // In the case of multiple pages of results, the server-provided token can be used to fetch the next page + // in a query. If there are no more results, this value will be empty. + string token = 1; +} + +message GetTaskLogsResponseBody { + // The execution log results. + repeated string results = 1; +} + +// A response containing the logs for a task execution. +message GetTaskLogsResponse { + oneof part { + GetTaskLogsResponseHeader header = 1; + GetTaskLogsResponseBody body = 2; + } +} diff --git a/flyrs/protos/flyteidl/admin/cluster_assignment.proto b/flyrs/protos/flyteidl/admin/cluster_assignment.proto new file mode 100644 index 0000000000..6a55798436 --- /dev/null +++ b/flyrs/protos/flyteidl/admin/cluster_assignment.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + + +// Encapsulates specifications for routing an execution onto a specific cluster. +message ClusterAssignment { + reserved 1, 2; + string cluster_pool_name = 3; +} diff --git a/flyrs/protos/flyteidl/admin/common.proto b/flyrs/protos/flyteidl/admin/common.proto new file mode 100644 index 0000000000..6c04b0531a --- /dev/null +++ b/flyrs/protos/flyteidl/admin/common.proto @@ -0,0 +1,327 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +import "flyteidl/core/execution.proto"; +import "flyteidl/core/identifier.proto"; +import "flyteidl/core/literals.proto"; +import "google/protobuf/timestamp.proto"; + +// Encapsulation of fields that identifies a Flyte resource. +// A Flyte resource can be a task, workflow or launch plan. +// A resource can internally have multiple versions and is uniquely identified +// by project, domain, and name. +message NamedEntityIdentifier { + // Name of the project the resource belongs to. + string project = 1; + // Name of the domain the resource belongs to. + // A domain can be considered as a subset within a specific project. + string domain = 2; + // User provided value for the resource. + // The combination of project + domain + name uniquely identifies the resource. + // +optional - in certain contexts - like 'List API', 'Launch plans' + string name = 3; + + // Optional, org key applied to the resource. + string org = 4; +} + +// The status of the named entity is used to control its visibility in the UI. +enum NamedEntityState { + // By default, all named entities are considered active and under development. + NAMED_ENTITY_ACTIVE = 0; + + // Archived named entities are no longer visible in the UI. 
+ NAMED_ENTITY_ARCHIVED = 1; + + // System generated entities that aren't explicitly created or managed by a user. + SYSTEM_GENERATED = 2; +} + +// Additional metadata around a named entity. +message NamedEntityMetadata { + // Common description across all versions of the entity + // +optional + string description = 1; + + // Shared state across all version of the entity + // At this point in time, only workflow entities can have their state archived. + NamedEntityState state = 2; +} + +// Encapsulates information common to a NamedEntity, a Flyte resource such as a task, +// workflow or launch plan. A NamedEntity is exclusively identified by its resource type +// and identifier. +message NamedEntity { + // Resource type of the named entity. One of Task, Workflow or LaunchPlan. + flyteidl.core.ResourceType resource_type = 1; + NamedEntityIdentifier id = 2; + + // Additional metadata around a named entity. + NamedEntityMetadata metadata = 3; +} + +// Specifies sort ordering in a list request. +message Sort { + enum Direction { + + // By default, fields are sorted in descending order. + DESCENDING = 0; + ASCENDING = 1; + } + // Indicates an attribute to sort the response values. + // +required + string key = 1; + + // Indicates the direction to apply sort key for response values. + // +optional + Direction direction = 2; +} + +// Represents a request structure to list NamedEntityIdentifiers. +message NamedEntityIdentifierListRequest { + // Name of the project that contains the identifiers. + // +required + string project = 1; + + // Name of the domain the identifiers belongs to within the project. + // +required + string domain = 2; + + // Indicates the number of resources to be returned. + // +required + uint32 limit = 3; + + // In the case of multiple pages of results, the server-provided token can be used to fetch the next page + // in a query. + // +optional + string token = 4; + + // Specifies how listed entities should be sorted in the response. + // +optional + Sort sort_by = 5; + + // Indicates a list of filters passed as string. + // +optional + string filters = 6; + + // Optional, org key applied to the resource. + string org = 7; +} + +// Represents a request structure to list NamedEntity objects +message NamedEntityListRequest { + // Resource type of the metadata to query. One of Task, Workflow or LaunchPlan. + // +required + flyteidl.core.ResourceType resource_type = 1; + // Name of the project that contains the identifiers. + // +required + string project = 2; + // Name of the domain the identifiers belongs to within the project. + string domain = 3; + // Indicates the number of resources to be returned. + uint32 limit = 4; + // In the case of multiple pages of results, the server-provided token can be used to fetch the next page + // in a query. + // +optional + string token = 5; + + // Specifies how listed entities should be sorted in the response. + // +optional + Sort sort_by = 6; + + // Indicates a list of filters passed as string. + // +optional + string filters = 7; + + // Optional, org key applied to the resource. + string org = 8; +} + +// Represents a list of NamedEntityIdentifiers. +message NamedEntityIdentifierList { + // A list of identifiers. + repeated NamedEntityIdentifier entities = 1; + + // In the case of multiple pages of results, the server-provided token can be used to fetch the next page + // in a query. If there are no more results, this value will be empty. + string token = 2; +} + +// Represents a list of NamedEntityIdentifiers. 
+message NamedEntityList { + // A list of NamedEntity objects + repeated NamedEntity entities = 1; + + // In the case of multiple pages of results, the server-provided token can be used to fetch the next page + // in a query. If there are no more results, this value will be empty. + string token = 2; +} + +// A request to retrieve the metadata associated with a NamedEntityIdentifier +message NamedEntityGetRequest { + // Resource type of the metadata to get. One of Task, Workflow or LaunchPlan. + // +required + flyteidl.core.ResourceType resource_type = 1; + + // The identifier for the named entity for which to fetch metadata. + // +required + NamedEntityIdentifier id = 2; +} + +// Request to set the referenced named entity state to the configured value. +message NamedEntityUpdateRequest { + // Resource type of the metadata to update + // +required + flyteidl.core.ResourceType resource_type = 1; + + // Identifier of the metadata to update + // +required + NamedEntityIdentifier id = 2; + + // Metadata object to set as the new value + // +required + NamedEntityMetadata metadata = 3; +} + +// Purposefully empty, may be populated in the future. +message NamedEntityUpdateResponse { +} + +// Shared request structure to fetch a single resource. +// Resources include: Task, Workflow, LaunchPlan +message ObjectGetRequest { + // Indicates a unique version of resource. + // +required + core.Identifier id = 1; +} + +// Shared request structure to retrieve a list of resources. +// Resources include: Task, Workflow, LaunchPlan +message ResourceListRequest { + // id represents the unique identifier of the resource. + // +required + NamedEntityIdentifier id = 1; + + // Indicates the number of resources to be returned. + // +required + uint32 limit = 2; + + // In the case of multiple pages of results, this server-provided token can be used to fetch the next page + // in a query. + // +optional + string token = 3; + + // Indicates a list of filters passed as string. + // More info on constructing filters : + // +optional + string filters = 4; + + // Sort ordering. + // +optional + Sort sort_by = 5; +} + +// Defines an email notification specification. +message EmailNotification { + // The list of email addresses recipients for this notification. + // +required + repeated string recipients_email = 1; +} + +// Defines a pager duty notification specification. +message PagerDutyNotification { + // Currently, PagerDuty notifications leverage email to trigger a notification. + // +required + repeated string recipients_email = 1; +} + +// Defines a slack notification specification. +message SlackNotification { + // Currently, Slack notifications leverage email to trigger a notification. + // +required + repeated string recipients_email = 1; +} + +// Represents a structure for notifications based on execution status. +// The notification content is configured within flyte admin but can be templatized. +// Future iterations could expose configuring notifications with custom content. +message Notification { + // A list of phases to which users can associate the notifications to. + // +required + repeated core.WorkflowExecution.Phase phases = 1; + + // The type of notification to trigger. + // +required + oneof type { + EmailNotification email = 2; + PagerDutyNotification pager_duty = 3; + SlackNotification slack = 4; + } + +} + +// Represents a string url and associated metadata used throughout the platform. +message UrlBlob { + option deprecated = true; + + // Actual url value. 
+ string url = 1; + + // Represents the size of the file accessible at the above url. + int64 bytes = 2; +} + +// Label values to be applied to an execution resource. +// In the future a mode (e.g. OVERRIDE, APPEND, etc) can be defined +// to specify how to merge labels defined at registration and execution time. +message Labels { + // Map of custom labels to be applied to the execution resource. + map<string, string> values = 1; +} + +// Annotation values to be applied to an execution resource. +// In the future a mode (e.g. OVERRIDE, APPEND, etc) can be defined +// to specify how to merge annotations defined at registration and execution time. +message Annotations { + // Map of custom annotations to be applied to the execution resource. + map<string, string> values = 1; +} + +// Environment variable values to be applied to an execution resource. +// In the future a mode (e.g. OVERRIDE, APPEND, etc) can be defined +// to specify how to merge environment variables defined at registration and execution time. +message Envs { + // Map of custom environment variables to be applied to the execution resource. + repeated flyteidl.core.KeyValuePair values = 1; +} + +// Defines permissions associated with executions created by this launch plan spec. +// Use either of these roles when they have permissions required by your workflow execution. +// Deprecated. +message AuthRole { + option deprecated = true; + + // Defines an optional iam role which will be used for tasks run in executions created with this launch plan. + string assumable_iam_role = 1; + + // Defines an optional kubernetes service account which will be used for tasks run in executions created with this launch plan. + string kubernetes_service_account = 2; +} + + +// Encapsulates user settings pertaining to offloaded data (i.e. Blobs, Schema, query data, etc.). +// See https://github.com/flyteorg/flyte/issues/211 for more background information. +message RawOutputDataConfig { + // Prefix for where offloaded data from user workflows will be written + // e.g. s3://bucket/key or s3://bucket/ + string output_location_prefix = 1; +} + +// These URLs are returned as part of node and task execution data requests. +message FlyteURLs { + string inputs = 1; + string outputs = 2; + string deck = 3; +} diff --git a/flyrs/protos/flyteidl/admin/description_entity.proto b/flyrs/protos/flyteidl/admin/description_entity.proto new file mode 100644 index 0000000000..055ca0f4b6 --- /dev/null +++ b/flyrs/protos/flyteidl/admin/description_entity.proto @@ -0,0 +1,95 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +import "flyteidl/core/identifier.proto"; +import "flyteidl/admin/common.proto"; + +// DescriptionEntity contains detailed description for the task/workflow. +// Documentation could provide insight into the algorithms, business use case, etc. +message DescriptionEntity { + // id represents the unique identifier of the description entity. + core.Identifier id = 1; + // One-liner overview of the entity. + string short_description = 2; + // Full user description with formatting preserved. + Description long_description = 3; + // Optional link to source code used to define this entity. + SourceCode source_code = 4; + // User-specified tags. These are arbitrary and can be used for searching, + // filtering and discovering tasks.
+ repeated string tags = 5; +} + +// The format of the long description +enum DescriptionFormat { + DESCRIPTION_FORMAT_UNKNOWN = 0; + DESCRIPTION_FORMAT_MARKDOWN = 1; + DESCRIPTION_FORMAT_HTML = 2; + // python default documentation - comments is rst + DESCRIPTION_FORMAT_RST = 3; +} + +// Full user description with formatting preserved. This can be rendered +// by clients, such as the console or command line tools with in-tact +// formatting. +message Description { + oneof content { + // long description - no more than 4KB + string value = 1; + // if the description sizes exceed some threshold we can offload the entire + // description proto altogether to an external data store, like S3 rather than store inline in the db + string uri = 2; + } + + // Format of the long description + DescriptionFormat format = 3; + // Optional link to an icon for the entity + string icon_link = 4; +} + +// Link to source code used to define this entity +message SourceCode { + string link = 1; +} + +// Represents a list of DescriptionEntities returned from the admin. +// See :ref:`ref_flyteidl.admin.DescriptionEntity` for more details +message DescriptionEntityList { + // A list of DescriptionEntities returned based on the request. + repeated DescriptionEntity descriptionEntities = 1; + + // In the case of multiple pages of results, the server-provided token can be used to fetch the next page + // in a query. If there are no more results, this value will be empty. + string token = 2; +} + +// Represents a request structure to retrieve a list of DescriptionEntities. +// See :ref:`ref_flyteidl.admin.DescriptionEntity` for more details +message DescriptionEntityListRequest { + // Identifies the specific type of resource that this identifier corresponds to. + flyteidl.core.ResourceType resource_type = 1; + + // The identifier for the description entity. + // +required + NamedEntityIdentifier id = 2; + + // Indicates the number of resources to be returned. + // +required + uint32 limit = 3; + + // In the case of multiple pages of results, the server-provided token can be used to fetch the next page + // in a query. + // +optional + string token = 4; + + // Indicates a list of filters passed as string. + // More info on constructing filters : + // +optional + string filters = 5; + + // Sort ordering for returned list. + // +optional + Sort sort_by = 6; +} diff --git a/flyrs/protos/flyteidl/admin/event.proto b/flyrs/protos/flyteidl/admin/event.proto new file mode 100644 index 0000000000..0762ff78af --- /dev/null +++ b/flyrs/protos/flyteidl/admin/event.proto @@ -0,0 +1,70 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +import "flyteidl/event/event.proto"; + +// Indicates that a sent event was not used to update execution state due to +// the referenced execution already being terminated (and therefore ineligible +// for further state transitions). +message EventErrorAlreadyInTerminalState { + // +required + string current_phase = 1; +} + +// Indicates an event was rejected because it came from a different cluster than +// is on record as running the execution. +message EventErrorIncompatibleCluster { + // The cluster which has been recorded as processing the execution. + // +required + string cluster = 1; +} + +// Indicates why a sent event was not used to update execution. 
+message EventFailureReason { + // +required + oneof reason { + EventErrorAlreadyInTerminalState already_in_terminal_state = 1; + EventErrorIncompatibleCluster incompatible_cluster = 2; + } +} + +// Request to send a notification that a workflow execution event has occurred. +message WorkflowExecutionEventRequest { + // Unique ID for this request that can be traced between services + string request_id = 1; + + // Details about the event that occurred. + event.WorkflowExecutionEvent event = 2; +} + +message WorkflowExecutionEventResponse { + // Purposefully empty, may be populated in the future. +} + +// Request to send a notification that a node execution event has occurred. +message NodeExecutionEventRequest { + // Unique ID for this request that can be traced between services + string request_id = 1; + + // Details about the event that occurred. + event.NodeExecutionEvent event = 2; +} + +message NodeExecutionEventResponse { + // Purposefully empty, may be populated in the future. +} + +// Request to send a notification that a task execution event has occurred. +message TaskExecutionEventRequest { + // Unique ID for this request that can be traced between services + string request_id = 1; + + // Details about the event that occurred. + event.TaskExecutionEvent event = 2; +} + +message TaskExecutionEventResponse { + // Purposefully empty, may be populated in the future. +} diff --git a/flyrs/protos/flyteidl/admin/execution.proto b/flyrs/protos/flyteidl/admin/execution.proto new file mode 100644 index 0000000000..cc7fa1d15c --- /dev/null +++ b/flyrs/protos/flyteidl/admin/execution.proto @@ -0,0 +1,424 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +import "flyteidl/admin/cluster_assignment.proto"; +import "flyteidl/admin/common.proto"; +import "flyteidl/core/literals.proto"; +import "flyteidl/core/execution.proto"; +import "flyteidl/core/artifact_id.proto"; +import "flyteidl/core/identifier.proto"; +import "flyteidl/core/metrics.proto"; +import "flyteidl/core/security.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; +import "flyteidl/admin/matchable_resource.proto"; + +// Request to launch an execution with the given project, domain and optionally-assigned name. +message ExecutionCreateRequest { + // Name of the project the execution belongs to. + // +required + string project = 1; + + // Name of the domain the execution belongs to. + // A domain can be considered as a subset within a specific project. + // +required + string domain = 2; + + // User provided value for the resource. + // If none is provided the system will generate a unique string. + // +optional + string name = 3; + + // Additional fields necessary to launch the execution. + // +optional + ExecutionSpec spec = 4; + + // The inputs required to start the execution. All required inputs must be + // included in this map. If not required and not provided, defaults apply. + // +optional + core.LiteralMap inputs = 5; + + // Optional, org key applied to the resource. + string org = 6; +} + +// Request to relaunch the referenced execution. +message ExecutionRelaunchRequest { + // Identifier of the workflow execution to relaunch. + // +required + core.WorkflowExecutionIdentifier id = 1; + + // Deprecated field, do not use. + reserved 2; + + // User provided value for the relaunched execution. + // If none is provided the system will generate a unique string. 
+ // +optional + string name = 3; + + // Allows for all cached values of a workflow and its tasks to be overwritten for a single execution. + // If enabled, all calculations are performed even if cached results would be available, overwriting the stored + // data once execution finishes successfully. + bool overwrite_cache = 4; +} + +// Request to recover the referenced execution. +message ExecutionRecoverRequest { + // Identifier of the workflow execution to recover. + core.WorkflowExecutionIdentifier id = 1; + + // User provided value for the recovered execution. + // If none is provided the system will generate a unique string. + // +optional + string name = 2; + + // Additional metadata which will be used to overwrite any metadata in the reference execution when triggering a recovery execution. + ExecutionMetadata metadata = 3; +} + +// The unique identifier for a successfully created execution. +// If the name was *not* specified in the create request, this identifier will include a generated name. +message ExecutionCreateResponse { + core.WorkflowExecutionIdentifier id = 1; +} + +// A message used to fetch a single workflow execution entity. +// See :ref:`ref_flyteidl.admin.Execution` for more details +message WorkflowExecutionGetRequest { + // Uniquely identifies an individual workflow execution. + core.WorkflowExecutionIdentifier id = 1; +} + +// A workflow execution represents an instantiated workflow, including all inputs and additional +// metadata as well as computed results including state, outputs, and duration-based attributes. +// Used as a response object in Get and List execution requests. +message Execution { + // Unique identifier of the workflow execution. + core.WorkflowExecutionIdentifier id = 1; + + // User-provided configuration and inputs for launching the execution. + ExecutionSpec spec = 2; + + // Execution results. + ExecutionClosure closure = 3; +} + +// Used as a response for requests to list executions. +// See :ref:`ref_flyteidl.admin.Execution` for more details +message ExecutionList { + repeated Execution executions = 1; + + // In the case of multiple pages of results, the server-provided token can be used to fetch the next page + // in a query. If there are no more results, this value will be empty. + string token = 2; +} + +// Input/output data can be represented by actual values or a link to where values are stored +message LiteralMapBlob { + oneof data { + // Data in LiteralMap format + core.LiteralMap values = 1 [deprecated = true]; + + // In the event that the map is too large, we return a uri to the data + string uri = 2; + } +} + +// Specifies metadata around an aborted workflow execution. +message AbortMetadata { + // In the case of a user-specified abort, this will pass along the user-supplied cause. + string cause = 1; + + // Identifies the entity (if any) responsible for terminating the execution + string principal = 2; +}
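As a worked example of the relaunch request defined above, here is a hedged Rust sketch using the prost-generated structs (module layout from the earlier sketch; all identifier values are placeholders, and `..Default::default()` papers over fields such as `org` that may or may not exist in a given flyteidl version):

```rust
use flyteidl::admin::ExecutionRelaunchRequest;
use flyteidl::core::WorkflowExecutionIdentifier;

// Relaunch a finished execution under a server-generated name, forcing all
// cached task results to be recomputed.
fn relaunch_request() -> ExecutionRelaunchRequest {
    ExecutionRelaunchRequest {
        id: Some(WorkflowExecutionIdentifier {
            project: "flytesnacks".into(),   // placeholder project
            domain: "development".into(),    // placeholder domain
            name: "f3a5b4c2".into(),         // placeholder execution name
            ..Default::default()
        }),
        name: String::new(),   // empty => the server generates a unique name
        overwrite_cache: true, // rerun everything, refreshing cached results
    }
}
```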
+ +// Encapsulates the results of the Execution +message ExecutionClosure { + // A result produced by a terminated execution. + // A pending (non-terminal) execution will not have any output result. + oneof output_result { + // Output URI in the case of a successful execution. + // DEPRECATED. Use GetExecutionData to fetch output data instead. + LiteralMapBlob outputs = 1 [deprecated = true]; + + // Error information in the case of a failed execution. + core.ExecutionError error = 2; + + // In the case of a user-specified abort, this will pass along the user-supplied cause. + string abort_cause = 10 [deprecated = true]; + + // In the case of a user-specified abort, this will pass along the user and their supplied cause. + AbortMetadata abort_metadata = 12; + + // Raw output data produced by this execution. + // DEPRECATED. Use GetExecutionData to fetch output data instead. + core.LiteralMap output_data = 13 [deprecated = true]; + } + + // Inputs computed and passed for execution. + // computed_inputs depends on inputs in ExecutionSpec, fixed and default inputs in launch plan + core.LiteralMap computed_inputs = 3 [deprecated = true]; + + // Most recent recorded phase for the execution. + core.WorkflowExecution.Phase phase = 4; + + // Reported time at which the execution began running. + google.protobuf.Timestamp started_at = 5; + + // The amount of time the execution spent running. + google.protobuf.Duration duration = 6; + + // Reported time at which the execution was created. + google.protobuf.Timestamp created_at = 7; + + // Reported time at which the execution was last updated. + google.protobuf.Timestamp updated_at = 8; + + // The notification settings to use after merging the CreateExecutionRequest and the launch plan + // notification settings. An execution launched with notifications will always prefer that definition + // to notifications defined statically in a launch plan. + repeated Notification notifications = 9; + + // Identifies the workflow definition for this execution. + core.Identifier workflow_id = 11; + + // Provides the details of the last state change + ExecutionStateChangeDetails state_change_details = 14; +} + +// Represents system, rather than user-facing, metadata about an execution. +message SystemMetadata { + + // Which execution cluster this execution ran on. + string execution_cluster = 1; + + // Which kubernetes namespace the execution ran under. + string namespace = 2; +}
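The `output_result` oneof in `ExecutionClosure` is the part a client has to unpack to report an execution's outcome. A hedged sketch of what that looks like on the Rust side, assuming the earlier module layout: prost models the oneof as an `Option` of an enum generated in a submodule named after the message.

```rust
use flyteidl::admin::{execution_closure::OutputResult, ExecutionClosure};

// Summarize the terminal result of an execution; None means no terminal
// result has been recorded yet (the execution is still pending or running).
fn describe(closure: &ExecutionClosure) -> String {
    match &closure.output_result {
        Some(OutputResult::Outputs(_)) => "finished (deprecated outputs blob)".into(),
        Some(OutputResult::Error(e)) => format!("failed: {}", e.message),
        Some(OutputResult::AbortCause(c)) => format!("aborted: {c}"),
        Some(OutputResult::AbortMetadata(m)) => format!("aborted by {}", m.principal),
        Some(OutputResult::OutputData(_)) => "finished (inline output data)".into(),
        None => "no terminal result yet".into(),
    }
}
```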
+ +// Represents attributes about an execution which are not required to launch the execution but are useful to record. +// These attributes are assigned at launch time and do not change. +message ExecutionMetadata { + // The method by which this execution was launched. + enum ExecutionMode { + // The default execution mode, MANUAL implies that an execution was launched by an individual. + MANUAL = 0; + + // A schedule triggered this execution launch. + SCHEDULED = 1; + + // A system process was responsible for launching this execution rather than an individual. + SYSTEM = 2; + + // This execution was launched with identical inputs as a previous execution. + RELAUNCH = 3; + + // This execution was triggered by another execution. + CHILD_WORKFLOW = 4; + + // This execution was recovered from another execution. + RECOVERED = 5; + + // Execution was kicked off by the artifact trigger system + TRIGGER = 6; + } + ExecutionMode mode = 1; + + // Identifier of the entity that triggered this execution. + // For systems using back-end authentication any value set here will be discarded in favor of the + // authenticated user context. + string principal = 2; + + // Indicates the nestedness of this execution. + // If a user launches a workflow execution, the default nesting is 0. + // If this execution further launches a workflow (child workflow), the nesting level is incremented from 0 to 1. + // Generally, if a workflow at nesting level k launches a workflow then the child workflow will have + // nesting = k + 1. + uint32 nesting = 3; + + // For scheduled executions, the requested time for execution for this specific schedule invocation. + google.protobuf.Timestamp scheduled_at = 4; + + // Which subworkflow node (if any) launched this execution + core.NodeExecutionIdentifier parent_node_execution = 5; + + // Optional, a reference workflow execution related to this execution. + // In the case of a relaunch, this references the original workflow execution. + core.WorkflowExecutionIdentifier reference_execution = 16; + + // Optional, platform-specific metadata about the execution. + // In the future this may be gated behind an ACL or some sort of authorization. + SystemMetadata system_metadata = 17; + + // Save a list of the artifacts used in this execution for now. This is a list only rather than a mapping + // since we don't have a structure to handle nested ones anyway. + repeated core.ArtifactID artifact_ids = 18; +} + +message NotificationList { + repeated Notification notifications = 1; +} + +// An ExecutionSpec encompasses all data used to launch this execution. The Spec does not change over the lifetime +// of an execution as it progresses across phase changes. +message ExecutionSpec { + // Launch plan to be executed + core.Identifier launch_plan = 1; + + // Input values to be passed for the execution + core.LiteralMap inputs = 2 [deprecated = true]; + + // Metadata for the execution + ExecutionMetadata metadata = 3; + + // This field is deprecated. Do not use. + reserved 4; + + oneof notification_overrides { + // List of notifications based on Execution status transitions + // When this list is not empty it is used rather than any notifications defined in the referenced launch plan. + // When this list is empty, the notifications defined for the launch plan will be applied. + NotificationList notifications = 5; + + // This should be set to true if all notifications are intended to be disabled for this execution. + bool disable_all = 6; + } + + // Labels to apply to the execution resource. + Labels labels = 7; + + // Annotations to apply to the execution resource. + Annotations annotations = 8; + + // Optional: security context override to apply to this execution. + core.SecurityContext security_context = 10; + + // Optional: auth override to apply to this execution. + AuthRole auth_role = 16 [deprecated = true]; + + // Indicates the runtime priority of the execution. + core.QualityOfService quality_of_service = 17; + + // Controls the maximum number of task nodes that can be run in parallel for the entire workflow. + // This is useful to achieve fairness. Note: MapTasks are regarded as one unit, + // and parallelism/concurrency of MapTasks is independent from this. + int32 max_parallelism = 18; + + // User setting to configure where to store offloaded data (i.e. Blobs, structured datasets, query data, etc.). + // This should be a prefix like s3://my-bucket/my-data + RawOutputDataConfig raw_output_data_config = 19; + + // Controls how to select an available cluster on which this execution should run. + ClusterAssignment cluster_assignment = 20; + + // Allows for the interruptible flag of a workflow to be overwritten for a single execution. + // Omitting this field uses the workflow's value as a default. + // As we need to distinguish between the field not being provided and its default value false, we have to use a wrapper + // around the bool field. + google.protobuf.BoolValue interruptible = 21; + + // Allows for all cached values of a workflow and its tasks to be overwritten for a single execution.
+ // If enabled, all calculations are performed even if cached results would be available, overwriting the stored + // data once execution finishes successfully. + bool overwrite_cache = 22; + + // Environment variables to be set for the execution. + Envs envs = 23; + + // Tags to be set for the execution. + repeated string tags = 24; + + // Execution cluster label to be set for the execution. + ExecutionClusterLabel execution_cluster_label = 25; +} + +// Request to terminate an in-progress execution. This action is irreversible. +// If an execution is already terminated, this request will simply be a no-op. +// This request will fail if it references a non-existent execution. +// If the request succeeds the phase "ABORTED" will be recorded for the termination +// with the optional cause added to the output_result. +message ExecutionTerminateRequest { + // Uniquely identifies the individual workflow execution to be terminated. + core.WorkflowExecutionIdentifier id = 1; + + // Optional reason for aborting. + string cause = 2; +} + +message ExecutionTerminateResponse { + // Purposefully empty, may be populated in the future. +} + +// Request structure to fetch inputs, output and other data produced by an execution. +// By default this data is not returned inline in :ref:`ref_flyteidl.admin.WorkflowExecutionGetRequest` +message WorkflowExecutionGetDataRequest { + // The identifier of the execution for which to fetch inputs and outputs. + core.WorkflowExecutionIdentifier id = 1; +} + +// Response structure for WorkflowExecutionGetDataRequest which contains inputs and outputs for an execution. +message WorkflowExecutionGetDataResponse { + // Signed url to fetch a core.LiteralMap of execution outputs. + // Deprecated: Please use full_outputs instead. + UrlBlob outputs = 1 [deprecated = true]; + + // Signed url to fetch a core.LiteralMap of execution inputs. + // Deprecated: Please use full_inputs instead. + UrlBlob inputs = 2 [deprecated = true]; + + // Full_inputs will only be populated if they are under a configured size threshold. + core.LiteralMap full_inputs = 3; + + // Full_outputs will only be populated if they are under a configured size threshold. + core.LiteralMap full_outputs = 4; +} + +// The state of the execution is used to control its visibility in the UI/CLI. +enum ExecutionState { + // By default, all executions are considered active. + EXECUTION_ACTIVE = 0; + + // Archived executions are no longer visible in the UI. + EXECUTION_ARCHIVED = 1; +} + +message ExecutionUpdateRequest { + // Identifier of the execution to update + core.WorkflowExecutionIdentifier id = 1; + + // State to set as the new value active/archive + ExecutionState state = 2; +} + +message ExecutionStateChangeDetails { + // The state of the execution is used to control its visibility in the UI/CLI. + ExecutionState state = 1; + + // This timestamp represents when the state changed. + google.protobuf.Timestamp occurred_at = 2; + + // Identifies the entity (if any) responsible for causing the state change of the execution + string principal = 3; +} + +message ExecutionUpdateResponse {} + +// WorkflowExecutionGetMetricsRequest represents a request to retrieve metrics for the specified workflow execution. +message WorkflowExecutionGetMetricsRequest { + // id defines the workflow execution to query for. + core.WorkflowExecutionIdentifier id = 1; + + // depth defines the number of Flyte entity levels to traverse when breaking down execution details. 
+ int32 depth = 2; +} + +// WorkflowExecutionGetMetricsResponse represents the response containing metrics for the specified workflow execution. +message WorkflowExecutionGetMetricsResponse { + // Span defines the top-level breakdown of the workflows execution. More precise information is nested in a + // hierarchical structure using Flyte entity references. + core.Span span = 1; +} diff --git a/flyrs/protos/flyteidl/admin/launch_plan.proto b/flyrs/protos/flyteidl/admin/launch_plan.proto new file mode 100644 index 0000000000..bbb0abda22 --- /dev/null +++ b/flyrs/protos/flyteidl/admin/launch_plan.proto @@ -0,0 +1,222 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +import "flyteidl/core/execution.proto"; +import "flyteidl/core/literals.proto"; +import "flyteidl/core/identifier.proto"; +import "flyteidl/core/interface.proto"; +import "flyteidl/core/security.proto"; +import "flyteidl/admin/schedule.proto"; +import "flyteidl/admin/common.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/wrappers.proto"; + + +// Request to register a launch plan. The included LaunchPlanSpec may have a complete or incomplete set of inputs required +// to launch a workflow execution. By default all launch plans are registered in state INACTIVE. If you wish to +// set the state to ACTIVE, you must submit a LaunchPlanUpdateRequest, after you have successfully created a launch plan. +message LaunchPlanCreateRequest { + // Uniquely identifies a launch plan entity. + core.Identifier id = 1; + + // User-provided launch plan details, including reference workflow, inputs and other metadata. + LaunchPlanSpec spec = 2; +} + +message LaunchPlanCreateResponse { + // Purposefully empty, may be populated in the future. +} + +// By default any launch plan regardless of state can be used to launch a workflow execution. +// However, at most one version of a launch plan +// (e.g. a NamedEntityIdentifier set of shared project, domain and name values) can be +// active at a time in regards to *schedules*. That is, at most one schedule in a NamedEntityIdentifier +// group will be observed and trigger executions at a defined cadence. +enum LaunchPlanState { + INACTIVE = 0; + ACTIVE = 1; +} + +// A LaunchPlan provides the capability to templatize workflow executions. +// Launch plans simplify associating one or more schedules, inputs and notifications with your workflows. +// Launch plans can be shared and used to trigger executions with predefined inputs even when a workflow +// definition doesn't necessarily have a default value for said input. +message LaunchPlan { + // Uniquely identifies a launch plan entity. + core.Identifier id = 1; + + // User-provided launch plan details, including reference workflow, inputs and other metadata. + LaunchPlanSpec spec = 2; + + // Values computed by the flyte platform after launch plan registration. + LaunchPlanClosure closure = 3; +} + +// Response object for list launch plan requests. +// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details +message LaunchPlanList { + repeated LaunchPlan launch_plans = 1; + + // In the case of multiple pages of results, the server-provided token can be used to fetch the next page + // in a query. If there are no more results, this value will be empty. + string token = 2; +} + +// Defines permissions associated with executions created by this launch plan spec. 
+
+// Defines permissions associated with executions created by this launch plan spec.
+// Use either of these roles if your workflow execution requires the permissions they grant.
+// Deprecated.
+message Auth {
+  option deprecated = true;
+
+  // Defines an optional iam role which will be used for tasks run in executions created with this launch plan.
+  string assumable_iam_role = 1;
+
+  // Defines an optional kubernetes service account which will be used for tasks run in executions created with this launch plan.
+  string kubernetes_service_account = 2;
+}
+
+// User-provided launch plan definition and configuration values.
+message LaunchPlanSpec {
+  // Reference to the Workflow template that the launch plan references
+  core.Identifier workflow_id = 1;
+
+  // Metadata for the Launch Plan
+  LaunchPlanMetadata entity_metadata = 2;
+
+  // Input values to be passed for the execution.
+  // These can be overridden when an execution is created with this launch plan.
+  core.ParameterMap default_inputs = 3;
+
+  // Fixed, non-overridable inputs for the Launch Plan.
+  // These can not be overridden when an execution is created with this launch plan.
+  core.LiteralMap fixed_inputs = 4;
+
+  // String to indicate the role to use to execute the workflow underneath
+  string role = 5 [deprecated = true];
+
+  // Custom labels to be applied to the execution resource.
+  Labels labels = 6;
+
+  // Custom annotations to be applied to the execution resource.
+  Annotations annotations = 7;
+
+  // Indicates the permission associated with workflow executions triggered with this launch plan.
+  Auth auth = 8 [deprecated = true];
+
+  AuthRole auth_role = 9 [deprecated = true];
+
+  // Indicates security context for permissions triggered with this launch plan
+  core.SecurityContext security_context = 10;
+
+  // Indicates the runtime priority of the execution.
+  core.QualityOfService quality_of_service = 16;
+
+  // Encapsulates user settings pertaining to offloaded data (i.e. Blobs, Schema, query data, etc.).
+  RawOutputDataConfig raw_output_data_config = 17;
+
+  // Controls the maximum number of task nodes that can be run in parallel for the entire workflow.
+  // This is useful to achieve fairness. Note: MapTasks are regarded as one unit,
+  // and parallelism/concurrency of MapTasks is independent from this.
+  int32 max_parallelism = 18;
+
+  // Allows for the interruptible flag of a workflow to be overwritten for a single execution.
+  // Omitting this field uses the workflow's value as a default.
+  // As we need to distinguish between the field not being provided and its default value false, we have to use a wrapper
+  // around the bool field.
+  google.protobuf.BoolValue interruptible = 19;
+
+  // Allows for all cached values of a workflow and its tasks to be overwritten for a single execution.
+  // If enabled, all calculations are performed even if cached results would be available, overwriting the stored
+  // data once execution finishes successfully.
+  bool overwrite_cache = 20;
+
+  // Environment variables to be set for the execution.
+  Envs envs = 21;
+}
+
+// Values computed by the flyte platform after launch plan registration.
+// These include expected_inputs required to be present in a CreateExecutionRequest
+// to launch the reference workflow, as well as timestamp values associated with the launch plan.
+message LaunchPlanClosure {
+  // Indicates the launch plan state.
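The split between default_inputs and fixed_inputs above is the crux of templating: Parameters may be overridden per execution, Literals may not. A minimal, hedged sketch of a spec (generated Python bindings; the workflow coordinates and input are placeholders):

    from flyteidl.admin import launch_plan_pb2
    from flyteidl.core import identifier_pb2, literals_pb2

    spec = launch_plan_pb2.LaunchPlanSpec(
        workflow_id=identifier_pb2.Identifier(
            resource_type=identifier_pb2.WORKFLOW,
            project="flytesnacks", domain="development",  # placeholders
            name="my_wf", version="v1",
        ),
        # fixed_inputs cannot be overridden when an execution is created.
        fixed_inputs=literals_pb2.LiteralMap(literals={
            "region": literals_pb2.Literal(scalar=literals_pb2.Scalar(
                primitive=literals_pb2.Primitive(string_value="us-east-1"))),
        }),
    )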
+ LaunchPlanState state = 1; + + // Indicates the set of inputs expected when creating an execution with the Launch plan + core.ParameterMap expected_inputs = 2; + + // Indicates the set of outputs expected to be produced by creating an execution with the Launch plan + core.VariableMap expected_outputs = 3; + + // Time at which the launch plan was created. + google.protobuf.Timestamp created_at = 4; + + // Time at which the launch plan was last updated. + google.protobuf.Timestamp updated_at = 5; +} + +// Additional launch plan attributes included in the LaunchPlanSpec not strictly required to launch +// the reference workflow. +message LaunchPlanMetadata { + // Schedule to execute the Launch Plan + Schedule schedule = 1; + + // List of notifications based on Execution status transitions + repeated Notification notifications = 2; + + // Additional metadata for how to launch the launch plan + google.protobuf.Any launch_conditions = 3; +} + +// Request to set the referenced launch plan state to the configured value. +// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details +message LaunchPlanUpdateRequest { + // Identifier of launch plan for which to change state. + // +required. + core.Identifier id = 1; + + // Desired state to apply to the launch plan. + // +required. + LaunchPlanState state = 2; +} + +// Purposefully empty, may be populated in the future. +message LaunchPlanUpdateResponse { +} + +// Represents a request struct for finding an active launch plan for a given NamedEntityIdentifier +// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details +message ActiveLaunchPlanRequest { + // +required. + NamedEntityIdentifier id = 1; +} + +// Represents a request structure to list active launch plans within a project/domain and optional org. +// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details +message ActiveLaunchPlanListRequest { + // Name of the project that contains the identifiers. + // +required. + string project = 1; + + // Name of the domain the identifiers belongs to within the project. + // +required. + string domain = 2; + + // Indicates the number of resources to be returned. + // +required. + uint32 limit = 3; + + // In the case of multiple pages of results, the server-provided token can be used to fetch the next page + // in a query. + // +optional + string token = 4; + + // Sort ordering. + // +optional + Sort sort_by = 5; + + // Optional, org key applied to the resource. + string org = 6; +} diff --git a/flyrs/protos/flyteidl/admin/matchable_resource.proto b/flyrs/protos/flyteidl/admin/matchable_resource.proto new file mode 100644 index 0000000000..e379bf1573 --- /dev/null +++ b/flyrs/protos/flyteidl/admin/matchable_resource.proto @@ -0,0 +1,190 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +import "flyteidl/admin/common.proto"; +import "flyteidl/admin/cluster_assignment.proto"; +import "flyteidl/core/execution.proto"; +import "flyteidl/core/security.proto"; +import "google/protobuf/wrappers.proto"; + +// Defines a resource that can be configured by customizable Project-, ProjectDomain- or WorkflowAttributes +// based on matching tags. +enum MatchableResource { + // Applies to customizable task resource requests and limits. + TASK_RESOURCE = 0; + + // Applies to configuring templated kubernetes cluster resources. + CLUSTER_RESOURCE = 1; + + // Configures task and dynamic task execution queue assignment. 
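Completing the lifecycle sketched earlier, activation is its own request; because at most one version per (project, domain, name) may be ACTIVE with respect to schedules, activating this version implicitly displaces any sibling. A hedged sketch (lp_id is an assumed core.Identifier of a registered launch plan version):

    from flyteidl.admin import launch_plan_pb2

    update = launch_plan_pb2.LaunchPlanUpdateRequest(
        id=lp_id,  # assumed identifier of a registered launch plan version
        state=launch_plan_pb2.ACTIVE,
    )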
+  EXECUTION_QUEUE = 2;
+
+  // Configures the K8s cluster label to be used for the execution.
+  EXECUTION_CLUSTER_LABEL = 3;
+
+  // Configures default quality of service when undefined in an execution spec.
+  QUALITY_OF_SERVICE_SPECIFICATION = 4;
+
+  // Selects configurable plugin implementation behavior for a given task type.
+  PLUGIN_OVERRIDE = 5;
+
+  // Adds defaults for customizable workflow-execution specifications and overrides.
+  WORKFLOW_EXECUTION_CONFIG = 6;
+
+  // Controls how to select an available cluster on which this execution should run.
+  CLUSTER_ASSIGNMENT = 7;
+}
+
+// Defines a set of overridable task resource attributes set during task registration.
+message TaskResourceSpec {
+  string cpu = 1;
+
+  string gpu = 2;
+
+  string memory = 3;
+
+  string storage = 4;
+
+  string ephemeral_storage = 5;
+}
+
+// Defines task resource defaults and limits that will be applied at task registration.
+message TaskResourceAttributes {
+  TaskResourceSpec defaults = 1;
+
+  TaskResourceSpec limits = 2;
+}
+
+message ClusterResourceAttributes {
+  // Custom resource attributes which will be applied in cluster resource creation (e.g. quotas).
+  // Map keys are the *case-sensitive* names of variables in templatized resource files.
+  // Map values should be the custom values which get substituted during resource creation.
+  map<string, string> attributes = 1;
+}
+
+message ExecutionQueueAttributes {
+  // Tags used for assigning execution queues for tasks defined within this project.
+  repeated string tags = 1;
+}
+
+message ExecutionClusterLabel {
+  // Label value to determine where the execution will be run
+  string value = 1;
+}
+
+// This MatchableAttribute configures selecting alternate plugin implementations for a given task type.
+// In addition to an override implementation a selection of fallbacks can be provided or other modes
+// for handling cases where the desired plugin override is not enabled in a given Flyte deployment.
+message PluginOverride {
+  // A predefined yet extensible Task type identifier.
+  string task_type = 1;
+
+  // A set of plugin ids which should handle tasks of this type instead of the default registered plugin. The list will be tried in order until a plugin is found with that id.
+  repeated string plugin_id = 2;
+
+  enum MissingPluginBehavior {
+    // By default, if this plugin is not enabled for a Flyte deployment then execution will fail.
+    FAIL = 0;
+
+    // Uses the system-configured default implementation.
+    USE_DEFAULT = 1;
+  }
+
+  // Defines the behavior when no plugin from the plugin_id list is found.
+  MissingPluginBehavior missing_plugin_behavior = 4;
+}
+
+
+message PluginOverrides {
+  repeated PluginOverride overrides = 1;
+}
+
+// Adds defaults for customizable workflow-execution specifications and overrides.
+message WorkflowExecutionConfig {
+  // Can be used to control the number of parallel nodes to run within the workflow. This is useful to achieve fairness.
+  int32 max_parallelism = 1;
+
+  // Indicates security context permissions for executions triggered with this matchable attribute.
+  core.SecurityContext security_context = 2;
+
+  // Encapsulates user settings pertaining to offloaded data (i.e. Blobs, Schema, query data, etc.).
+  RawOutputDataConfig raw_output_data_config = 3;
+
+  // Custom labels to be applied to a triggered execution resource.
+  Labels labels = 4;
+
+  // Custom annotations to be applied to a triggered execution resource.
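Read defaults and limits together: defaults fill in requests a task omits, limits cap what any task may ask for. A small, hedged sketch (quantities are placeholders in Kubernetes resource syntax):

    from flyteidl.admin import matchable_resource_pb2 as mr

    task_resources = mr.TaskResourceAttributes(
        defaults=mr.TaskResourceSpec(cpu="500m", memory="1Gi"),
        limits=mr.TaskResourceSpec(cpu="2", memory="4Gi", ephemeral_storage="10Gi"),
    )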
+ Annotations annotations = 5; + + // Allows for the interruptible flag of a workflow to be overwritten for a single execution. + // Omitting this field uses the workflow's value as a default. + // As we need to distinguish between the field not being provided and its default value false, we have to use a wrapper + // around the bool field. + google.protobuf.BoolValue interruptible = 6; + + // Allows for all cached values of a workflow and its tasks to be overwritten for a single execution. + // If enabled, all calculations are performed even if cached results would be available, overwriting the stored + // data once execution finishes successfully. + bool overwrite_cache = 7; + + // Environment variables to be set for the execution. + Envs envs = 8; +} + +// Generic container for encapsulating all types of the above attributes messages. +message MatchingAttributes { + oneof target { + TaskResourceAttributes task_resource_attributes = 1; + + ClusterResourceAttributes cluster_resource_attributes = 2; + + ExecutionQueueAttributes execution_queue_attributes = 3; + + ExecutionClusterLabel execution_cluster_label = 4; + + core.QualityOfService quality_of_service = 5; + + PluginOverrides plugin_overrides = 6; + + WorkflowExecutionConfig workflow_execution_config = 7; + + ClusterAssignment cluster_assignment = 8; + } +} + +// Represents a custom set of attributes applied for either a domain (and optional org); a domain and project (and optional org); +// or domain, project and workflow name (and optional org). +// These are used to override system level defaults for kubernetes cluster resource management, +// default execution values, and more all across different levels of specificity. +message MatchableAttributesConfiguration { + MatchingAttributes attributes = 1; + + string domain = 2; + + string project = 3; + + string workflow = 4; + + string launch_plan = 5; + + // Optional, org key applied to the resource. + string org = 6; +} + +// Request all matching resource attributes for a resource type. +// See :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for more details +message ListMatchableAttributesRequest { + // +required + MatchableResource resource_type = 1; + + // Optional, org filter applied to list project requests. + string org = 2; +} + +// Response for a request for all matching resource attributes for a resource type. +// See :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for more details +message ListMatchableAttributesResponse { + repeated MatchableAttributesConfiguration configurations = 1; +} diff --git a/flyrs/protos/flyteidl/admin/node_execution.proto b/flyrs/protos/flyteidl/admin/node_execution.proto new file mode 100644 index 0000000000..ac672ad49f --- /dev/null +++ b/flyrs/protos/flyteidl/admin/node_execution.proto @@ -0,0 +1,245 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +import "flyteidl/admin/common.proto"; +import "flyteidl/core/execution.proto"; +import "flyteidl/core/catalog.proto"; +import "flyteidl/core/compiler.proto"; +import "flyteidl/core/identifier.proto"; +import "flyteidl/core/literals.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; + +// A message used to fetch a single node execution entity. +// See :ref:`ref_flyteidl.admin.NodeExecution` for more details +message NodeExecutionGetRequest { + + // Uniquely identifies an individual node execution. 
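Because MatchingAttributes wraps its payload in a oneof, each configuration carries exactly one override kind. A hedged sketch reusing the task_resources message from the previous example:

    from flyteidl.admin import matchable_resource_pb2 as mr

    matching = mr.MatchingAttributes(task_resource_attributes=task_resources)
    assert matching.WhichOneof("target") == "task_resource_attributes"
    # Populating another member of the `target` oneof clears the field above.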
+  // +required
+  core.NodeExecutionIdentifier id = 1;
+}
+
+// Represents a request structure to retrieve a list of node execution entities.
+// See :ref:`ref_flyteidl.admin.NodeExecution` for more details
+message NodeExecutionListRequest {
+  // Indicates the workflow execution to filter by.
+  // +required
+  core.WorkflowExecutionIdentifier workflow_execution_id = 1;
+
+  // Indicates the number of resources to be returned.
+  // +required
+  uint32 limit = 2;
+
+  // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+  // in a query.
+  // +optional
+  string token = 3;
+
+  // Indicates a list of filters passed as string.
+  // More info on constructing filters :
+  // +optional
+  string filters = 4;
+
+  // Sort ordering.
+  // +optional
+  Sort sort_by = 5;
+
+  // Unique identifier of the parent node in the execution
+  // +optional
+  string unique_parent_id = 6;
+}
+
+// Represents a request structure to retrieve a list of node execution entities launched by a specific task.
+// This can arise when a task yields a subworkflow.
+message NodeExecutionForTaskListRequest {
+  // Indicates the node execution to filter by.
+  // +required
+  core.TaskExecutionIdentifier task_execution_id = 1;
+
+  // Indicates the number of resources to be returned.
+  // +required
+  uint32 limit = 2;
+
+  // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+  // in a query.
+  // +optional
+  string token = 3;
+
+  // Indicates a list of filters passed as string.
+  // More info on constructing filters :
+  // +optional
+  string filters = 4;
+
+  // Sort ordering.
+  // +optional
+  Sort sort_by = 5;
+}
+
+// Encapsulates all details for a single node execution entity.
+// A node represents a component in the overall workflow graph. A node can launch a task, multiple tasks, an entire nested
+// sub-workflow, or even a separate child-workflow execution.
+// The same task can be called repeatedly in a single workflow but each node is unique.
+message NodeExecution {
+
+  // Uniquely identifies an individual node execution.
+  core.NodeExecutionIdentifier id = 1;
+
+  // Path to remote data store where input blob is stored.
+  string input_uri = 2;
+
+  // Computed results associated with this node execution.
+  NodeExecutionClosure closure = 3;
+
+  // Metadata for Node Execution
+  NodeExecutionMetaData metadata = 4;
+}
+
+// Represents additional attributes related to a Node Execution
+message NodeExecutionMetaData {
+  // Node executions are grouped depending on retries of the parent.
+  // The retry group is unique within the context of a parent node.
+  string retry_group = 1;
+
+  // Boolean flag indicating if the node has child nodes under it.
+  // This can be true when a node contains a dynamic workflow which then produces
+  // child nodes.
+  bool is_parent_node = 2;
+
+  // Node id of the node in the original workflow
+  // This maps to value of WorkflowTemplate.nodes[X].id
+  string spec_node_id = 3;
+
+  // Boolean flag indicating if the node contains a dynamic workflow which then produces child nodes.
+  // This is to distinguish between subworkflows and dynamic workflows which can both have is_parent_node as true.
+  bool is_dynamic = 4;
+
+  // Boolean flag indicating if the node is an array node. This is intended to uniquely identify
+  // array nodes from other nodes which can have is_parent_node as true.
+  bool is_array = 5;
+}
+
+// Request structure to retrieve a list of node execution entities.
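The token fields above implement cursor pagination; an empty token on a response means the listing is exhausted. A hedged sketch (admin_stub stands in for a connected AdminService stub, and ListNodeExecutions is the corresponding AdminService RPC):

    from flyteidl.admin import node_execution_pb2

    token = ""
    while True:
        page = admin_stub.ListNodeExecutions(node_execution_pb2.NodeExecutionListRequest(
            workflow_execution_id=wf_exec_id,  # assumed identifier
            limit=100,
            token=token,
        ))
        for ne in page.node_executions:
            print(ne.id.node_id)
        token = page.token
        if not token:
            break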
+// See :ref:`ref_flyteidl.admin.NodeExecution` for more details +message NodeExecutionList { + repeated NodeExecution node_executions = 1; + + // In the case of multiple pages of results, the server-provided token can be used to fetch the next page + // in a query. If there are no more results, this value will be empty. + string token = 2; +} + +// Container for node execution details and results. +message NodeExecutionClosure { + // Only a node in a terminal state will have a non-empty output_result. + oneof output_result { + // Links to a remotely stored, serialized core.LiteralMap of node execution outputs. + // DEPRECATED. Use GetNodeExecutionData to fetch output data instead. + string output_uri = 1 [deprecated = true]; + + // Error information for the Node + core.ExecutionError error = 2; + + // Raw output data produced by this node execution. + // DEPRECATED. Use GetNodeExecutionData to fetch output data instead. + core.LiteralMap output_data = 10 [deprecated = true]; + } + + // The last recorded phase for this node execution. + core.NodeExecution.Phase phase = 3; + + // Time at which the node execution began running. + google.protobuf.Timestamp started_at = 4; + + // The amount of time the node execution spent running. + google.protobuf.Duration duration = 5; + + // Time at which the node execution was created. + google.protobuf.Timestamp created_at = 6; + + // Time at which the node execution was last updated. + google.protobuf.Timestamp updated_at = 7; + + // Store metadata for what the node launched. + // for ex: if this is a workflow node, we store information for the launched workflow. + oneof target_metadata { + WorkflowNodeMetadata workflow_node_metadata = 8; + TaskNodeMetadata task_node_metadata = 9; + } + + // String location uniquely identifying where the deck HTML file is. + // NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar) + string deck_uri = 11; + + // dynamic_job_spec_uri is the location of the DynamicJobSpec proto message for a DynamicWorkflow. This is required + // to correctly recover partially completed executions where the subworkflow has already been compiled. + string dynamic_job_spec_uri = 12; +} + +// Metadata for a WorkflowNode +message WorkflowNodeMetadata { + // The identifier for a workflow execution launched by a node. + core.WorkflowExecutionIdentifier executionId = 1; +} + +// Metadata for the case in which the node is a TaskNode +message TaskNodeMetadata { + // Captures the status of caching for this execution. + core.CatalogCacheStatus cache_status = 1; + // This structure carries the catalog artifact information + core.CatalogMetadata catalog_key = 2; + // The latest checkpoint location + string checkpoint_uri = 4; +} + +// For dynamic workflow nodes we capture information about the dynamic workflow definition that gets generated. +message DynamicWorkflowNodeMetadata { + // id represents the unique identifier of the workflow. + core.Identifier id = 1; + + // Represents the compiled representation of the embedded dynamic workflow. + core.CompiledWorkflowClosure compiled_workflow = 2; + + // dynamic_job_spec_uri is the location of the DynamicJobSpec proto message for this DynamicWorkflow. This is + // required to correctly recover partially completed executions where the subworkflow has already been compiled. + string dynamic_job_spec_uri = 3; +} + +// Request structure to fetch inputs and output for a node execution. 
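Consumers of the closure above are expected to branch on the output_result oneof rather than assume success. A hedged sketch (node_execution is an assumed NodeExecution message):

    from flyteidl.core import execution_pb2 as core_execution

    closure = node_execution.closure
    if closure.WhichOneof("output_result") == "error":
        print("node failed:", closure.error.message)
    elif closure.phase == core_execution.NodeExecution.SUCCEEDED:
        # output_uri is deprecated; prefer the GetNodeExecutionData RPC for data.
        print("outputs stored at:", closure.output_uri)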
+// By default, these are not returned in :ref:`ref_flyteidl.admin.NodeExecutionGetRequest` +message NodeExecutionGetDataRequest { + // The identifier of the node execution for which to fetch inputs and outputs. + core.NodeExecutionIdentifier id = 1; +} + +// Response structure for NodeExecutionGetDataRequest which contains inputs and outputs for a node execution. +message NodeExecutionGetDataResponse { + // Signed url to fetch a core.LiteralMap of node execution inputs. + // Deprecated: Please use full_inputs instead. + UrlBlob inputs = 1 [deprecated = true]; + + // Signed url to fetch a core.LiteralMap of node execution outputs. + // Deprecated: Please use full_outputs instead. + UrlBlob outputs = 2 [deprecated = true]; + + // Full_inputs will only be populated if they are under a configured size threshold. + core.LiteralMap full_inputs = 3; + + // Full_outputs will only be populated if they are under a configured size threshold. + core.LiteralMap full_outputs = 4; + + // Optional Workflow closure for a dynamically generated workflow, in the case this node yields a dynamic workflow we return its structure here. + DynamicWorkflowNodeMetadata dynamic_workflow = 16; + + FlyteURLs flyte_urls = 17; + +} + +message GetDynamicNodeWorkflowRequest { + core.NodeExecutionIdentifier id = 1; +} + +message DynamicNodeWorkflowResponse { + core.CompiledWorkflowClosure compiled_workflow = 1; +} diff --git a/flyrs/protos/flyteidl/admin/notification.proto b/flyrs/protos/flyteidl/admin/notification.proto new file mode 100644 index 0000000000..9ef54c9794 --- /dev/null +++ b/flyrs/protos/flyteidl/admin/notification.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package flyteidl.admin; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +// Represents the Email object that is sent to a publisher/subscriber +// to forward the notification. +// Note: This is internal to Admin and doesn't need to be exposed to other components. +message EmailMessage { + // The list of email addresses to receive an email with the content populated in the other fields. + // Currently, each email recipient will receive its own email. + // This populates the TO field. + repeated string recipients_email = 1; + + // The email of the sender. + // This populates the FROM field. + string sender_email = 2; + + // The content of the subject line. + // This populates the SUBJECT field. + string subject_line = 3; + + // The content of the email body. + // This populates the BODY field. + string body = 4; +} diff --git a/flyrs/protos/flyteidl/admin/project.proto b/flyrs/protos/flyteidl/admin/project.proto new file mode 100644 index 0000000000..62e61b032d --- /dev/null +++ b/flyrs/protos/flyteidl/admin/project.proto @@ -0,0 +1,110 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + + +import "flyteidl/admin/common.proto"; + +// Namespace within a project commonly used to differentiate between different service instances. +// e.g. "production", "development", etc. +message Domain { + // Globally unique domain name. + string id = 1; + + // Display name. + string name = 2; +} + + +// Top-level namespace used to classify different entities like workflows and executions. +message Project { + // Globally unique project name. + string id = 1; + + // Display name. 
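The EmailMessage fields above map one-to-one onto a conventional email envelope, with one message sent per recipient. A minimal sketch (addresses and text are placeholders):

    from flyteidl.admin import notification_pb2

    email = notification_pb2.EmailMessage(
        recipients_email=["oncall@example.com"],  # each recipient gets its own email
        sender_email="flyte-admin@example.com",
        subject_line="Execution f81a0e2b1c terminated",
        body="The execution was aborted by an operator.",
    )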
+ string name = 2; + + repeated Domain domains = 3; + + string description = 4; + + // Leverage Labels from flyteidl.admin.common.proto to + // tag projects with ownership information. + Labels labels = 5; + + // The state of the project is used to control its visibility in the UI and validity. + enum ProjectState { + // By default, all projects are considered active. + ACTIVE = 0; + + // Archived projects are no longer visible in the UI and no longer valid. + ARCHIVED = 1; + + // System generated projects that aren't explicitly created or managed by a user. + SYSTEM_GENERATED = 2; + } + ProjectState state = 6; + + // Optional, org key applied to the resource. + string org = 7; +} + +// Represents a list of projects. +// See :ref:`ref_flyteidl.admin.Project` for more details +message Projects { + repeated Project projects = 1; + + // In the case of multiple pages of results, the server-provided token can be used to fetch the next page + // in a query. If there are no more results, this value will be empty. + string token = 2; +} + +// Request to retrieve a list of projects matching specified filters. +// See :ref:`ref_flyteidl.admin.Project` for more details +message ProjectListRequest { + // Indicates the number of projects to be returned. + // +required + uint32 limit = 1; + + // In the case of multiple pages of results, this server-provided token can be used to fetch the next page + // in a query. + // +optional + string token = 2; + + // Indicates a list of filters passed as string. + // More info on constructing filters : + // +optional + string filters = 3; + + // Sort ordering. + // +optional + Sort sort_by = 4; + + // Optional, org filter applied to list project requests. + string org = 5; +} + +// Adds a new user-project within the Flyte deployment. +// See :ref:`ref_flyteidl.admin.Project` for more details +message ProjectRegisterRequest { + // +required + Project project = 1; +} + +// Purposefully empty, may be updated in the future. +message ProjectRegisterResponse { +} + +// Purposefully empty, may be updated in the future. +message ProjectUpdateResponse { +} + +message ProjectGetRequest { + // Indicates a unique project. + // +required + string id = 1; + + // Optional, org key applied to the resource. + string org = 2; +} diff --git a/flyrs/protos/flyteidl/admin/project_attributes.proto b/flyrs/protos/flyteidl/admin/project_attributes.proto new file mode 100644 index 0000000000..2656ab25f5 --- /dev/null +++ b/flyrs/protos/flyteidl/admin/project_attributes.proto @@ -0,0 +1,69 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +import "flyteidl/admin/matchable_resource.proto"; + +// Defines a set of custom matching attributes at the project level. +// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` +message ProjectAttributes { + // Unique project id for which this set of attributes will be applied. + string project = 1; + + MatchingAttributes matching_attributes = 2; + + // Optional, org key applied to the project. + string org = 3; +} + +// Sets custom attributes for a project +// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` +message ProjectAttributesUpdateRequest { + // +required + ProjectAttributes attributes = 1; +} + +// Purposefully empty, may be populated in the future. 
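Registering a project with the shape defined above is a single-message call; a hedged sketch with placeholder values:

    from flyteidl.admin import project_pb2

    register = project_pb2.ProjectRegisterRequest(
        project=project_pb2.Project(
            id="ml-platform",  # placeholder; must be globally unique
            name="ML Platform",
            description="Team workspace for feature pipelines.",
        ),
    )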
+message ProjectAttributesUpdateResponse { +} + +// Request to get an individual project level attribute override. +// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` +message ProjectAttributesGetRequest { + // Unique project id which this set of attributes references. + // +required + string project = 1; + + // Which type of matchable attributes to return. + // +required + MatchableResource resource_type = 2; + + // Optional, org key applied to the project. + string org = 3; +} + +// Response to get an individual project level attribute override. +// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` +message ProjectAttributesGetResponse { + ProjectAttributes attributes = 1; +} + +// Request to delete a set matchable project level attribute override. +// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` +message ProjectAttributesDeleteRequest { + // Unique project id which this set of attributes references. + // +required + string project = 1; + + // Which type of matchable attributes to delete. + // +required + MatchableResource resource_type = 2; + + // Optional, org key applied to the project. + string org = 3; +} + +// Purposefully empty, may be populated in the future. +message ProjectAttributesDeleteResponse { +} diff --git a/flyrs/protos/flyteidl/admin/project_domain_attributes.proto b/flyrs/protos/flyteidl/admin/project_domain_attributes.proto new file mode 100644 index 0000000000..b493ae1178 --- /dev/null +++ b/flyrs/protos/flyteidl/admin/project_domain_attributes.proto @@ -0,0 +1,80 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +import "flyteidl/admin/matchable_resource.proto"; + +// Defines a set of custom matching attributes which defines resource defaults for a project and domain. +// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` +message ProjectDomainAttributes { + // Unique project id for which this set of attributes will be applied. + string project = 1; + + // Unique domain id for which this set of attributes will be applied. + string domain = 2; + + MatchingAttributes matching_attributes = 3; + + // Optional, org key applied to the attributes. + string org = 4; +} + +// Sets custom attributes for a project-domain combination. +// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` +message ProjectDomainAttributesUpdateRequest { + // +required + ProjectDomainAttributes attributes = 1; +} + +// Purposefully empty, may be populated in the future. +message ProjectDomainAttributesUpdateResponse { +} + +// Request to get an individual project domain attribute override. +// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` +message ProjectDomainAttributesGetRequest { + // Unique project id which this set of attributes references. + // +required + string project = 1; + + // Unique domain id which this set of attributes references. + // +required + string domain = 2; + + // Which type of matchable attributes to return. + // +required + MatchableResource resource_type = 3; + + // Optional, org key applied to the attributes. + string org = 4; +} + +// Response to get an individual project domain attribute override. 
+// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` +message ProjectDomainAttributesGetResponse { + ProjectDomainAttributes attributes = 1; +} + +// Request to delete a set matchable project domain attribute override. +// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` +message ProjectDomainAttributesDeleteRequest { + // Unique project id which this set of attributes references. + // +required + string project = 1; + + // Unique domain id which this set of attributes references. + // +required + string domain = 2; + + // Which type of matchable attributes to delete. + // +required + MatchableResource resource_type = 3; + + // Optional, org key applied to the attributes. + string org = 4; +} + +// Purposefully empty, may be populated in the future. +message ProjectDomainAttributesDeleteResponse { +} diff --git a/flyrs/protos/flyteidl/admin/schedule.proto b/flyrs/protos/flyteidl/admin/schedule.proto new file mode 100644 index 0000000000..6bcbd90140 --- /dev/null +++ b/flyrs/protos/flyteidl/admin/schedule.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +// Represents a frequency at which to run a schedule. +enum FixedRateUnit { + MINUTE = 0; + HOUR = 1; + DAY = 2; +} + +// Option for schedules run at a certain frequency e.g. every 2 minutes. +message FixedRate { + uint32 value = 1; + FixedRateUnit unit = 2; +} + +// Options for schedules to run according to a cron expression. +message CronSchedule { + // Standard/default cron implementation as described by https://en.wikipedia.org/wiki/Cron#CRON_expression; + // Also supports nonstandard predefined scheduling definitions + // as described by https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions + // except @reboot + string schedule = 1; + // ISO 8601 duration as described by https://en.wikipedia.org/wiki/ISO_8601#Durations + string offset = 2; +} + +// Defines complete set of information required to trigger an execution on a schedule. +message Schedule { + + oneof ScheduleExpression { + // Uses AWS syntax: Minutes Hours Day-of-month Month Day-of-week Year + // e.g. for a schedule that runs every 15 minutes: 0/15 * * * ? * + string cron_expression = 1 [deprecated=true]; + FixedRate rate = 2; + CronSchedule cron_schedule = 4; + } + + // Name of the input variable that the kickoff time will be supplied to when the workflow is kicked off. + string kickoff_time_input_arg = 3; +} diff --git a/flyrs/protos/flyteidl/admin/signal.proto b/flyrs/protos/flyteidl/admin/signal.proto new file mode 100644 index 0000000000..aad8437bc2 --- /dev/null +++ b/flyrs/protos/flyteidl/admin/signal.proto @@ -0,0 +1,86 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +import "flyteidl/admin/common.proto"; +import "flyteidl/core/identifier.proto"; +import "flyteidl/core/literals.proto"; +import "flyteidl/core/types.proto"; + +// SignalGetOrCreateRequest represents a request structure to retrieve or create a signal. +// See :ref:`ref_flyteidl.admin.Signal` for more details +message SignalGetOrCreateRequest { + // A unique identifier for the requested signal. + core.SignalIdentifier id = 1; + + // A type denoting the required value type for this signal. 
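Reading the CronSchedule fields above together: the expression fixes the cadence and the ISO 8601 offset shifts each kickoff. A hedged sketch (values are placeholders):

    from flyteidl.admin import schedule_pb2

    # Fire at the top of every hour, five minutes late, and pass the nominal
    # kickoff time to the workflow input named "kickoff_time".
    schedule = schedule_pb2.Schedule(
        cron_schedule=schedule_pb2.CronSchedule(schedule="0 * * * *", offset="PT5M"),
        kickoff_time_input_arg="kickoff_time",
    )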
+  core.LiteralType type = 2;
+}
+
+// SignalListRequest represents a request structure to retrieve a collection of signals.
+// See :ref:`ref_flyteidl.admin.Signal` for more details
+message SignalListRequest {
+  // Indicates the workflow execution to filter by.
+  // +required
+  core.WorkflowExecutionIdentifier workflow_execution_id = 1;
+
+  // Indicates the number of resources to be returned.
+  // +required
+  uint32 limit = 2;
+
+  // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+  // in a query.
+  // +optional
+  string token = 3;
+
+  // Indicates a list of filters passed as string.
+  // +optional
+  string filters = 4;
+
+  // Sort ordering.
+  // +optional
+  Sort sort_by = 5;
+}
+
+// SignalList represents a collection of signals along with the token of the last result.
+// See :ref:`ref_flyteidl.admin.Signal` for more details
+message SignalList {
+  // A list of signals matching the input filters.
+  repeated Signal signals = 1;
+
+  // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+  // in a query. If there are no more results, this value will be empty.
+  string token = 2;
+}
+
+// SignalSetRequest represents a request structure to set the value on a signal. Setting a signal
+// effectively satisfies the signal condition within a Flyte workflow.
+// See :ref:`ref_flyteidl.admin.Signal` for more details
+message SignalSetRequest {
+  // A unique identifier for the requested signal.
+  core.SignalIdentifier id = 1;
+
+  // The value of this signal, must match the defining signal type.
+  core.Literal value = 2;
+}
+
+// SignalSetResponse represents a response structure if signal setting succeeds.
+message SignalSetResponse {
+  // Purposefully empty, may be populated in the future.
+}
+
+// Signal encapsulates a unique identifier, associated metadata, and a value for a single Flyte
+// signal. Signals may exist either without a set value (representing a signal request) or with a
+// populated value (indicating the signal has been given).
+message Signal {
+  // A unique identifier for the requested signal.
+  core.SignalIdentifier id = 1;
+
+  // A type denoting the required value type for this signal.
+  core.LiteralType type = 2;
+
+  // The value of the signal. This is only available if the signal has been "set" and must match
+  // the defined type.
+  core.Literal value = 3;
+}
diff --git a/flyrs/protos/flyteidl/admin/task.proto b/flyrs/protos/flyteidl/admin/task.proto
new file mode 100644
index 0000000000..6185d6fbba
--- /dev/null
+++ b/flyrs/protos/flyteidl/admin/task.proto
@@ -0,0 +1,71 @@
+syntax = "proto3";
+
+package flyteidl.admin;
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin";
+
+import "flyteidl/core/identifier.proto";
+import "flyteidl/core/tasks.proto";
+import "flyteidl/core/compiler.proto";
+import "flyteidl/admin/description_entity.proto";
+import "google/protobuf/timestamp.proto";
+
+// Represents a request structure to create a revision of a task.
+// See :ref:`ref_flyteidl.admin.Task` for more details
+message TaskCreateRequest {
+  // id represents the unique identifier of the task.
+  // +required
+  core.Identifier id = 1;
+
+  // Represents the specification for task.
+  // +required
+  TaskSpec spec = 2;
+}
+
+// Represents a response structure if task creation succeeds.
+message TaskCreateResponse {
+  // Purposefully empty, may be populated in the future.
+}
+
+// Flyte workflows are composed of many ordered tasks.
+// These are small, reusable, self-contained logical blocks
+// arranged to process workflow inputs and produce a deterministic set of outputs.
+// Tasks can come in many varieties tuned for specialized behavior.
+message Task {
+  // id represents the unique identifier of the task.
+  core.Identifier id = 1;
+
+  // closure encapsulates all the fields that map to a compiled version of the task.
+  TaskClosure closure = 2;
+
+  // One-liner overview of the entity.
+  string short_description = 3;
+}
+
+// Represents a list of tasks returned from the admin.
+// See :ref:`ref_flyteidl.admin.Task` for more details
+message TaskList {
+  // A list of tasks returned based on the request.
+  repeated Task tasks = 1;
+
+  // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+  // in a query. If there are no more results, this value will be empty.
+  string token = 2;
+}
+
+// Represents a structure that encapsulates the user-configured specification of the task.
+message TaskSpec {
+  // Template of the task that encapsulates all the metadata of the task.
+  core.TaskTemplate template = 1;
+
+  // Represents the specification for description entity.
+  DescriptionEntity description = 2;
+}
+
+// Computed task attributes which include values derived from the TaskSpec, as well as plugin-specific data
+// and task metadata.
+message TaskClosure {
+  // Represents the compiled representation of the task from the specification provided.
+  core.CompiledTask compiled_task = 1;
+
+  // Time at which the task was created.
+  google.protobuf.Timestamp created_at = 2;
+}
diff --git a/flyrs/protos/flyteidl/admin/task_execution.proto b/flyrs/protos/flyteidl/admin/task_execution.proto
new file mode 100644
index 0000000000..54d2ff1e61
--- /dev/null
+++ b/flyrs/protos/flyteidl/admin/task_execution.proto
@@ -0,0 +1,168 @@
+syntax = "proto3";
+
+package flyteidl.admin;
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin";
+
+import "flyteidl/admin/common.proto";
+import "flyteidl/core/execution.proto";
+import "flyteidl/core/identifier.proto";
+import "flyteidl/core/literals.proto";
+import "flyteidl/event/event.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/struct.proto";
+
+// A message used to fetch a single task execution entity.
+// See :ref:`ref_flyteidl.admin.TaskExecution` for more details
+message TaskExecutionGetRequest {
+  // Unique identifier for the task execution.
+  // +required
+  core.TaskExecutionIdentifier id = 1;
+}
+
+// Represents a request structure to retrieve a list of task execution entities yielded by a specific node execution.
+// See :ref:`ref_flyteidl.admin.TaskExecution` for more details
+message TaskExecutionListRequest {
+  // Indicates the node execution to filter by.
+  // +required
+  core.NodeExecutionIdentifier node_execution_id = 1;
+
+  // Indicates the number of resources to be returned.
+  // +required
+  uint32 limit = 2;
+
+  // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+  // in a query.
+  // +optional
+  string token = 3;
+
+  // Indicates a list of filters passed as string.
+  // More info on constructing filters :
+  // +optional
+  string filters = 4;
+
+  // Sort ordering for returned list.
+  // +optional
+  Sort sort_by = 5;
+}
+
+// Encapsulates all details for a single task execution entity.
+// A task execution represents an instantiated task, including all inputs and additional
+// metadata as well as computed results including state, outputs, and duration-based attributes.
+message TaskExecution {
+  // Unique identifier for the task execution.
+  core.TaskExecutionIdentifier id = 1;
+
+  // Path to remote data store where input blob is stored.
+  string input_uri = 2;
+
+  // Task execution details and results.
+  TaskExecutionClosure closure = 3;
+
+  // Whether this task spawned nodes.
+  bool is_parent = 4;
+}
+
+// Response structure for a query to list task execution entities.
+// See :ref:`ref_flyteidl.admin.TaskExecution` for more details
+message TaskExecutionList {
+  repeated TaskExecution task_executions = 1;
+
+  // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+  // in a query. If there are no more results, this value will be empty.
+  string token = 2;
+}
+
+// Container for task execution details and results.
+message TaskExecutionClosure {
+  oneof output_result {
+    // Path to remote data store where output blob is stored if the execution succeeded (and produced outputs).
+    // DEPRECATED. Use GetTaskExecutionData to fetch output data instead.
+    string output_uri = 1 [deprecated = true];
+
+    // Error information for the task execution. Populated if the execution failed.
+    core.ExecutionError error = 2;
+
+    // Raw output data produced by this task execution.
+    // DEPRECATED. Use GetTaskExecutionData to fetch output data instead.
+    core.LiteralMap output_data = 12 [deprecated = true];
+  }
+
+  // The last recorded phase for this task execution.
+  core.TaskExecution.Phase phase = 3;
+
+  // Detailed log information output by the task execution.
+  repeated core.TaskLog logs = 4;
+
+  // Time at which the task execution began running.
+  google.protobuf.Timestamp started_at = 5;
+
+  // The amount of time the task execution spent running.
+  google.protobuf.Duration duration = 6;
+
+  // Time at which the task execution was created.
+  google.protobuf.Timestamp created_at = 7;
+
+  // Time at which the task execution was last updated.
+  google.protobuf.Timestamp updated_at = 8;
+
+  // Custom data specific to the task plugin.
+  google.protobuf.Struct custom_info = 9;
+
+  // If there is an explanation for the most recent phase transition, the reason will capture it.
+  string reason = 10;
+
+  // A predefined yet extensible Task type identifier.
+  string task_type = 11;
+
+  // Metadata around how a task was executed.
+  event.TaskExecutionMetadata metadata = 16;
+
+  // The event version is used to indicate versioned changes in how data is maintained using this
+  // proto message. For example, event_version > 0 means that map task logs use the
+  // TaskExecutionMetadata ExternalResourceInfo fields for each subtask rather than the TaskLog
+  // in this message.
+  int32 event_version = 17;
+
+  // A time-series of the phase transition or update explanations. This, when compared to storing a singular reason
+  // as previously done, is much more valuable in visualizing and understanding historical evaluations.
+  repeated Reason reasons = 18;
+}
+
+// Reason is a single message annotated with a timestamp to indicate the instant the reason occurred.
+message Reason {
+  // occurred_at is the timestamp indicating the instant that this reason happened.
+  google.protobuf.Timestamp occurred_at = 1;
+
+  // message is the explanation for the most recent phase transition or status update.
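The reasons field above keeps the whole transition history rather than only the latest explanation; a hedged sketch of walking it (task_execution is an assumed TaskExecution message):

    # Each Reason pairs a timestamp with the explanation recorded at that instant.
    for reason in task_execution.closure.reasons:
        print(reason.occurred_at.ToDatetime().isoformat(), "-", reason.message)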
+ string message = 2; +} + +// Request structure to fetch inputs and output for a task execution. +// By default this data is not returned inline in :ref:`ref_flyteidl.admin.TaskExecutionGetRequest` +message TaskExecutionGetDataRequest { + // The identifier of the task execution for which to fetch inputs and outputs. + // +required + core.TaskExecutionIdentifier id = 1; +} + +// Response structure for TaskExecutionGetDataRequest which contains inputs and outputs for a task execution. +message TaskExecutionGetDataResponse { + // Signed url to fetch a core.LiteralMap of task execution inputs. + // Deprecated: Please use full_inputs instead. + UrlBlob inputs = 1 [deprecated = true]; + + // Signed url to fetch a core.LiteralMap of task execution outputs. + // Deprecated: Please use full_outputs instead. + UrlBlob outputs = 2 [deprecated = true]; + + // Full_inputs will only be populated if they are under a configured size threshold. + core.LiteralMap full_inputs = 3; + + // Full_outputs will only be populated if they are under a configured size threshold. + core.LiteralMap full_outputs = 4; + + // flyte tiny url to fetch a core.LiteralMap of task execution's IO + // Deck will be empty for task + FlyteURLs flyte_urls = 5; +} diff --git a/flyrs/protos/flyteidl/admin/version.proto b/flyrs/protos/flyteidl/admin/version.proto new file mode 100644 index 0000000000..e0e38bda1f --- /dev/null +++ b/flyrs/protos/flyteidl/admin/version.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +// Response for the GetVersion API +message GetVersionResponse { + // The control plane version information. FlyteAdmin and related components + // form the control plane of Flyte + Version control_plane_version = 1; +} + +// Provides Version information for a component +message Version { + // Specifies the GIT sha of the build + string Build = 1; + + // Version for the build, should follow a semver + string Version = 2; + + // Build timestamp + string BuildTime = 3; +} + +// Empty request for GetVersion +message GetVersionRequest { +} diff --git a/flyrs/protos/flyteidl/admin/workflow.proto b/flyrs/protos/flyteidl/admin/workflow.proto new file mode 100644 index 0000000000..b090f30ea8 --- /dev/null +++ b/flyrs/protos/flyteidl/admin/workflow.proto @@ -0,0 +1,92 @@ +syntax = "proto3"; + +package flyteidl.admin; +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; + +import "flyteidl/core/compiler.proto"; +import "flyteidl/core/identifier.proto"; +import "flyteidl/core/workflow.proto"; +import "flyteidl/admin/description_entity.proto"; +import "google/protobuf/timestamp.proto"; + +// Represents a request structure to create a revision of a workflow. +// See :ref:`ref_flyteidl.admin.Workflow` for more details +message WorkflowCreateRequest { + // id represents the unique identifier of the workflow. + // +required + core.Identifier id = 1; + + // Represents the specification for workflow. + // +required + WorkflowSpec spec = 2; +} + +message WorkflowCreateResponse { + // Purposefully empty, may be populated in the future. +} + +// Represents the workflow structure stored in the Admin +// A workflow is created by ordering tasks and associating outputs to inputs +// in order to produce a directed-acyclic execution graph. +message Workflow { + // id represents the unique identifier of the workflow. 
+  core.Identifier id = 1;
+
+  // closure encapsulates all the fields that map to a compiled version of the workflow.
+  WorkflowClosure closure = 2;
+
+  // One-liner overview of the entity.
+  string short_description = 3;
+}
+
+// Represents a list of workflows returned from the admin.
+// See :ref:`ref_flyteidl.admin.Workflow` for more details
+message WorkflowList {
+  // A list of workflows returned based on the request.
+  repeated Workflow workflows = 1;
+
+  // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+  // in a query. If there are no more results, this value will be empty.
+  string token = 2;
+}
+
+// Represents a structure that encapsulates the specification of the workflow.
+message WorkflowSpec {
+  // Template of the workflow that encapsulates all the metadata of the workflow.
+  core.WorkflowTemplate template = 1;
+
+  // Workflows that are embedded into other workflows need to be passed alongside the parent workflow to the
+  // propeller compiler (since the compiler doesn't have any knowledge of other workflows - ie, it doesn't reach out
+  // to Admin to see other registered workflows). In fact, subworkflows do not even need to be registered.
+  repeated core.WorkflowTemplate sub_workflows = 2;
+
+  // Represents the specification for description entity.
+  DescriptionEntity description = 3;
+}
+
+// A container holding the compiled workflow produced from the WorkflowSpec and additional metadata.
+message WorkflowClosure {
+  // Represents the compiled representation of the workflow from the specification provided.
+  core.CompiledWorkflowClosure compiled_workflow = 1;
+
+  // Time at which the workflow was created.
+  google.protobuf.Timestamp created_at = 2;
+}
+
+// The workflow id is already used and the structure is different
+message WorkflowErrorExistsDifferentStructure {
+  core.Identifier id = 1;
+}
+
+// The workflow id is already used with an identical structure
+message WorkflowErrorExistsIdenticalStructure {
+  core.Identifier id = 1;
+}
+
+// When a CreateWorkflowRequest fails due to a matching id
+message CreateWorkflowFailureReason {
+  oneof reason {
+    WorkflowErrorExistsDifferentStructure exists_different_structure = 1;
+    WorkflowErrorExistsIdenticalStructure exists_identical_structure = 2;
+  }
+}
diff --git a/flyrs/protos/flyteidl/admin/workflow_attributes.proto b/flyrs/protos/flyteidl/admin/workflow_attributes.proto
new file mode 100644
index 0000000000..9767f00df7
--- /dev/null
+++ b/flyrs/protos/flyteidl/admin/workflow_attributes.proto
@@ -0,0 +1,89 @@
+syntax = "proto3";
+
+package flyteidl.admin;
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin";
+
+import "flyteidl/admin/matchable_resource.proto";
+
+// Defines a set of custom matching attributes which defines resource defaults for a project, domain and workflow.
+// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+message WorkflowAttributes {
+  // Unique project id for which this set of attributes will be applied.
+  string project = 1;
+
+  // Unique domain id for which this set of attributes will be applied.
+  string domain = 2;
+
+  // Workflow name for which this set of attributes will be applied.
+  string workflow = 3;
+
+  MatchingAttributes matching_attributes = 4;
+
+  // Optional, org key applied to the attributes.
+  string org = 5;
+}
+
+// Sets custom attributes for a project, domain and workflow combination.
+// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` +message WorkflowAttributesUpdateRequest { + WorkflowAttributes attributes = 1; +} + +// Purposefully empty, may be populated in the future. +message WorkflowAttributesUpdateResponse { +} + +// Request to get an individual workflow attribute override. +// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` +message WorkflowAttributesGetRequest { + // Unique project id which this set of attributes references. + // +required + string project = 1; + + // Unique domain id which this set of attributes references. + // +required + string domain = 2; + + // Workflow name which this set of attributes references. + // +required + string workflow = 3; + + // Which type of matchable attributes to return. + // +required + MatchableResource resource_type = 4; + + // Optional, org key applied to the attributes. + string org = 5; +} + +// Response to get an individual workflow attribute override. +message WorkflowAttributesGetResponse { + WorkflowAttributes attributes = 1; +} + +// Request to delete a set matchable workflow attribute override. +// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` +message WorkflowAttributesDeleteRequest { + // Unique project id which this set of attributes references. + // +required + string project = 1; + + // Unique domain id which this set of attributes references. + // +required + string domain = 2; + + // Workflow name which this set of attributes references. + // +required + string workflow = 3; + + // Which type of matchable attributes to delete. + // +required + MatchableResource resource_type = 4; + + // Optional, org key applied to the attributes. + string org = 5; +} + +// Purposefully empty, may be populated in the future. +message WorkflowAttributesDeleteResponse { +} diff --git a/flyrs/protos/flyteidl/cacheservice/cacheservice.proto b/flyrs/protos/flyteidl/cacheservice/cacheservice.proto new file mode 100644 index 0000000000..f7f82f4921 --- /dev/null +++ b/flyrs/protos/flyteidl/cacheservice/cacheservice.proto @@ -0,0 +1,143 @@ +syntax = "proto3"; + +package flyteidl.cacheservice; + +import "flyteidl/core/literals.proto"; +import "flyteidl/core/types.proto"; +import "flyteidl/core/identifier.proto"; +import "flyteidl/core/interface.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/cacheservice"; + +/* + * CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs. + */ +service CacheService { + // Retrieves cached data by key. + rpc Get (GetCacheRequest) returns (GetCacheResponse); + + // Stores or updates cached data by key. + rpc Put (PutCacheRequest) returns (PutCacheResponse); + + // Deletes cached data by key. 
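Cache round-trips are symmetric: Put stores a CachedOutput under a deterministic key and Get retrieves it. A hedged sketch, assuming the generated bindings land under flyteidl.cacheservice and that cache_stub is a connected CacheService stub:

    from flyteidl.cacheservice import cacheservice_pb2

    put = cacheservice_pb2.PutCacheRequest(
        key="0xabc123-task-signature",  # placeholder cache key
        output=cacheservice_pb2.CachedOutput(
            output_uri="s3://my-bucket/outputs.pb",  # placeholder offloaded output
        ),
        overwrite=False,  # do not replace an existing entry
    )
    cache_stub.Put(put)
    hit = cache_stub.Get(cacheservice_pb2.GetCacheRequest(key="0xabc123-task-signature"))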
+  rpc Delete (DeleteCacheRequest) returns (DeleteCacheResponse);
+
+  // Get or extend a reservation for a cache key
+  rpc GetOrExtendReservation (GetOrExtendReservationRequest) returns (GetOrExtendReservationResponse);
+
+  // Release the reservation for a cache key
+  rpc ReleaseReservation (ReleaseReservationRequest) returns (ReleaseReservationResponse);
+}
+
+/*
+ * Additional metadata as key-value pairs
+ */
+message KeyMapMetadata {
+  map<string, string> values = 1; // Additional metadata as key-value pairs
+}
+
+/*
+ * Metadata for cached outputs, including the source identifier and timestamps.
+ */
+message Metadata {
+  core.Identifier source_identifier = 1; // Source task or workflow identifier
+  KeyMapMetadata key_map = 2; // Additional metadata as key-value pairs
+  google.protobuf.Timestamp created_at = 3; // Creation timestamp
+  google.protobuf.Timestamp last_updated_at = 4; // Last update timestamp
+}
+
+/*
+ * Represents cached output, either as literals or a URI, with associated metadata.
+ */
+message CachedOutput {
+  oneof output {
+    flyteidl.core.LiteralMap output_literals = 1; // Output literals
+    string output_uri = 2; // URI to output data
+  }
+  Metadata metadata = 3; // Associated metadata
+}
+
+/*
+ * Request to retrieve cached data by key.
+ */
+message GetCacheRequest {
+  string key = 1; // Cache key
+}
+
+/*
+ * Response with cached data for a given key.
+ */
+message GetCacheResponse {
+  CachedOutput output = 1; // Cached output
+}
+
+/*
+ * Request to store/update cached data by key.
+ */
+message PutCacheRequest {
+  string key = 1; // Cache key
+  CachedOutput output = 2; // Output to cache
+  bool overwrite = 3; // Overwrite flag
+}
+
+/*
+ * Response message of cache store/update operation.
+ */
+message PutCacheResponse {
+  // Empty, success indicated by no errors
+}
+
+/*
+ * Request to delete cached data by key.
+ */
+message DeleteCacheRequest {
+  string key = 1; // Cache key
+}
+
+/*
+ * Response message of cache deletion operation.
+ */
+message DeleteCacheResponse {
+  // Empty, success indicated by no errors
+}
+
+// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata.
+message Reservation {
+  string key = 1; // The unique ID for the reservation - same as the cache key
+  string owner_id = 2; // The unique ID of the owner for the reservation
+  google.protobuf.Duration heartbeat_interval = 3; // Requested reservation extension heartbeat interval
+  google.protobuf.Timestamp expires_at = 4; // Expiration timestamp of this reservation
+}
+
+/*
+ * Request to get or extend a reservation for a cache key
+ */
+message GetOrExtendReservationRequest {
+  string key = 1; // The unique ID for the reservation - same as the cache key
+  string owner_id = 2; // The unique ID of the owner for the reservation
+  google.protobuf.Duration heartbeat_interval = 3; // Requested reservation extension heartbeat interval
+}
+
+/*
+ * Response with the reservation that was created or extended.
+ */
+message GetOrExtendReservationResponse {
+  Reservation reservation = 1; // The reservation that was created or extended
+}
+
+/*
+ * Request to release the reservation for a cache key
+ */
+message ReleaseReservationRequest {
+  string key = 1; // The unique ID for the reservation - same as the cache key
+  string owner_id = 2; // The unique ID of the owner for the reservation
+}
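Reservations serialize concurrent writers on a key: the first caller becomes the owner and others back off until the reservation expires or is released. A hedged sketch (owner id and interval are placeholders):

    from google.protobuf import duration_pb2
    from flyteidl.cacheservice import cacheservice_pb2

    reserve = cacheservice_pb2.GetOrExtendReservationRequest(
        key="0xabc123-task-signature",
        owner_id="propeller-pod-7",  # placeholder owner identity
        heartbeat_interval=duration_pb2.Duration(seconds=10),
    )
    # Re-send at roughly the heartbeat interval to keep ownership; release with
    # ReleaseReservationRequest(key=..., owner_id=...) when done.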
diff --git a/flyrs/protos/flyteidl/core/artifact_id.proto b/flyrs/protos/flyteidl/core/artifact_id.proto
new file mode 100644
index 0000000000..022bc20cff
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/artifact_id.proto
@@ -0,0 +1,112 @@
+syntax = "proto3";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+import "google/protobuf/timestamp.proto";
+import "flyteidl/core/identifier.proto";
+
+
+message ArtifactKey {
+  // Project, domain, and suffix need to be unique across a given artifact store.
+  string project = 1;
+  string domain = 2;
+  string name = 3;
+  string org = 4;
+}
+
+// Only valid for triggers
+message ArtifactBindingData {
+  reserved 1 to 4;
+  // These two fields are only relevant in the partition value case
+  oneof partition_data {
+    string partition_key = 5;
+    bool bind_to_time_partition = 6;
+  }
+
+  // This is only relevant in the time partition case
+  TimeTransform time_transform = 7;
+}
+
+enum Granularity {
+  UNSET = 0;
+  MINUTE = 1;
+  HOUR = 2;
+  DAY = 3; // default
+  MONTH = 4;
+}
+
+enum Operator {
+  MINUS = 0;
+  PLUS = 1;
+}
+
+message TimeTransform {
+  string transform = 1;
+  Operator op = 2;
+}
+
+message InputBindingData {
+  string var = 1;
+}
+
+message RuntimeBinding {}
+
+message LabelValue {
+  oneof value {
+    // The string static value is for use in the Partitions object
+    string static_value = 1;
+
+    // The time value is for use in the TimePartition case
+    google.protobuf.Timestamp time_value = 2;
+    ArtifactBindingData triggered_binding = 3;
+    InputBindingData input_binding = 4;
+    RuntimeBinding runtime_binding = 5;
+  }
+}
+
+message Partitions {
+  map<string, LabelValue> value = 1;
+}
+
+message TimePartition {
+  LabelValue value = 1;
+  Granularity granularity = 2;
+}
+
+message ArtifactID {
+  ArtifactKey artifact_key = 1;
+
+  string version = 2;
+
+  // Think of a partition as a tag on an Artifact, except it's a key-value pair.
+  // Different partitions naturally have different versions (execution ids).
+  Partitions partitions = 3;
+
+  // There is no such thing as an empty time partition - if it's not set, then there is no time partition.
+  TimePartition time_partition = 4;
+}
+
+message ArtifactTag {
+  ArtifactKey artifact_key = 1;
+
+  LabelValue value = 2;
+}
+
+// Uniqueness constraints for Artifacts
+//  - project, domain, name, version, partitions
+// Option 2 (tags are standalone, point to an individual artifact id):
+//  - project, domain, name, alias (points to one partition if partitioned)
+//  - project, domain, name, partition key, partition value
+message ArtifactQuery {
+  oneof identifier {
+    ArtifactID artifact_id = 1;
+    ArtifactTag artifact_tag = 2;
+    string uri = 3;
+
+    // This is used in the trigger case, where a user specifies a value for an input that is one of the triggering
+    // artifacts, or a partition value derived from a triggering artifact.
+    ArtifactBindingData binding = 4;
+  }
+}
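As a usage illustration, a hedged sketch of building an `ArtifactID` with one static partition and a day-granularity time partition. The `flyteidl.core.artifact_id_pb2` module path and all concrete values are assumptions.

```python
# Minimal sketch using the generated Python bindings (path assumed).
from google.protobuf import timestamp_pb2

from flyteidl.core import artifact_id_pb2

aid = artifact_id_pb2.ArtifactID(
    artifact_key=artifact_id_pb2.ArtifactKey(
        project="flytesnacks", domain="development", name="daily_report"
    ),
    version="abc123",
    partitions=artifact_id_pb2.Partitions(
        value={"region": artifact_id_pb2.LabelValue(static_value="us-east-1")}
    ),
    time_partition=artifact_id_pb2.TimePartition(
        value=artifact_id_pb2.LabelValue(
            time_value=timestamp_pb2.Timestamp(seconds=1712188800)  # assumed date
        ),
        granularity=artifact_id_pb2.DAY,
    ),
)
```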
diff --git a/flyrs/protos/flyteidl/core/catalog.proto b/flyrs/protos/flyteidl/core/catalog.proto
new file mode 100644
index 0000000000..4d98c28d7e
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/catalog.proto
@@ -0,0 +1,63 @@
+syntax = "proto3";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+import "flyteidl/core/identifier.proto";
+
+// Indicates the status of CatalogCaching. This is not embedded in TaskNodeMetadata because it may be used for other
+// node types in the future.
+enum CatalogCacheStatus {
+  // Used to indicate that caching was disabled
+  CACHE_DISABLED = 0;
+  // Used to indicate that the cache lookup resulted in no matches
+  CACHE_MISS = 1;
+  // Used to indicate that the associated artifact was a result of a previous execution
+  CACHE_HIT = 2;
+  // Used to indicate that the resultant artifact was added to the cache
+  CACHE_POPULATED = 3;
+  // Used to indicate that cache lookup failed because of an error
+  CACHE_LOOKUP_FAILURE = 4;
+  // Used to indicate that writing to the cache failed because of an error
+  CACHE_PUT_FAILURE = 5;
+  // Used to indicate the cache lookup was skipped
+  CACHE_SKIPPED = 6;
+  // Used to indicate that the cache was evicted
+  CACHE_EVICTED = 7;
+};
+
+message CatalogArtifactTag {
+  // Artifact ID is a generated name
+  string artifact_id = 1;
+  // Flyte computes the tag automatically, as the hash of the values
+  string name = 2;
+};
+
+// Catalog artifact information with specific metadata
+message CatalogMetadata {
+  // Dataset ID in the catalog
+  Identifier dataset_id = 1;
+  // Artifact tag in the catalog
+  CatalogArtifactTag artifact_tag = 2;
+  // Optional: Source Execution identifier, if this dataset was generated by another execution in Flyte. This is a one-of field and will depend on the caching context
+  oneof source_execution {
+    // Today we only support TaskExecutionIdentifier as a source, as catalog caching only works for task executions
+    TaskExecutionIdentifier source_task_execution = 3;
+  }
+};
+
+message CatalogReservation {
+  // Indicates the status of a catalog reservation operation.
+  enum Status {
+    // Used to indicate that reservations are disabled
+    RESERVATION_DISABLED = 0;
+    // Used to indicate that a reservation was successfully acquired or extended
+    RESERVATION_ACQUIRED = 1;
+    // Used to indicate that an active reservation currently exists
+    RESERVATION_EXISTS = 2;
+    // Used to indicate that the reservation has been successfully released
+    RESERVATION_RELEASED = 3;
+    // Used to indicate that a reservation operation resulted in failure
+    RESERVATION_FAILURE = 4;
+  }
+}
diff --git a/flyrs/protos/flyteidl/core/compiler.proto b/flyrs/protos/flyteidl/core/compiler.proto
new file mode 100644
index 0000000000..620ee26f2d
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/compiler.proto
@@ -0,0 +1,64 @@
+syntax = "proto3";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+import "flyteidl/core/identifier.proto";
+import "flyteidl/core/interface.proto";
+import "flyteidl/core/workflow.proto";
+import "flyteidl/core/tasks.proto";
+
+// Adjacency list for the workflow. This is created as part of the compilation process. Every stage after the
+// compilation step uses this created ConnectionSet.
+message ConnectionSet {
+  message IdList {
+    repeated string ids = 1;
+  }
+
+  // A list of all the node ids that are downstream from a given node id
+  map<string, IdList> downstream = 7;
+
+  // A list of all the node ids that are upstream of this node id
+  map<string, IdList> upstream = 8;
+}
+
+// Output of the compilation step. This object represents one workflow. We store more metadata at this layer.
+message CompiledWorkflow {
+  // Completely contained Workflow Template
+  WorkflowTemplate template = 1;
+  // For internal use only! This field is used by the system and must not be filled in. Any values set will be ignored.
+  ConnectionSet connections = 2;
+}
+
+// Output of the compilation step. This object represents one LaunchPlan. We store more metadata at this layer.
+message CompiledLaunchPlan {
+  // Completely contained LaunchPlan Template
+  LaunchPlanTemplate template = 1;
+}
+
+// Output of the compilation step. This object represents one Task. We store more metadata at this layer.
+message CompiledTask {
+  // Completely contained TaskTemplate
+  TaskTemplate template = 1;
+}
+
+// A Compiled Workflow Closure contains all the information required to start a new execution, or to visualize a workflow
+// and its details. The CompiledWorkflowClosure should always contain a primary workflow, that is the main workflow that
+// will begin the execution. All subworkflows are denormalized. WorkflowNodes refer to the workflow identifiers of
+// compiled subworkflows.
+message CompiledWorkflowClosure {
+  //+required
+  CompiledWorkflow primary = 1;
+  // Guaranteed that there will only exist one and only one workflow with a given id, i.e., every sub workflow has a
+  // unique identifier. Also every enclosed subworkflow is used either by a primary workflow or by a subworkflow
+  // as an inlined workflow.
+  //+optional
+  repeated CompiledWorkflow sub_workflows = 2;
+  // Guaranteed that there will only exist one and only one task with a given id, i.e., every task has a unique id.
+  //+required (at least 1)
+  repeated CompiledTask tasks = 3;
+  // A collection of launch plans that are compiled. Guaranteed that there will only exist one and only one launch plan
+  // with a given id, i.e., every launch plan has a unique id.
+  repeated CompiledLaunchPlan launch_plans = 4;
+}
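Consumers normally read this adjacency list rather than fill it in. A hedged sketch of walking it in Python; the module path, the provenance of `closure` (e.g. an AdminService workflow fetch), and the `"start-node"` id are assumptions.

```python
# Minimal sketch: breadth-first walk of ConnectionSet.downstream.
from collections import deque

from flyteidl.core import compiler_pb2  # generated binding, path assumed


def downstream_ids(closure: compiler_pb2.CompiledWorkflowClosure, start: str = "start-node"):
    """Collect every node id reachable downstream of `start`."""
    downstream = closure.primary.connections.downstream
    seen, queue = set(), deque([start])
    while queue:
        node_id = queue.popleft()
        if node_id not in downstream:
            continue
        for nxt in downstream[node_id].ids:
            if nxt not in seen:
                seen.add(nxt)
                queue.append(nxt)
    return seen
```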
diff --git a/flyrs/protos/flyteidl/core/condition.proto b/flyrs/protos/flyteidl/core/condition.proto
new file mode 100644
index 0000000000..84c7fb0314
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/condition.proto
@@ -0,0 +1,63 @@
+syntax = "proto3";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+import "flyteidl/core/literals.proto";
+
+// Defines a 2-level tree where the root is a comparison operator and Operands are primitives or known variables.
+// Each expression results in a boolean result.
+message ComparisonExpression {
+  // Binary Operator for each expression
+  enum Operator {
+    EQ = 0;
+    NEQ = 1;
+    // Greater Than
+    GT = 2;
+    GTE = 3;
+    // Less Than
+    LT = 4;
+    LTE = 5;
+  }
+
+  Operator operator = 1;
+  Operand left_value = 2;
+  Operand right_value = 3;
+}
+
+// Defines an operand to a comparison expression.
+message Operand {
+  oneof val {
+    // Can be a constant
+    core.Primitive primitive = 1 [deprecated = true];
+    // Or one of this node's input variables
+    string var = 2;
+    // Replaces the deprecated primitive field
+    core.Scalar scalar = 3;
+  }
+}
+
+// Defines a boolean expression tree. It can be a simple or a conjunction expression.
+// Multiple expressions can be combined using a conjunction or a disjunction to result in a final boolean result.
+message BooleanExpression {
+  oneof expr {
+    ConjunctionExpression conjunction = 1;
+    ComparisonExpression comparison = 2;
+  }
+}
+
+// Defines a conjunction expression of two boolean expressions.
+message ConjunctionExpression {
+  // Nested conditions. They can be conjoined using AND / OR.
+  // Order of evaluation is not important as the operators are commutative.
+  enum LogicalOperator {
+    // Conjunction
+    AND = 0;
+    OR = 1;
+  }
+
+  LogicalOperator operator = 1;
+  BooleanExpression left_expression = 2;
+  BooleanExpression right_expression = 3;
+}
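To show how these three messages compose, here is a hedged sketch building `inputs.x == 5 AND inputs.y == 10` as an expression tree. Module paths and variable names are assumptions.

```python
# Minimal sketch using the generated Python bindings (paths assumed).
from flyteidl.core import condition_pb2, literals_pb2


def eq(var: str, value: int) -> condition_pb2.BooleanExpression:
    """var == value, with the constant carried as a (non-deprecated) Scalar."""
    return condition_pb2.BooleanExpression(
        comparison=condition_pb2.ComparisonExpression(
            operator=condition_pb2.ComparisonExpression.EQ,
            left_value=condition_pb2.Operand(var=var),
            right_value=condition_pb2.Operand(
                scalar=literals_pb2.Scalar(
                    primitive=literals_pb2.Primitive(integer=value)
                )
            ),
        )
    )


expr = condition_pb2.BooleanExpression(
    conjunction=condition_pb2.ConjunctionExpression(
        operator=condition_pb2.ConjunctionExpression.AND,
        left_expression=eq("x", 5),
        right_expression=eq("y", 10),
    )
)
```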
diff --git a/flyrs/protos/flyteidl/core/dynamic_job.proto b/flyrs/protos/flyteidl/core/dynamic_job.proto
new file mode 100644
index 0000000000..1665f5fa29
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/dynamic_job.proto
@@ -0,0 +1,32 @@
+syntax = "proto3";
+
+import "flyteidl/core/tasks.proto";
+import "flyteidl/core/workflow.proto";
+import "flyteidl/core/literals.proto";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+// Describes a set of tasks to execute and how the final outputs are produced.
+message DynamicJobSpec {
+  // A collection of nodes to execute.
+  repeated Node nodes = 1;
+
+  // An absolute number of successful completions of nodes required to mark this job as succeeded. As soon as this
+  // criterion is met, the dynamic job will be marked as successful and outputs will be computed. If this number
+  // becomes impossible to reach (e.g. number of currently running tasks + number of already succeeded tasks <
+  // min_successes), the task will be aborted immediately and marked as failed. The default value of this field, if not
+  // specified, is the count of the nodes repeated field.
+  int64 min_successes = 2;
+
+  // Describes how to bind the final output of the dynamic job from the outputs of executed nodes. The referenced ids
+  // in bindings should have the generated id for the subtask.
+  repeated Binding outputs = 3;
+
+  // [Optional] A complete list of task specs referenced in nodes.
+  repeated TaskTemplate tasks = 4;
+
+  // [Optional] A complete list of workflow specs referenced in nodes.
+  repeated WorkflowTemplate subworkflows = 5;
+}
diff --git a/flyrs/protos/flyteidl/core/errors.proto b/flyrs/protos/flyteidl/core/errors.proto
new file mode 100644
index 0000000000..4d25389349
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/errors.proto
@@ -0,0 +1,35 @@
+syntax = "proto3";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+import "flyteidl/core/execution.proto";
+
+// Error message to propagate detailed errors from container executions to the execution
+// engine.
+message ContainerError {
+  // A simplified code for errors, so that we can provide a glossary of all possible errors.
+  string code = 1;
+  // A detailed error message.
+  string message = 2;
+
+  // Defines a generic error type that dictates the behavior of the retry strategy.
+  enum Kind {
+    NON_RECOVERABLE = 0;
+    RECOVERABLE = 1;
+  }
+
+  // An abstract error kind for this error. Defaults to NON_RECOVERABLE if not specified.
+  Kind kind = 3;
+
+  // Defines the origin of the error (system, user, unknown).
+  ExecutionError.ErrorKind origin = 4;
+}
+
+// Defines the errors.pb file format the container can produce to communicate
+// failure reasons to the execution engine.
+message ErrorDocument {
+  // The error raised during execution.
+  ContainerError error = 1;
+}
diff --git a/flyrs/protos/flyteidl/core/execution.proto b/flyrs/protos/flyteidl/core/execution.proto
new file mode 100644
index 0000000000..d2eabdc577
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/execution.proto
@@ -0,0 +1,116 @@
+syntax = "proto3";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+import "google/protobuf/duration.proto";
+
+// Indicates various phases of Workflow Execution
+message WorkflowExecution {
+  enum Phase {
+    UNDEFINED = 0;
+    QUEUED = 1;
+    RUNNING = 2;
+    SUCCEEDING = 3;
+    SUCCEEDED = 4;
+    FAILING = 5;
+    FAILED = 6;
+    ABORTED = 7;
+    TIMED_OUT = 8;
+    ABORTING = 9;
+  }
+}
+
+// Indicates various phases of Node Execution that only include the time spent to run the nodes/workflows
+message NodeExecution {
+  enum Phase {
+    UNDEFINED = 0;
+    QUEUED = 1;
+    RUNNING = 2;
+    SUCCEEDED = 3;
+    FAILING = 4;
+    FAILED = 5;
+    ABORTED = 6;
+    SKIPPED = 7;
+    TIMED_OUT = 8;
+    DYNAMIC_RUNNING = 9;
+    RECOVERED = 10;
+  }
+}
+
+// Phases that task plugins can go through. Not all phases may be applicable to a specific plugin task,
+// but this is the cumulative list that customers may want to know about for their task.
+message TaskExecution {
+  enum Phase {
+    UNDEFINED = 0;
+    QUEUED = 1;
+    RUNNING = 2;
+    SUCCEEDED = 3;
+    ABORTED = 4;
+    FAILED = 5;
+    // To indicate cases where the task is initializing, like: ErrImagePull, ContainerCreating, PodInitializing
+    INITIALIZING = 6;
+    // To address cases where the underlying resource is not available: backoff error, resource quota exceeded
+    WAITING_FOR_RESOURCES = 7;
+  }
+}
+
+
+// Represents the error message from the execution.
+message ExecutionError {
+  // Error code indicates a grouping of a type of error.
+  // More Info:
+  string code = 1;
+  // Detailed description of the error - including stack trace.
+  string message = 2;
+  // Full error contents accessible via a URI
+  string error_uri = 3;
+  // Error type: System or User
+  enum ErrorKind {
+    UNKNOWN = 0;
+    USER = 1;
+    SYSTEM = 2;
+  }
+  ErrorKind kind = 4;
+}
+
+// Log information for the task that is specific to a log sink
+// When our log story is fleshed out, we may have more metadata here like log link expiry
+message TaskLog {
+
+  enum MessageFormat {
+    UNKNOWN = 0;
+    CSV = 1;
+    JSON = 2;
+  }
+
+  string uri = 1;
+  string name = 2;
+  MessageFormat message_format = 3;
+  google.protobuf.Duration ttl = 4;
+}
+
+// Represents customized execution run-time attributes.
+message QualityOfServiceSpec {
+  // Indicates how much queueing delay an execution can tolerate.
+  google.protobuf.Duration queueing_budget = 1;
+
+  // Add future, user-configurable options here
+}
+
+// Indicates the priority of an execution.
+message QualityOfService {
+  enum Tier {
+    // Default: no quality of service specified.
+    UNDEFINED = 0;
+    HIGH = 1;
+    MEDIUM = 2;
+    LOW = 3;
+  }
+
+  oneof designation {
+    Tier tier = 1;
+    QualityOfServiceSpec spec = 2;
+  }
+}
diff --git a/flyrs/protos/flyteidl/core/identifier.proto b/flyrs/protos/flyteidl/core/identifier.proto
new file mode 100644
index 0000000000..48744f7894
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/identifier.proto
@@ -0,0 +1,80 @@
+syntax = "proto3";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+// Indicates a resource type within Flyte.
+enum ResourceType {
+  UNSPECIFIED = 0;
+  TASK = 1;
+  WORKFLOW = 2;
+  LAUNCH_PLAN = 3;
+  // A dataset represents an entity modeled in Flyte DataCatalog. A Dataset is also a versioned entity and can be a
+  // compilation of multiple individual objects. Eventually all Catalog objects should be modeled similar to Flyte
+  // Objects. The Dataset entities make it possible for the UI and CLI to act on the objects in a similar manner to
+  // other Flyte objects.
+  DATASET = 4;
+}
+
+// Encapsulation of fields that uniquely identifies a Flyte resource.
+message Identifier {
+  // Identifies the specific type of resource that this identifier corresponds to.
+  core.ResourceType resource_type = 1;
+
+  // Name of the project the resource belongs to.
+  string project = 2;
+
+  // Name of the domain the resource belongs to.
+  // A domain can be considered as a subset within a specific project.
+  string domain = 3;
+
+  // User provided value for the resource.
+  string name = 4;
+
+  // Specific version of the resource.
+  string version = 5;
+
+  // Optional, org key applied to the resource.
+  string org = 6;
+}
+
+// Encapsulation of fields that uniquely identifies a Flyte workflow execution
+message WorkflowExecutionIdentifier {
+  // Name of the project the resource belongs to.
+  string project = 1;
+
+  // Name of the domain the resource belongs to.
+  // A domain can be considered as a subset within a specific project.
+  string domain = 2;
+
+  // User or system provided value for the resource.
+  string name = 4;
+
+  // Optional, org key applied to the resource.
+  string org = 5;
+}
+
+// Encapsulation of fields that identify a Flyte node execution entity.
+message NodeExecutionIdentifier {
+  string node_id = 1;
+
+  WorkflowExecutionIdentifier execution_id = 2;
+}
+
+// Encapsulation of fields that identify a Flyte task execution entity.
+message TaskExecutionIdentifier {
+  core.Identifier task_id = 1;
+
+  core.NodeExecutionIdentifier node_execution_id = 2;
+
+  uint32 retry_attempt = 3;
+}
+
+// Encapsulation of fields that uniquely identify a signal.
+message SignalIdentifier {
+  // Unique identifier for a signal.
+  string signal_id = 1;
+
+  // Identifies the Flyte workflow execution this signal belongs to.
+  WorkflowExecutionIdentifier execution_id = 2;
+}
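These identifiers nest: a task execution id wraps a task id and a node execution id, which in turn wraps a workflow execution id. A hedged sketch (module path and all values assumed):

```python
# Minimal sketch: attempt 0 of a task running in node "n0" of one execution.
from flyteidl.core import identifier_pb2

task_exec_id = identifier_pb2.TaskExecutionIdentifier(
    task_id=identifier_pb2.Identifier(
        resource_type=identifier_pb2.TASK,
        project="flytesnacks",
        domain="development",
        name="my.tasks.echo",
        version="v1",
    ),
    node_execution_id=identifier_pb2.NodeExecutionIdentifier(
        node_id="n0",
        execution_id=identifier_pb2.WorkflowExecutionIdentifier(
            project="flytesnacks", domain="development", name="f8a9b2c1"
        ),
    ),
    retry_attempt=0,
)
```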
diff --git a/flyrs/protos/flyteidl/core/interface.proto b/flyrs/protos/flyteidl/core/interface.proto
new file mode 100644
index 0000000000..ec7673d9c4
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/interface.proto
@@ -0,0 +1,64 @@
+syntax = "proto3";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+import "flyteidl/core/types.proto";
+import "flyteidl/core/literals.proto";
+import "flyteidl/core/artifact_id.proto";
+
+// Defines a strongly typed variable.
+message Variable {
+  // Variable literal type.
+  LiteralType type = 1;
+
+  //+optional string describing input variable
+  string description = 2;
+
+  //+optional This object allows the user to specify how Artifacts are created.
+  // name, tag, partitions can be specified. The other fields (version and project/domain) are ignored.
+  core.ArtifactID artifact_partial_id = 3;
+
+  core.ArtifactTag artifact_tag = 4;
+}
+
+// A map of Variables
+message VariableMap {
+  // Defines a map of variable names to variables.
+  map<string, Variable> variables = 1;
+}
+
+// Defines strongly typed inputs and outputs.
+message TypedInterface {
+  VariableMap inputs = 1;
+  VariableMap outputs = 2;
+}
+
+// A parameter is used as input to a launch plan and has
+// the special ability to have a default value or mark itself as required.
+message Parameter {
+  //+required Variable. Defines the type of the variable backing this parameter.
+  Variable var = 1;
+
+  //+optional
+  oneof behavior {
+    // Defines a default value that has to match the variable type defined.
+    Literal default = 2;
+
+    //+optional, whether this value is required to be filled in.
+    bool required = 3;
+
+    // This is essentially an execution-time search that should result in exactly one Artifact with a Type that
+    // matches the type of the variable.
+    core.ArtifactQuery artifact_query = 4;
+
+    core.ArtifactID artifact_id = 5;
+  }
+}
+
+// A map of Parameters.
+message ParameterMap {
+  // Defines a map of parameter names to parameters.
+  map<string, Parameter> parameters = 1;
+}
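For illustration, a launch-plan parameter equivalent to `limit: int = 10`, expressed as a `ParameterMap`. A hedged sketch; module paths and the parameter name are assumptions.

```python
# Minimal sketch using the generated Python bindings (paths assumed).
from flyteidl.core import interface_pb2, literals_pb2, types_pb2

params = interface_pb2.ParameterMap(
    parameters={
        "limit": interface_pb2.Parameter(
            var=interface_pb2.Variable(
                type=types_pb2.LiteralType(simple=types_pb2.INTEGER),
                description="maximum number of rows to fetch",
            ),
            # The default must match the declared variable type (INTEGER here).
            default=literals_pb2.Literal(
                scalar=literals_pb2.Scalar(
                    primitive=literals_pb2.Primitive(integer=10)
                )
            ),
        )
    }
)
```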
diff --git a/flyrs/protos/flyteidl/core/literals.proto b/flyrs/protos/flyteidl/core/literals.proto
new file mode 100644
index 0000000000..f886873ffb
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/literals.proto
@@ -0,0 +1,183 @@
+syntax = "proto3";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/struct.proto";
+import "flyteidl/core/types.proto";
+
+// Primitive Types
+message Primitive {
+  // Defines one of simple primitive types. These types will get translated into different programming languages as
+  // described in https://developers.google.com/protocol-buffers/docs/proto#scalar.
+  oneof value {
+    int64 integer = 1;
+    double float_value = 2;
+    string string_value = 3;
+    bool boolean = 4;
+    google.protobuf.Timestamp datetime = 5;
+    google.protobuf.Duration duration = 6;
+  }
+}
+
+// Used to denote a nil/null/None assignment to a scalar value. The underlying LiteralType for Void is intentionally
+// undefined since it can be assigned to a scalar of any LiteralType.
+message Void {
+}
+
+// Refers to an offloaded set of files. It encapsulates the type of the store and a unique uri for where the data is.
+// There are no restrictions on how the uri is formatted since it will depend on how to interact with the store.
+message Blob {
+  BlobMetadata metadata = 1;
+  string uri = 3;
+}
+
+message BlobMetadata {
+  BlobType type = 1;
+}
+
+// A simple byte array with a tag to help different parts of the system communicate about what is in the byte array.
+// It's strongly advisable that consumers of this type define a unique tag and validate the tag before parsing the data.
+message Binary {
+  bytes value = 1;
+  string tag = 2;
+}
+
+// A strongly typed schema that defines the interface of data retrieved from the underlying storage medium.
+message Schema {
+  string uri = 1;
+  SchemaType type = 3;
+}
+
+// The runtime representation of a tagged union value. See `UnionType` for more details.
+message Union {
+  Literal value = 1;
+  LiteralType type = 2;
+}
+
+message StructuredDatasetMetadata {
+  // Bundle the type information along with the literal.
+  // This is here because StructuredDatasets can often be more defined at run time than at compile time.
+  // That is, at compile time you might only declare a task to return a pandas dataframe or a StructuredDataset,
+  // without any column information, but at run time, you might have that column information.
+  // flytekit python will copy this type information into the literal, from the type information, if not provided by
+  // the various plugins (encoders).
+  // Since this field is run time generated, it's not used for any type checking.
+  StructuredDatasetType structured_dataset_type = 1;
+}
+
+message StructuredDataset {
+  // String location uniquely identifying where the data is.
+  // Should start with the storage location (e.g. s3://, gs://, bq://, etc.)
+  string uri = 1;
+
+  StructuredDatasetMetadata metadata = 2;
+}
+
+message Scalar {
+  oneof value {
+    Primitive primitive = 1;
+    Blob blob = 2;
+    Binary binary = 3;
+    Schema schema = 4;
+    Void none_type = 5;
+    Error error = 6;
+    google.protobuf.Struct generic = 7;
+    StructuredDataset structured_dataset = 8;
+    Union union = 9;
+  }
+}
+
+// A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives.
+message Literal {
+  oneof value {
+    // A simple value.
+    Scalar scalar = 1;
+
+    // A collection of literals to allow nesting.
+    LiteralCollection collection = 2;
+
+    // A map of strings to literals.
+    LiteralMap map = 3;
+  }
+
+  // A hash representing this literal.
+  // This is used for caching purposes. For more details refer to RFC 1893
+  // (https://github.com/flyteorg/flyte/blob/master/rfc/system/1893-caching-of-offloaded-objects.md)
+  string hash = 4;
+
+  // Additional metadata for literals.
+  map<string, string> metadata = 5;
+}
+
+// A collection of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field.
+message LiteralCollection {
+  repeated Literal literals = 1;
+}
+
+// A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field.
+message LiteralMap {
+  map<string, Literal> literals = 1;
+}
+
+// A collection of BindingData items.
+message BindingDataCollection {
+  repeated BindingData bindings = 1;
+}
+
+// A map of BindingData items.
+message BindingDataMap {
+  map<string, BindingData> bindings = 1;
+}
+
+message UnionInfo {
+  LiteralType targetType = 1;
+}
+
+// Specifies either a simple value or a reference to another output.
+message BindingData {
+  oneof value {
+    // A simple scalar value.
+    Scalar scalar = 1;
+
+    // A collection of binding data. This allows nesting of binding data to any number
+    // of levels.
+    BindingDataCollection collection = 2;
+
+    // References an output promised by another node.
+    OutputReference promise = 3;
+
+    // A map of bindings. The key is always a string.
+    BindingDataMap map = 4;
+  }
+
+  UnionInfo union = 5;
+}
+
+// An input/output binding of a variable to either static value or a node output.
+message Binding {
+  // Variable name must match an input/output variable of the node.
+  string var = 1;
+
+  // Data to use to bind this variable.
+  BindingData binding = 2;
+}
+
+// A generic key value pair.
+message KeyValuePair {
+  //+required.
+  string key = 1;
+
+  //+optional.
+  string value = 2;
+}
+
+// Retry strategy associated with an executable unit.
+message RetryStrategy {
+  // Number of retries. Retries will be consumed when the job fails with a recoverable error.
+  // The number of retries must be less than or equal to 10.
+  uint32 retries = 5;
+}
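Since almost everything else in flyteidl is expressed in terms of `Literal` and `LiteralMap`, a small hedged sketch of hand-building `{"name": "flyte", "attempts": 3}` may help. In practice flytekit's type engine does this conversion; module paths are assumptions.

```python
# Minimal sketch using the generated Python bindings (path assumed).
from flyteidl.core import literals_pb2


def text(s: str) -> literals_pb2.Literal:
    return literals_pb2.Literal(
        scalar=literals_pb2.Scalar(primitive=literals_pb2.Primitive(string_value=s))
    )


def integer(i: int) -> literals_pb2.Literal:
    return literals_pb2.Literal(
        scalar=literals_pb2.Scalar(primitive=literals_pb2.Primitive(integer=i))
    )


inputs = literals_pb2.LiteralMap(
    literals={"name": text("flyte"), "attempts": integer(3)}
)
```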
diff --git a/flyrs/protos/flyteidl/core/metrics.proto b/flyrs/protos/flyteidl/core/metrics.proto
new file mode 100644
index 0000000000..120cd0f625
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/metrics.proto
@@ -0,0 +1,50 @@
+syntax = "proto3";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+import "flyteidl/core/identifier.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/struct.proto";
+
+// Span represents a duration trace of Flyte execution. The id field denotes a Flyte execution entity or an operation
+// which uniquely identifies the Span. The spans attribute allows this Span to be further broken down into more
+// precise definitions.
+message Span {
+  // start_time defines the instant this span began.
+  google.protobuf.Timestamp start_time = 1;
+
+  // end_time defines the instant this span completed.
+  google.protobuf.Timestamp end_time = 2;
+
+  oneof id {
+    // workflow_id is the id of the workflow execution this Span represents.
+    flyteidl.core.WorkflowExecutionIdentifier workflow_id = 3;
+
+    // node_id is the id of the node execution this Span represents.
+    flyteidl.core.NodeExecutionIdentifier node_id = 4;
+
+    // task_id is the id of the task execution this Span represents.
+    flyteidl.core.TaskExecutionIdentifier task_id = 5;
+
+    // operation_id is the id of a unique operation that this Span represents.
+    string operation_id = 6;
+  }
+
+  // spans defines a collection of Spans that break down this execution.
+  repeated Span spans = 7;
+}
+
+// ExecutionMetricResult is one metric (a time series) collected during the execution of a Flyte task.
+message ExecutionMetricResult {
+  // The metric this data represents. e.g. EXECUTION_METRIC_USED_CPU_AVG or EXECUTION_METRIC_USED_MEMORY_BYTES_AVG.
+  string metric = 1;
+
+  // The result data in prometheus range query result format
+  // https://prometheus.io/docs/prometheus/latest/querying/api/#expression-query-result-formats.
+  // This may include multiple time series, differentiated by their metric labels.
+  // Start time is greater of (execution attempt start, 48h ago)
+  // End time is lesser of (execution attempt end, now)
+  google.protobuf.Struct data = 2;
+}
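Because `Span` is recursive, consumers typically walk it as a tree. A hedged sketch of printing per-span durations; where the root span comes from (e.g. an admin metrics call) is an assumption.

```python
# Minimal sketch: recursively print a Span tree with per-span durations.
def print_span(span, depth: int = 0) -> None:
    elapsed = span.end_time.ToDatetime() - span.start_time.ToDatetime()
    kind = span.WhichOneof("id")  # workflow_id / node_id / task_id / operation_id
    print("  " * depth + f"{kind}: {elapsed.total_seconds():.2f}s")
    for child in span.spans:
        print_span(child, depth + 1)
```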
diff --git a/flyrs/protos/flyteidl/core/security.proto b/flyrs/protos/flyteidl/core/security.proto
new file mode 100644
index 0000000000..3aba017476
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/security.proto
@@ -0,0 +1,130 @@
+syntax = "proto3";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+// Secret encapsulates information about the secret a task needs to proceed. An environment variable
+// FLYTE_SECRETS_ENV_PREFIX will be passed to indicate the prefix of the environment variables that will be present if
+// secrets are passed through environment variables.
+// FLYTE_SECRETS_DEFAULT_DIR will be passed to indicate the prefix of the path where secrets will be mounted if secrets
+// are passed through file mounts.
+message Secret {
+  enum MountType {
+    // Default case, indicates the client can tolerate either mounting options.
+    ANY = 0;
+
+    // ENV_VAR indicates the secret needs to be mounted as an environment variable.
+    ENV_VAR = 1;
+
+    // FILE indicates the secret needs to be mounted as a file.
+    FILE = 2;
+  }
+
+  // The name of the secret group where to find the key referenced below. For K8s secrets, this should be the name of
+  // the v1/secret object. For Confidant, this should be the Credential name. For Vault, this should be the secret name.
+  // For AWS Secret Manager, this should be the name of the secret.
+  // +required
+  string group = 1;
+
+  // The group version to fetch. This is not supported in all secret management systems. It'll be ignored for the ones
+  // that do not support it.
+  // +optional
+  string group_version = 2;
+
+  // The name of the secret to mount. This has to match an existing secret in the system. It's up to the implementation
+  // of the secret management system to require case sensitivity. For K8s secrets, Confidant and Vault, this should
+  // match one of the keys inside the secret. For AWS Secret Manager, it's ignored.
+  // +optional
+  string key = 3;
+
+  // mount_requirement is optional. Indicates where the secret has to be mounted. If provided, the execution will fail
+  // if the underlying key management system cannot satisfy that requirement. If not provided, the default location
+  // will depend on the key management system.
+  // +optional
+  MountType mount_requirement = 4;
+}
+
+// OAuth2Client encapsulates OAuth2 Client Credentials to be used when making calls on behalf of that task.
+message OAuth2Client {
+  // client_id is the public id for the client to use. The system will not perform any pre-auth validation that the
+  // secret requested matches the client_id indicated here.
+  // +required
+  string client_id = 1;
+
+  // client_secret is a reference to the secret used to authenticate the OAuth2 client.
+  // +required
+  Secret client_secret = 2;
+}
+
+// Identity encapsulates the various security identities a task can run as. It's up to the underlying plugin to pick the
+// right identity for the execution environment.
+message Identity {
+  // iam_role references the fully qualified name of Identity & Access Management role to impersonate.
+  string iam_role = 1;
+
+  // k8s_service_account references a kubernetes service account to impersonate.
+  string k8s_service_account = 2;
+
+  // oauth2_client references an oauth2 client. Backend plugins can use this information to impersonate the client when
+  // making external calls.
+  OAuth2Client oauth2_client = 3;
+
+  // execution_identity references the subject who makes the execution
+  string execution_identity = 4;
+}
+
+// OAuth2TokenRequest encapsulates information needed to request an OAuth2 token.
+// FLYTE_TOKENS_ENV_PREFIX will be passed to indicate the prefix of the environment variables that will be present if
+// tokens are passed through environment variables.
+// FLYTE_TOKENS_PATH_PREFIX will be passed to indicate the prefix of the path where secrets will be mounted if tokens
+// are passed through file mounts.
+message OAuth2TokenRequest {
+  // Type of the token requested.
+  enum Type {
+    // CLIENT_CREDENTIALS indicates a 2-legged OAuth token requested using client credentials.
+    CLIENT_CREDENTIALS = 0;
+  }
+
+  // name indicates a unique id for the token request within this task's token requests. It'll be used as a suffix for
+  // environment variables and as a filename for mounting tokens as files.
+  // +required
+  string name = 1;
+
+  // type indicates the type of the request to make. Defaults to CLIENT_CREDENTIALS.
+  // +required
+  Type type = 2;
+
+  // client references the client_id/secret to use to request the OAuth2 token.
+  // +required
+  OAuth2Client client = 3;
+
+  // idp_discovery_endpoint references the discovery endpoint used to retrieve token endpoint and other related
+  // information.
+  // +optional
+  string idp_discovery_endpoint = 4;
+
+  // token_endpoint references the token issuance endpoint. If idp_discovery_endpoint is not provided, this parameter is
+  // mandatory.
+  // +optional
+  string token_endpoint = 5;
+}
+// SecurityContext holds security attributes that apply to tasks.
+message SecurityContext {
+  // run_as encapsulates the identity a pod should run as. If the task fills in multiple fields here, it'll be up to the
+  // backend plugin to choose the appropriate identity for the execution engine the task will run on.
+  Identity run_as = 1;
+
+  // secrets indicate the list of secrets the task needs in order to proceed. Secrets will be mounted/passed to the
+  // pod as it starts. If the plugin responsible for kicking off the task will not run it on a flyte cluster (e.g. AWS
+  // Batch), it's the responsibility of the plugin to fetch the secret (which means propeller identity will need access
+  // to the secret) and to pass it to the remote execution engine.
+  repeated Secret secrets = 2;
+
+  // tokens indicate the list of token requests the task needs in order to proceed. Tokens will be mounted/passed to the
+  // pod as it starts. If the plugin responsible for kicking off the task will not run it on a flyte cluster (e.g. AWS
+  // Batch), it's the responsibility of the plugin to fetch the secret (which means propeller identity will need access
+  // to the secret) and to pass it to the remote execution engine.
+  repeated OAuth2TokenRequest tokens = 3;
+}
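Tying the security messages together, here is a hedged sketch of declaring that a task needs the key `token` from secret group `my-service`, mounted as a file. Module path, group, and key are assumptions.

```python
# Minimal sketch using the generated Python bindings (path assumed).
from flyteidl.core import security_pb2

secret = security_pb2.Secret(
    group="my-service",          # e.g. the v1/secret object name on K8s
    key="token",                 # one key inside that secret
    mount_requirement=security_pb2.Secret.FILE,
)
ctx = security_pb2.SecurityContext(secrets=[secret])
```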
diff --git a/flyrs/protos/flyteidl/core/tasks.proto b/flyrs/protos/flyteidl/core/tasks.proto
new file mode 100644
index 0000000000..20a1fa0cbf
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/tasks.proto
@@ -0,0 +1,351 @@
+syntax = "proto3";
+
+import "flyteidl/core/identifier.proto";
+import "flyteidl/core/interface.proto";
+import "flyteidl/core/literals.proto";
+import "flyteidl/core/security.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/struct.proto";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+// A customizable interface to convey resources requested for a container. This can be interpreted differently for different
+// container engines.
+message Resources {
+  // Known resource names.
+  enum ResourceName {
+    UNKNOWN = 0;
+    CPU = 1;
+    GPU = 2;
+    MEMORY = 3;
+    STORAGE = 4;
+    // For Kubernetes-based deployments, pods use ephemeral local storage for scratch space, caching, and for logs.
+    EPHEMERAL_STORAGE = 5;
+  }
+
+  // Encapsulates a resource name and value.
+  message ResourceEntry {
+    // Resource name.
+    ResourceName name = 1;
+
+    // Value must be a valid k8s quantity. See
+    // https://github.com/kubernetes/apimachinery/blob/master/pkg/api/resource/quantity.go#L30-L80
+    string value = 2;
+  }
+
+  // The desired set of resources requested. ResourceNames must be unique within the list.
+  repeated ResourceEntry requests = 1;
+
+  // Defines a set of bounds (e.g. min/max) within which the task can reliably run. ResourceNames must be unique
+  // within the list.
+  repeated ResourceEntry limits = 2;
+}
+
+// Metadata associated with the GPU accelerator to allocate to a task. Contains
+// information about device type, and for multi-instance GPUs, the partition size to
+// use.
+message GPUAccelerator {
+  // This can be any arbitrary string, and should be informed by the labels or taints
+  // associated with the nodes in question. Default cloud provider labels typically
+  // use the following values: `nvidia-tesla-t4`, `nvidia-tesla-a100`, etc.
+  string device = 1;
+  oneof partition_size_value {
+    bool unpartitioned = 2;
+    // Like `device`, this can be any arbitrary string, and should be informed by
+    // the labels or taints associated with the nodes in question. Default cloud
+    // provider labels typically use the following values: `1g.5gb`, `2g.10gb`, etc.
+    string partition_size = 3;
+  }
+}
+
+// Encapsulates all non-standard resources, not captured by v1.ResourceRequirements, to
+// allocate to a task.
+message ExtendedResources {
+  // GPU accelerator to select for task. Contains information about device type, and
+  // for multi-instance GPUs, the partition size to use.
+  GPUAccelerator gpu_accelerator = 1;
+}
+
+// Runtime information. This is loosely defined to allow for extensibility.
+message RuntimeMetadata {
+  enum RuntimeType {
+    OTHER = 0;
+    FLYTE_SDK = 1;
+  }
+
+  // Type of runtime.
+  RuntimeType type = 1;
+
+  // Version of the runtime. All versions should be backward compatible. However, certain cases call for version
+  // checks to ensure tighter validation or setting expectations.
+  string version = 2;
+
+  //+optional It can be used to provide extra information about the runtime (e.g. python, golang... etc.).
+  string flavor = 3;
+}
+
+// Task Metadata
+message TaskMetadata {
+  // Indicates whether the system should attempt to look up this task's output to avoid duplication of work.
+  bool discoverable = 1;
+
+  // Runtime information about the task.
+  RuntimeMetadata runtime = 2;
+
+  // The overall timeout of a task including user-triggered retries.
+  google.protobuf.Duration timeout = 4;
+
+  // Number of retries per task.
+  RetryStrategy retries = 5;
+
+  // Indicates a logical version to apply to this task for the purpose of discovery.
+  string discovery_version = 6;
+
+  // If set, this indicates that this task is deprecated. This will enable owners of tasks to notify consumers
+  // of the ending of support for a given task.
+  string deprecated_error_message = 7;
+
+  // For interruptible we will populate it at the node level but require it be part of TaskMetadata
+  // for a user to set the value.
+  // We are using oneof instead of bool because otherwise we would be unable to distinguish between the value being
+  // set by the user or defaulting to false.
+  // The logic of handling precedence will be done as part of flytepropeller.
+
+  // Identify whether task is interruptible
+  oneof interruptible_value {
+    bool interruptible = 8;
+  };
+
+  // Indicates whether the system should attempt to execute discoverable instances in serial to avoid duplicate work
+  bool cache_serializable = 9;
+
+  // Indicates whether the task will generate a Deck URI when it finishes executing.
+  bool generates_deck = 10;
+
+  // Arbitrary tags that allow users and the platform to store small but arbitrary labels
+  map<string, string> tags = 11;
+
+  // pod_template_name is the unique name of a PodTemplate k8s resource to be used as the base configuration if this
+  // task creates a k8s Pod. If this value is set, the specified PodTemplate will be used instead of, but applied
+  // identically as, the default PodTemplate configured in FlytePropeller.
+  string pod_template_name = 12;
+
+  // cache_ignore_input_vars is the input variables that should not be included when calculating hash for cache.
+  repeated string cache_ignore_input_vars = 13;
+}
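Since the `Resources` values must be valid k8s quantity strings, a short hedged sketch may save readers a round trip to the k8s docs: requesting 1 CPU / 512Mi with a 2-CPU / 1Gi ceiling. Module path and quantities are assumptions.

```python
# Minimal sketch using the generated Python bindings (path assumed).
from flyteidl.core import tasks_pb2

resources = tasks_pb2.Resources(
    requests=[
        tasks_pb2.Resources.ResourceEntry(name=tasks_pb2.Resources.CPU, value="1"),
        tasks_pb2.Resources.ResourceEntry(name=tasks_pb2.Resources.MEMORY, value="512Mi"),
    ],
    limits=[
        tasks_pb2.Resources.ResourceEntry(name=tasks_pb2.Resources.CPU, value="2"),
        tasks_pb2.Resources.ResourceEntry(name=tasks_pb2.Resources.MEMORY, value="1Gi"),
    ],
)
```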
+// A Task structure that uniquely identifies a task in the system.
+// Tasks are registered as a first step in the system.
+message TaskTemplate {
+  // Auto generated taskId by the system. Task Id uniquely identifies this task globally.
+  Identifier id = 1;
+
+  // A predefined yet extensible Task type identifier. This can be used to customize any of the components. If no
+  // extensions are provided in the system, Flyte will resolve this task to its TaskCategory and use the default
+  // implementation registered for the TaskCategory.
+  string type = 2;
+
+  // Extra metadata about the task.
+  TaskMetadata metadata = 3;
+
+  // A strongly typed interface for the task. This enables others to use this task within a workflow and guarantees
+  // compile-time validation of the workflow to avoid costly runtime failures.
+  TypedInterface interface = 4;
+
+  // Custom data about the task. This is extensible to allow various plugins in the system.
+  google.protobuf.Struct custom = 5;
+
+  // Known target types that the system will guarantee plugins for. Custom SDK plugins are allowed to set these if needed.
+  // If no corresponding execution-layer plugins are found, the system will default to handling these using built-in
+  // handlers.
+  oneof target {
+    Container container = 6;
+    K8sPod k8s_pod = 17;
+    Sql sql = 18;
+  }
+
+  // This can be used to customize task handling at execution time for the same task type.
+  int32 task_type_version = 7;
+
+  // security_context encapsulates security attributes requested to run this task.
+  SecurityContext security_context = 8;
+
+  // Encapsulates all non-standard resources, not captured by
+  // v1.ResourceRequirements, to allocate to a task.
+  ExtendedResources extended_resources = 9;
+
+  // Metadata about the custom data defined for this task. This is extensible to allow various plugins in the system
+  // to use as required.
+  // reserve the field numbers 1 through 15 for very frequently occurring message elements
+  map<string, string> config = 16;
+}
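A hedged sketch of the smallest useful `TaskTemplate`, with a `Container` target. In practice flytekit serializes this during registration; the task type string, image, and empty interface are illustrative assumptions.

```python
# Minimal sketch using the generated Python bindings (paths assumed).
from flyteidl.core import identifier_pb2, interface_pb2, tasks_pb2

template = tasks_pb2.TaskTemplate(
    id=identifier_pb2.Identifier(
        resource_type=identifier_pb2.TASK,
        project="flytesnacks",
        domain="development",
        name="my.tasks.echo",
        version="v1",
    ),
    type="python-task",  # assumed task type string
    interface=interface_pb2.TypedInterface(),  # empty inputs/outputs for brevity
    container=tasks_pb2.Container(
        image="ghcr.io/example/echo:latest",  # assumed image
        args=["echo", "hello"],
    ),
)
```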
+// ----------------- First class Plugins
+
+// Defines port properties for a container.
+message ContainerPort {
+  // Number of port to expose on the pod's IP address.
+  // This must be a valid port number, 0 < x < 65536.
+  uint32 container_port = 1;
+}
+
+message Container {
+  // Container image url. Eg: docker/redis:latest
+  string image = 1;
+
+  // Command to be executed, if not provided, the default entrypoint in the container image will be used.
+  repeated string command = 2;
+
+  // These will default to Flyte given paths. If provided, the system will not append known paths. If the task still
+  // needs flyte's inputs and outputs path, add $(FLYTE_INPUT_FILE), $(FLYTE_OUTPUT_FILE) wherever it makes sense and the
+  // system will populate these before executing the container.
+  repeated string args = 3;
+
+  // Container resources requirement as specified by the container engine.
+  Resources resources = 4;
+
+  // Environment variables will be set as the container is starting up.
+  repeated KeyValuePair env = 5;
+
+  // Allows extra configs to be available for the container.
+  // TODO: elaborate on how configs will become available.
+  // Deprecated, please use TaskTemplate.config instead.
+  repeated KeyValuePair config = 6 [deprecated = true];
+
+  // Ports to open in the container. This feature is not supported by all execution engines. (e.g. supported on K8s but
+  // not supported on AWS Batch)
+  // Only K8s
+  repeated ContainerPort ports = 7;
+
+  // BETA: Optional configuration for DataLoading. If not specified, then default values are used.
+  // This makes it possible to run a completely portable container that uses inputs and outputs
+  // only from the local file-system and has no reference to flyteidl. This is supported only on K8s at the moment.
+  // If data loading is enabled, then data will be mounted in accompanying directories specified in the DataLoadingConfig. If the directories
+  // are not specified, inputs will be mounted onto and outputs will be uploaded from a pre-determined file-system path. Refer to the documentation
+  // to understand the default paths.
+  // Only K8s
+  DataLoadingConfig data_config = 9;
+
+  // Architecture-type the container image supports.
+  enum Architecture {
+    UNKNOWN = 0;
+    AMD64 = 1;
+    ARM64 = 2;
+    ARM_V6 = 3;
+    ARM_V7 = 4;
+  }
+  Architecture architecture = 10;
+}
+
+// Strategy to use when dealing with Blob, Schema, or multipart blob data (large datasets)
+message IOStrategy {
+  // Mode to use for downloading
+  enum DownloadMode {
+    // All data will be downloaded before the main container is executed
+    DOWNLOAD_EAGER = 0;
+    // Data will be downloaded as a stream and an End-Of-Stream marker will be written to indicate all data has been downloaded. Refer to protocol for details
+    DOWNLOAD_STREAM = 1;
+    // Large objects (offloaded) will not be downloaded
+    DO_NOT_DOWNLOAD = 2;
+  }
+  // Mode to use for uploading
+  enum UploadMode {
+    // All data will be uploaded after the main container exits
+    UPLOAD_ON_EXIT = 0;
+    // Data will be uploaded as it appears. Refer to protocol specification for details
+    UPLOAD_EAGER = 1;
+    // Data will not be uploaded, only references will be written
+    DO_NOT_UPLOAD = 2;
+  }
+  // Mode to use to manage downloads
+  DownloadMode download_mode = 1;
+  // Mode to use to manage uploads
+  UploadMode upload_mode = 2;
+}
+// This configuration allows executing raw containers in Flyte using the Flyte CoPilot system.
+// Flyte CoPilot eliminates the need for flytekit or an SDK inside the container. Any inputs required by the user's
+// container are side-loaded in the input_path.
+// Any outputs generated by the user container - within output_path - are automatically uploaded.
+message DataLoadingConfig {
+  // LiteralMapFormat decides the encoding format in which the input metadata should be made available to the containers.
+  // If the user has access to the protocol buffer definitions, it is recommended to use the PROTO format.
+  // JSON and YAML do not need any protobuf definitions to be read.
+  // All remote references in core.LiteralMap are replaced with local filesystem references (the data is downloaded to the local filesystem).
+  enum LiteralMapFormat {
+    // JSON / YAML for the metadata (which contains inlined primitive values). The representation is inline with the standard json specification as specified - https://www.json.org/json-en.html
+    JSON = 0;
+    YAML = 1;
+    // Proto is a serialized binary of `core.LiteralMap` defined in flyteidl/core
+    PROTO = 2;
+  }
+  // Flag that enables DataLoading Config. If this is not set, data loading will not be used!
+  bool enabled = 1;
+  // File system path (start at root). This folder will contain all the inputs exploded to a separate file.
+  // Example, if the input interface needs (x: int, y: blob, z: multipart_blob) and the input path is '/var/flyte/inputs', then the file system will look like
+  // /var/flyte/inputs/inputs.<format> (.pb / .json / .yaml) -> Format as defined previously. The Blob and Multipart blob will reference local filesystem instead of remote locations
+  // /var/flyte/inputs/x -> X is a file that contains the value of x (integer) in string format
+  // /var/flyte/inputs/y -> Y is a file in Binary format
+  // /var/flyte/inputs/z/... -> Note Z itself is a directory
+  // More information about the protocol - refer to docs #TODO reference docs here
+  string input_path = 2;
+  // File system path (start at root). This folder should contain all the outputs for the task as individual files and/or an error text file
+  string output_path = 3;
+  // In the inputs folder, there will be an additional summary/metadata file that contains references to all files or inlined primitive values.
+  // This format decides the actual encoding for the data. Refer to the encoding to understand the specifics of the contents and the encoding.
+  LiteralMapFormat format = 4;
+  IOStrategy io_strategy = 5;
+}
+
+// Defines a pod spec and additional pod metadata that is created when a task is executed.
+message K8sPod {
+  // Contains additional metadata for building a kubernetes pod.
+  K8sObjectMetadata metadata = 1;
+
+  // Defines the primary pod spec created when a task is executed.
+  // This should be a JSON-marshalled pod spec, which can be defined in
+  // - go, using: https://github.com/kubernetes/api/blob/release-1.21/core/v1/types.go#L2936
+  // - python: using https://github.com/kubernetes-client/python/blob/release-19.0/kubernetes/client/models/v1_pod_spec.py
+  google.protobuf.Struct pod_spec = 2;
+
+  // BETA: Optional configuration for DataLoading. If not specified, then default values are used.
+  // This makes it possible to run a completely portable container that uses inputs and outputs
+  // only from the local file-system and has no reference to flytekit. This is supported only on K8s at the moment.
+  // If data loading is enabled, then data will be mounted in accompanying directories specified in the DataLoadingConfig. If the directories
+  // are not specified, inputs will be mounted onto and outputs will be uploaded from a pre-determined file-system path. Refer to the documentation
+  // to understand the default paths.
+  // Only K8s
+  DataLoadingConfig data_config = 3;
+}
+
+// Metadata for building a kubernetes object when a task is executed.
+message K8sObjectMetadata {
+  // Optional labels to add to the pod definition.
+  map<string, string> labels = 1;
+
+  // Optional annotations to add to the pod definition.
+  map<string, string> annotations = 2;
+}
+
+// Sql represents a generic sql workload with a statement and dialect.
+message Sql {
+  // The actual query to run, the query can have templated parameters.
+  // We use Flyte's Golang templating format for Query templating.
+  // For example,
+  // insert overwrite directory '{{ .rawOutputDataPrefix }}' stored as parquet
+  // select *
+  // from my_table
+  // where ds = '{{ .Inputs.ds }}'
+  string statement = 1;
+  // The dialect of the SQL statement. This is used to validate and parse SQL statements at compilation time to avoid
+  // expensive runtime operations. If set to an unsupported dialect, no validation will be done on the statement.
+  // We support the following dialects: ansi, hive.
+  enum Dialect {
+    UNDEFINED = 0;
+    ANSI = 1;
+    HIVE = 2;
+    OTHER = 3;
+  }
+  Dialect dialect = 2;
+}
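To show the Golang-style templating in context, a hedged sketch of a `Sql` message using the placeholders documented above. Module path and the table/column names are assumptions.

```python
# Minimal sketch using the generated Python bindings (path assumed).
from flyteidl.core import tasks_pb2

sql = tasks_pb2.Sql(
    statement=(
        "insert overwrite directory '{{ .rawOutputDataPrefix }}' stored as parquet "
        "select * from my_table where ds = '{{ .Inputs.ds }}'"
    ),
    dialect=tasks_pb2.Sql.HIVE,
)
```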
diff --git a/flyrs/protos/flyteidl/core/types.proto b/flyrs/protos/flyteidl/core/types.proto
new file mode 100644
index 0000000000..2c36ff32ec
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/types.proto
@@ -0,0 +1,208 @@
+syntax = "proto3";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+import "google/protobuf/struct.proto";
+
+// Define a set of simple types.
+enum SimpleType {
+  NONE = 0;
+  INTEGER = 1;
+  FLOAT = 2;
+  STRING = 3;
+  BOOLEAN = 4;
+  DATETIME = 5;
+  DURATION = 6;
+  BINARY = 7;
+  ERROR = 8;
+  STRUCT = 9;
+}
+
+// Defines schema columns and types to strongly type-validate schemas interoperability.
+message SchemaType {
+  message SchemaColumn {
+    // A unique name -within the schema type- for the column.
+    string name = 1;
+
+    enum SchemaColumnType {
+      INTEGER = 0;
+      FLOAT = 1;
+      STRING = 2;
+      BOOLEAN = 3;
+      DATETIME = 4;
+      DURATION = 5;
+    }
+
+    // The column type. This allows a limited set of types currently.
+    SchemaColumnType type = 2;
+  }
+
+  // A list of ordered columns this schema comprises.
+  repeated SchemaColumn columns = 3;
+}
+
+message StructuredDatasetType {
+  message DatasetColumn {
+    // A unique name within the schema type for the column.
+    string name = 1;
+
+    // The column type.
+    LiteralType literal_type = 2;
+  }
+
+  // A list of ordered columns this schema comprises.
+  repeated DatasetColumn columns = 1;
+
+  // This is the storage format, the format of the bits at rest
+  // parquet, feather, csv, etc.
+  // For two types to be compatible, the format will need to be an exact match.
+  string format = 2;
+
+  // This is a string representing the type that the bytes in external_schema_bytes are formatted in.
+  // This is an optional field that will not be used for type checking.
+  string external_schema_type = 3;
+
+  // The serialized bytes of a third-party schema library like Arrow.
+  // This is an optional field that will not be used for type checking.
+  bytes external_schema_bytes = 4;
+}
+
+// Defines type behavior for blob objects
+message BlobType {
+  enum BlobDimensionality {
+    SINGLE = 0;
+    MULTIPART = 1;
+  }
+
+  // Format can be a free form string understood by SDK/UI etc like
+  // csv, parquet etc
+  string format = 1;
+  BlobDimensionality dimensionality = 2;
+}
+
+// Enables declaring enum types, with predefined string values.
+// For len(values) > 0, the first value in the ordered list is regarded as the default value. If you wish
+// to provide no defaults, make the first value undefined.
+message EnumType {
+  // Predefined set of enum values.
+  repeated string values = 1;
+}
+
+// Defines a tagged union type, also known as a variant (and formally as the sum type).
+//
+// A sum type S is defined by a sequence of types (A, B, C, ...), each tagged by a string tag.
+// A value of type S is constructed from a value of any of the variant types. The specific choice of type is recorded by
+// storing the variant's tag with the literal value and can be examined in runtime.
+//
+// Type S is typically written as
+// S := Apple A | Banana B | Cantaloupe C | ...
+//
+// Notably, a nullable (optional) type is a sum type between some type X and the singleton type representing a null-value:
+// Optional X := X | Null
+//
+// See also: https://en.wikipedia.org/wiki/Tagged_union
+message UnionType {
+  // Predefined set of variants in union.
+  repeated LiteralType variants = 1;
+}
+
+// Hints to improve type matching
+// e.g. allows distinguishing output from custom type transformers
+// even if the underlying IDL serialization matches.
+message TypeStructure {
+  // Must exactly match for types to be castable
+  string tag = 1;
+  // dataclass_type only exists for dataclasses.
+  // This is used to resolve the type of the fields of a dataclass.
+  // The key is the field name, and the value is the literal type of the field
+  // e.g. For dataclass Foo, with fields a, and a is a string
+  // Foo.a will be resolved as a literal type of string from dataclass_type
+  map<string, LiteralType> dataclass_type = 2;
+}
+
+// TypeAnnotation encapsulates registration time information about a type. This can be used for various control-plane operations. TypeAnnotation will not be available at runtime when a task runs.
+message TypeAnnotation {
+  // An arbitrary JSON payload to describe a type.
+  google.protobuf.Struct annotations = 1;
+}
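A quick hedged illustration of how these compose into the `LiteralType` defined next: the type of a `typing.List[str]` is a collection whose element type is the simple STRING. Module path assumed.

```python
# Minimal sketch using the generated Python bindings (path assumed).
from flyteidl.core import types_pb2

list_of_str = types_pb2.LiteralType(
    collection_type=types_pb2.LiteralType(simple=types_pb2.STRING)
)
```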
+// Defines a strong type to allow type checking between interfaces.
+message LiteralType {
+  oneof type {
+    // A simple type that can be compared one-to-one with another.
+    SimpleType simple = 1;
+
+    // A complex type that requires matching of inner fields.
+    SchemaType schema = 2;
+
+    // Defines the type of the value of a collection. Only homogeneous collections are allowed.
+    LiteralType collection_type = 3;
+
+    // Defines the type of the value of a map type. The type of the key is always a string.
+    LiteralType map_value_type = 4;
+
+    // A blob might have specialized implementation details depending on associated metadata.
+    BlobType blob = 5;
+
+    // Defines an enum with pre-defined string values.
+    EnumType enum_type = 7;
+
+    // Generalized schema support
+    StructuredDatasetType structured_dataset_type = 8;
+
+    // Defines a union type with pre-defined LiteralTypes.
+    UnionType union_type = 10;
+  }
+
+  // This field contains type metadata that is descriptive of the type, but is NOT considered in type-checking. This might be used by
+  // consumers to identify special behavior or display extended information for the type.
+  google.protobuf.Struct metadata = 6;
+
+  // This field contains arbitrary data that might have special semantic
+  // meaning for the client but does not affect internal flyte behavior.
+  TypeAnnotation annotation = 9;
+
+  // Hints to improve type matching.
+  TypeStructure structure = 11;
+}
+
+// A reference to an output produced by a node. The type can be retrieved -and validated- from
+// the underlying interface of the node.
+message OutputReference {
+  // Node id must exist at the graph layer.
+  string node_id = 1;
+
+  // Variable name must refer to an output variable for the node.
+  string var = 2;
+
+  repeated PromiseAttribute attr_path = 3;
+}
+
+// PromiseAttribute stores the attribute path of a promise, which will be resolved at runtime.
+// The attribute path is a list of strings and integers.
+// In the following example,
+// ```
+// @workflow
+// def wf():
+//     o = t1()
+//     t2(o.a["b"][0])
+// ```
+// the output reference that t2 binds to carries the attribute path ["a", "b", 0].
+
+message PromiseAttribute {
+  oneof value {
+    string string_value = 1;
+    int32 int_value = 2;
+  }
+}
+
+// Represents an error thrown from a node.
+message Error {
+  // The node id that threw the error.
+  string failed_node_id = 1;
+
+  // Error message thrown.
+  string message = 2;
+}
diff --git a/flyrs/protos/flyteidl/core/workflow.proto b/flyrs/protos/flyteidl/core/workflow.proto
new file mode 100644
index 0000000000..4701526d72
--- /dev/null
+++ b/flyrs/protos/flyteidl/core/workflow.proto
@@ -0,0 +1,315 @@
+syntax = "proto3";
+
+package flyteidl.core;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
+
+import "flyteidl/core/condition.proto";
+import "flyteidl/core/execution.proto";
+import "flyteidl/core/identifier.proto";
+import "flyteidl/core/interface.proto";
+import "flyteidl/core/literals.proto";
+import "flyteidl/core/tasks.proto";
+import "flyteidl/core/types.proto";
+import "flyteidl/core/security.proto";
+import "google/protobuf/duration.proto";
+
+// Defines a condition and the execution unit that should be executed if the condition is satisfied.
+message IfBlock {
+  core.BooleanExpression condition = 1;
+  Node then_node = 2;
+}
+
+// Defines a series of if/else blocks. The first branch whose condition evaluates to true is the one to execute.
+// If no conditions were satisfied, the else_node or the error will execute.
+message IfElseBlock {
+  //+required. First condition to evaluate.
+  IfBlock case = 1;
+
+  //+optional. Additional branches to evaluate.
+  repeated IfBlock other = 2;
+
+  //+required.
+    oneof default {
+        // The node to execute in case none of the branches were taken.
+        Node else_node = 3;
+
+        // An error to throw in case none of the branches were taken.
+        Error error = 4;
+    }
+}
+
+// BranchNode is a special node that alters the flow of the workflow graph. It allows the control flow to branch at
+// runtime based on a series of conditions that get evaluated on various parameters (e.g. inputs, primitives).
+message BranchNode {
+    //+required
+    IfElseBlock if_else = 1;
+}
+
+// Refers to the task that the Node is to execute.
+message TaskNode {
+    oneof reference {
+        // A globally unique identifier for the task.
+        Identifier reference_id = 1;
+    }
+
+    // Optional overrides applied at task execution time.
+    TaskNodeOverrides overrides = 2;
+}
+
+// Refers to the workflow the node is to execute.
+message WorkflowNode {
+    oneof reference {
+        // A globally unique identifier for the launch plan.
+        Identifier launchplan_ref = 1;
+
+        // Reference to a subworkflow that should be defined within the compiler context.
+        Identifier sub_workflow_ref = 2;
+    }
+}
+
+// ApproveCondition represents a dependency on an external approval. During execution, this will manifest as a boolean
+// signal with the provided signal_id.
+message ApproveCondition {
+    // A unique identifier for the requested boolean signal.
+    string signal_id = 1;
+}
+
+// SignalCondition represents a dependency on a signal.
+message SignalCondition {
+    // A unique identifier for the requested signal.
+    string signal_id = 1;
+
+    // A type denoting the required value type for this signal.
+    LiteralType type = 2;
+
+    // The variable name for the signal value in this node's outputs.
+    string output_variable_name = 3;
+}
+
+// SleepCondition represents a dependency on waiting for the specified duration.
+message SleepCondition {
+    // The overall duration for this sleep.
+    google.protobuf.Duration duration = 1;
+}
+
+// GateNode refers to the condition that is required for the gate to successfully complete.
+message GateNode {
+    oneof condition {
+        // ApproveCondition represents a dependency on an external approval provided by a boolean signal.
+        ApproveCondition approve = 1;
+
+        // SignalCondition represents a dependency on a signal.
+        SignalCondition signal = 2;
+
+        // SleepCondition represents a dependency on waiting for the specified duration.
+        SleepCondition sleep = 3;
+    }
+}
+
+// ArrayNode is a Flyte node type that simplifies the execution of a sub-node over a list of input
+// values. An ArrayNode can be executed with configurable parallelism (separate from the parent
+// workflow) and can be configured to succeed when a certain number of sub-nodes succeed.
+message ArrayNode {
+    // node is the sub-node that will be executed for each element in the array.
+    Node node = 1;
+
+    // parallelism defines the minimum number of instances to bring up concurrently at any given
+    // point. Note that this is an optimistic restriction and that, due to network partitioning or
+    // other failures, the actual number of currently running instances might be more. This has to
+    // be a positive number if assigned. Default value is size.
+    uint32 parallelism = 2;
+
+    oneof success_criteria {
+        // min_successes is an absolute number of the minimum number of successful completions of
+        // sub-nodes. As soon as this criterion is met, the ArrayNode will be marked as successful
+        // and outputs will be computed. This has to be a non-negative number if assigned. Default
+        // value is size (if specified).
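Since the point of this patch is to drive these messages from Rust, a minimal sketch of building the GateNode above may help. It assumes prost/tonic bindings generated from these vendored protos by the patch's build.rs; the `flyteidl::core` module path and the generated oneof layout are assumptions about the codegen, not something this diff pins down:

```rust
use prost_types::Duration;

// Assumed prost-generated module layout for flyteidl.core.
use flyteidl::core::{gate_node, GateNode, SleepCondition};

/// A gate that sleeps for five minutes before the workflow proceeds.
/// prost models the `condition` oneof as an Option over a generated enum.
fn five_minute_sleep_gate() -> GateNode {
    GateNode {
        condition: Some(gate_node::Condition::Sleep(SleepCondition {
            duration: Some(Duration { seconds: 300, nanos: 0 }),
        })),
    }
}
```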
+        uint32 min_successes = 3;
+
+        // If the array job size is not known beforehand, the min_success_ratio can instead be used
+        // to determine when an ArrayNode can be marked successful.
+        float min_success_ratio = 4;
+    }
+}
+
+// Defines extra information about the Node.
+message NodeMetadata {
+    // A friendly name for the Node
+    string name = 1;
+
+    // The overall timeout of a task.
+    google.protobuf.Duration timeout = 4;
+
+    // Number of retries per task.
+    RetryStrategy retries = 5;
+
+    // Identify whether node is interruptible
+    oneof interruptible_value {
+        bool interruptible = 6;
+    };
+
+    // Identify whether a node should have its outputs cached.
+    oneof cacheable_value {
+        bool cacheable = 7;
+    }
+
+    // The version of the cache to use.
+    oneof cache_version_value {
+        string cache_version = 8;
+    }
+
+    // Identify whether caching operations involving this node should be serialized.
+    oneof cache_serializable_value {
+        bool cache_serializable = 9;
+    }
+}
+
+// Links a variable to an alias.
+message Alias {
+    // Must match one of the output variable names on a node.
+    string var = 1;
+
+    // A workflow-level unique alias that downstream nodes can refer to in their input.
+    string alias = 2;
+}
+
+// A Workflow graph Node. One unit of execution in the graph. Each node can be linked to a Task, a Workflow or a branch
+// node.
+message Node {
+    // A workflow-level unique identifier that identifies this node in the workflow. 'inputs' and 'outputs' are reserved
+    // node ids that cannot be used by other nodes.
+    string id = 1;
+
+    // Extra metadata about the node.
+    NodeMetadata metadata = 2;
+
+    // Specifies how to bind the underlying interface's inputs. All required inputs specified in the underlying interface
+    // must be fulfilled.
+    repeated Binding inputs = 3;
+
+    //+optional Specifies execution dependency for this node ensuring it will only get scheduled to run after all its
+    // upstream nodes have completed. This node will have an implicit dependency on any node that appears in inputs
+    // field.
+    repeated string upstream_node_ids = 4;
+
+    //+optional. A node can define aliases for a subset of its outputs. This is particularly useful if different nodes
+    // need to conform to the same interface (e.g. all branches in a branch node). Downstream nodes must refer to this
+    // node's outputs using the alias if one is specified.
+    repeated Alias output_aliases = 5;
+
+    // Information about the target to execute in this node.
+    oneof target {
+        // Information about the Task to execute in this node.
+        TaskNode task_node = 6;
+
+        // Information about the Workflow to execute in this node.
+        WorkflowNode workflow_node = 7;
+
+        // Information about the branch node to evaluate in this node.
+        BranchNode branch_node = 8;
+
+        // Information about the condition to evaluate in this node.
+        GateNode gate_node = 9;
+
+        // Information about the sub-node executions for each value in the list of this node's
+        // input values.
+        ArrayNode array_node = 10;
+    }
+}
+
+// This is workflow layer metadata. These settings are only applicable to the workflow as a whole, and do not
+// percolate down to child entities (like tasks) launched by the workflow.
+message WorkflowMetadata {
+    // Indicates the runtime priority of workflow executions.
+    QualityOfService quality_of_service = 1;
+
+    // Failure Handling Strategy
+    enum OnFailurePolicy {
+        // FAIL_IMMEDIATELY instructs the system to fail as soon as a node fails in the workflow. It'll automatically
+        // abort all currently running nodes and clean up resources before finally marking the workflow execution as
+        // failed.
+        FAIL_IMMEDIATELY = 0;
+
+        // FAIL_AFTER_EXECUTABLE_NODES_COMPLETE instructs the system to make as much progress as it can. The system will
+        // not alter the dependencies of the execution graph, so any node that depends on the failed node will not be run.
+        // Other nodes will be executed to completion before resources are cleaned up and the workflow
+        // execution is marked as failed.
+        FAIL_AFTER_EXECUTABLE_NODES_COMPLETE = 1;
+    }
+
+    // Defines how the system should behave when a failure is detected in the workflow execution.
+    OnFailurePolicy on_failure = 2;
+
+    // Arbitrary tags that allow users and the platform to store small but arbitrary labels
+    map<string, string> tags = 3;
+}
+
+// The difference between these settings and the WorkflowMetadata ones is that these are meant to be passed down to
+// a workflow's underlying entities (like tasks). For instance, 'interruptible' has no meaning at the workflow layer, it
+// is only relevant when a task executes. The settings here are the defaults that are passed to all nodes
+// unless explicitly overridden at the node layer.
+// If you are adding a setting that applies to both the Workflow itself, and everything underneath it, it should be
+// added to both this object and the WorkflowMetadata object above.
+message WorkflowMetadataDefaults {
+    // Whether child nodes of the workflow are interruptible.
+    bool interruptible = 1;
+}
+
+// Flyte Workflow Structure that encapsulates task, branch and subworkflow nodes to form a statically analyzable,
+// directed acyclic graph.
+message WorkflowTemplate {
+    // A globally unique identifier for the workflow.
+    Identifier id = 1;
+
+    // Extra metadata about the workflow.
+    WorkflowMetadata metadata = 2;
+
+    // Defines a strongly typed interface for the Workflow. This can include some optional parameters.
+    TypedInterface interface = 3;
+
+    // A list of nodes. In addition, 'globals' is a special reserved node id that can be used to consume workflow inputs.
+    repeated Node nodes = 4;
+
+    // A list of output bindings that specify how to construct workflow outputs. Bindings can pull node outputs or
+    // specify literals. All workflow outputs specified in the interface field must be bound in order for the workflow
+    // to be validated. A workflow has an implicit dependency on all of its nodes to execute successfully in order to
+    // bind final outputs.
+    // Most of these outputs will be Bindings with a BindingData of type OutputReference. That is, your workflow can
+    // just have an output of some constant (`Output(5)`), but usually, the workflow will be pulling
+    // outputs from the output of a task.
+    repeated Binding outputs = 5;
+
+    //+optional A catch-all node. This node is executed whenever the execution engine determines the workflow has failed.
+    // The interface of this node must match the Workflow interface with an additional input named 'error' of type
+    // pb.lyft.flyte.core.Error.
+    Node failure_node = 6;
+
+    // workflow defaults
+    WorkflowMetadataDefaults metadata_defaults = 7;
+}
+
+// Optional task node overrides that will be applied at task execution time.
+message TaskNodeOverrides {
+    // A customizable interface to convey resources requested for a task container.
+    Resources resources = 1;
+
+    // Overrides for all non-standard resources, not captured by
+    // v1.ResourceRequirements, to allocate to a task.
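For the OnFailurePolicy enum defined above, a hedged sketch of setting it from the generated Rust types (same prost assumptions as the earlier snippet; prost stores proto enums as plain i32 fields on the message):

```rust
use std::collections::HashMap;

use flyteidl::core::{workflow_metadata::OnFailurePolicy, WorkflowMetadata};

/// Workflow metadata asking the engine to run every executable node to
/// completion before marking the execution failed.
fn run_to_completion_metadata() -> WorkflowMetadata {
    WorkflowMetadata {
        quality_of_service: None,
        on_failure: OnFailurePolicy::FailAfterExecutableNodesComplete as i32,
        tags: HashMap::new(),
    }
}
```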
+ ExtendedResources extended_resources = 2; + + // Override for the image used by task pods. + string container_image = 3; +} + +// A structure that uniquely identifies a launch plan in the system. +message LaunchPlanTemplate { + // A globally unique identifier for the launch plan. + Identifier id = 1; + + // The input and output interface for the launch plan + TypedInterface interface = 2; + + // A collection of input literals that are fixed for the launch plan + LiteralMap fixed_inputs = 3; +} diff --git a/flyrs/protos/flyteidl/core/workflow_closure.proto b/flyrs/protos/flyteidl/core/workflow_closure.proto new file mode 100644 index 0000000000..c8ee990036 --- /dev/null +++ b/flyrs/protos/flyteidl/core/workflow_closure.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package flyteidl.core; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core"; + +import "flyteidl/core/workflow.proto"; +import "flyteidl/core/tasks.proto"; + +// Defines an enclosed package of workflow and tasks it references. +message WorkflowClosure { + //required. Workflow template. + WorkflowTemplate workflow = 1; + + //optional. A collection of tasks referenced by the workflow. Only needed if the workflow + // references tasks. + repeated TaskTemplate tasks = 2; +} diff --git a/flyrs/protos/flyteidl/datacatalog/datacatalog.proto b/flyrs/protos/flyteidl/datacatalog/datacatalog.proto new file mode 100644 index 0000000000..e296603113 --- /dev/null +++ b/flyrs/protos/flyteidl/datacatalog/datacatalog.proto @@ -0,0 +1,420 @@ +syntax = "proto3"; + +package datacatalog; + +import "flyteidl/core/literals.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/datacatalog"; + +/* + * Data Catalog service definition + * Data Catalog is a service for indexing parameterized, strongly-typed data artifacts across revisions. + * Artifacts are associated with a Dataset, and can be tagged for retrieval. + */ +service DataCatalog { + // Create a new Dataset. Datasets are unique based on the DatasetID. Datasets are logical groupings of artifacts. + // Each dataset can have one or more artifacts + rpc CreateDataset (CreateDatasetRequest) returns (CreateDatasetResponse); + + // Get a Dataset by the DatasetID. This returns the Dataset with the associated metadata. + rpc GetDataset (GetDatasetRequest) returns (GetDatasetResponse); + + // Create an artifact and the artifact data associated with it. An artifact can be a hive partition or arbitrary + // files or data values + rpc CreateArtifact (CreateArtifactRequest) returns (CreateArtifactResponse); + + // Retrieve an artifact by an identifying handle. This returns an artifact along with the artifact data. + rpc GetArtifact (GetArtifactRequest) returns (GetArtifactResponse); + + // Associate a tag with an artifact. Tags are unique within a Dataset. + rpc AddTag (AddTagRequest) returns (AddTagResponse); + + // Return a paginated list of artifacts + rpc ListArtifacts (ListArtifactsRequest) returns (ListArtifactsResponse); + + // Return a paginated list of datasets + rpc ListDatasets (ListDatasetsRequest) returns (ListDatasetsResponse); + + // Updates an existing artifact, overwriting the stored artifact data in the underlying blob storage. + rpc UpdateArtifact (UpdateArtifactRequest) returns (UpdateArtifactResponse); + + // Attempts to get or extend a reservation for the corresponding artifact. If one already exists + // (ie. 
another entity owns the reservation) then that reservation is retrieved.
+    // Once you acquire a reservation, you need to periodically extend the reservation with an
+    // identical call. If the reservation is not extended before the defined expiration, it may be
+    // acquired by another task.
+    // Note: We may have multiple concurrent tasks with the same signature and the same input that
+    // try to populate the same artifact at the same time. Thus with reservation, only one task can
+    // run at a time, until the reservation expires.
+    // Note: If task A does not extend the reservation in time and the reservation expires, another
+    // task B may take over the reservation, resulting in two tasks A and B running in parallel. So
+    // a third task C may get the Artifact from A or B, whichever writes last.
+    rpc GetOrExtendReservation (GetOrExtendReservationRequest) returns (GetOrExtendReservationResponse);
+
+    // Release the reservation when the task holding the spot fails so that the other tasks
+    // can grab the spot.
+    rpc ReleaseReservation (ReleaseReservationRequest) returns (ReleaseReservationResponse);
+}
+
+/*
+ * Request message for creating a Dataset.
+ */
+message CreateDatasetRequest {
+    Dataset dataset = 1;
+}
+
+/*
+ * Response message for creating a Dataset
+ */
+message CreateDatasetResponse {
+
+}
+
+/*
+ * Request message for retrieving a Dataset. The Dataset is retrieved by its unique identifier
+ * which is a combination of several fields.
+ */
+message GetDatasetRequest {
+    DatasetID dataset = 1;
+}
+
+/*
+ * Response message for retrieving a Dataset. The response will include the metadata for the
+ * Dataset.
+ */
+message GetDatasetResponse {
+    Dataset dataset = 1;
+}
+
+/*
+ * Request message for retrieving an Artifact. Retrieve an artifact based on a query handle that
+ * can be one of artifact_id or tag. The result returned will include the artifact data and metadata
+ * associated with the artifact.
+ */
+message GetArtifactRequest {
+    DatasetID dataset = 1;
+
+    oneof query_handle {
+        string artifact_id = 2;
+        string tag_name = 3;
+    }
+}
+
+/*
+ * Response message for retrieving an Artifact. The result returned will include the artifact data
+ * and metadata associated with the artifact.
+ */
+message GetArtifactResponse {
+    Artifact artifact = 1;
+}
+
+/*
+ * Request message for creating an Artifact and its associated artifact Data.
+ */
+message CreateArtifactRequest {
+    Artifact artifact = 1;
+}
+
+/*
+ * Response message for creating an Artifact.
+ */
+message CreateArtifactResponse {
+
+}
+
+/*
+ * Request message for tagging an Artifact.
+ */
+message AddTagRequest {
+    Tag tag = 1;
+}
+
+/*
+ * Response message for tagging an Artifact.
+ */
+message AddTagResponse {
+
+}
+
+// List the artifacts that belong to the Dataset, optionally filtered using a filter expression.
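The GetOrExtendReservation comments above describe an acquire-then-heartbeat protocol. A minimal sketch of that loop from the Rust side, under the same assumed prost/tonic bindings (client module path, the `ReservationId` casing, and the endpoint are all assumptions):

```rust
use prost_types::Duration;

use flyteidl::datacatalog::{
    data_catalog_client::DataCatalogClient, GetOrExtendReservationRequest, ReservationId,
};

/// Acquire a reservation, then extend it with identical periodic calls
/// until the artifact is written; all IDs here are illustrative.
async fn hold_reservation() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = DataCatalogClient::connect("http://localhost:8089").await?;
    let request = GetOrExtendReservationRequest {
        reservation_id: Some(ReservationId {
            dataset_id: None, // fill in the DatasetID being populated
            tag_name: "main".to_string(),
        }),
        owner_id: "worker-1".to_string(),
        heartbeat_interval: Some(Duration { seconds: 30, nanos: 0 }),
    };
    // The same request both acquires and extends. If another owner already
    // holds the spot, the response carries that owner's reservation instead.
    let reservation = client
        .get_or_extend_reservation(request.clone())
        .await?
        .into_inner()
        .reservation;
    // ... do a slice of work, re-send `request` before `expires_at` passes,
    // and call ReleaseReservation when done or on failure.
    let _ = reservation;
    Ok(())
}
```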
+message ListArtifactsRequest {
+    // Use a datasetID for which you want to retrieve the artifacts
+    DatasetID dataset = 1;
+
+    // Apply the filter expression to this query
+    FilterExpression filter = 2;
+    // Pagination options to get a page of artifacts
+    PaginationOptions pagination = 3;
+}
+
+// Response to list artifacts
+message ListArtifactsResponse {
+    // The list of artifacts
+    repeated Artifact artifacts = 1;
+    // Token to use to request the next page, pass this into the next request's PaginationOptions
+    string next_token = 2;
+}
+
+// List the datasets for the given query
+message ListDatasetsRequest {
+    // Apply the filter expression to this query
+    FilterExpression filter = 1;
+    // Pagination options to get a page of datasets
+    PaginationOptions pagination = 2;
+}
+
+// List the datasets response with token for next pagination
+message ListDatasetsResponse {
+    // The list of datasets
+    repeated Dataset datasets = 1;
+    // Token to use to request the next page, pass this into the next request's PaginationOptions
+    string next_token = 2;
+}
+
+/*
+ * Request message for updating an Artifact and overwriting its associated ArtifactData.
+ */
+message UpdateArtifactRequest {
+    // ID of dataset the artifact is associated with
+    DatasetID dataset = 1;
+
+    // Either ID of artifact or name of tag to retrieve existing artifact from
+    oneof query_handle {
+        string artifact_id = 2;
+        string tag_name = 3;
+    }
+
+    // List of data to overwrite stored artifact data with. Must contain ALL data for updated Artifact as any missing
+    // ArtifactData entries will be removed from the underlying blob storage and database.
+    repeated ArtifactData data = 4;
+
+    // Update execution metadata (including execution domain, name, node, project data) when overwriting cache
+    Metadata metadata = 5;
+}
+
+/*
+ * Response message for updating an Artifact.
+ */
+message UpdateArtifactResponse {
+    // The unique ID of the artifact updated
+    string artifact_id = 1;
+}
+
+/*
+ * ReservationID message that is composed of several string fields.
+ */
+message ReservationID {
+    // The unique ID for the reserved dataset
+    DatasetID dataset_id = 1;
+
+    // The specific artifact tag for the reservation
+    string tag_name = 2;
+}
+
+// Try to acquire or extend an artifact reservation. If an active reservation exists, retrieve that instance.
+message GetOrExtendReservationRequest {
+    // The unique ID for the reservation
+    ReservationID reservation_id = 1;
+
+    // The unique ID of the owner for the reservation
+    string owner_id = 2;
+
+    // Requested reservation extension heartbeat interval
+    google.protobuf.Duration heartbeat_interval = 3;
+}
+
+// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata.
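Both list RPCs above share the token-based pagination pattern: an empty token requests the first page, and an empty next_token marks the last. A sketch, under the same binding assumptions as the previous Rust snippets:

```rust
use flyteidl::datacatalog::{
    data_catalog_client::DataCatalogClient, ListDatasetsRequest, PaginationOptions,
};

/// Walks every page of ListDatasets by feeding each response's next_token
/// back into the following request's PaginationOptions.
async fn list_all_datasets() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = DataCatalogClient::connect("http://localhost:8089").await?;
    let mut token = String::new();
    loop {
        let page = client
            .list_datasets(ListDatasetsRequest {
                filter: None,
                pagination: Some(PaginationOptions {
                    limit: 50,
                    token: token.clone(),
                    sort_key: 0,   // CREATION_TIME
                    sort_order: 0, // DESCENDING
                }),
            })
            .await?
            .into_inner();
        println!("fetched {} datasets", page.datasets.len());
        if page.next_token.is_empty() {
            break;
        }
        token = page.next_token;
    }
    Ok(())
}
```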
+message Reservation {
+    // The unique ID for the reservation
+    ReservationID reservation_id = 1;
+
+    // The unique ID of the owner for the reservation
+    string owner_id = 2;
+
+    // Recommended heartbeat interval to extend reservation
+    google.protobuf.Duration heartbeat_interval = 3;
+
+    // Expiration timestamp of this reservation
+    google.protobuf.Timestamp expires_at = 4;
+
+    // Free-form metadata associated with the artifact
+    Metadata metadata = 6;
+}
+
+// Response including either a newly minted reservation or the existing reservation
+message GetOrExtendReservationResponse {
+    // The reservation to be acquired or extended
+    Reservation reservation = 1;
+}
+
+// Request to release reservation
+message ReleaseReservationRequest {
+    // The unique ID for the reservation
+    ReservationID reservation_id = 1;
+
+    // The unique ID of the owner for the reservation
+    string owner_id = 2;
+}
+
+// Response to release reservation
+message ReleaseReservationResponse {
+
+}
+
+/*
+ * Dataset message. It is uniquely identified by DatasetID.
+ */
+message Dataset {
+    DatasetID id = 1;
+    Metadata metadata = 2;
+    repeated string partitionKeys = 3;
+}
+
+/*
+ * An artifact could have multiple partitions and each partition can have an arbitrary string key/value pair
+ */
+message Partition {
+    string key = 1;
+    string value = 2;
+}
+
+/*
+ * DatasetID message that is composed of several string fields.
+ */
+message DatasetID {
+    string project = 1;   // The name of the project
+    string name = 2;      // The name of the dataset
+    string domain = 3;    // The domain (eg. environment)
+    string version = 4;   // Version of the data schema
+    string UUID = 5;      // UUID for the dataset (if set the above fields are optional)
+
+    // Optional, org key applied to the resource.
+    string org = 6;
+}
+
+/*
+ * Artifact message. It is composed of several string fields.
+ */
+message Artifact {
+    string id = 1;                     // The unique ID of the artifact
+    DatasetID dataset = 2;             // The Dataset that the artifact belongs to
+    repeated ArtifactData data = 3;    // A list of data that is associated with the artifact
+    Metadata metadata = 4;             // Free-form metadata associated with the artifact
+    repeated Partition partitions = 5;
+    repeated Tag tags = 6;
+    google.protobuf.Timestamp created_at = 7; // creation timestamp of artifact, autogenerated by service
+}
+
+/*
+ * ArtifactData that belongs to an artifact
+ */
+message ArtifactData {
+    string name = 1;
+    flyteidl.core.Literal value = 2;
+}
+
+/*
+ * Tag message that is unique to a Dataset. It is associated with a single artifact and
+ * can be retrieved by name later.
+ */
+message Tag {
+    string name = 1;        // Name of tag
+    string artifact_id = 2; // The tagged artifact
+    DatasetID dataset = 3;  // The Dataset that this tag belongs to
+}
+
+/*
+ * Metadata representation for artifacts and datasets
+ */
+message Metadata {
+    map<string, string> key_map = 1; // key map is a dictionary of key/val strings that represent metadata
+}
+
+// Filter expression that is composed of a combination of single filters
+message FilterExpression {
+    repeated SinglePropertyFilter filters = 1;
+}
+
+// A single property to filter on.
+message SinglePropertyFilter {
+    oneof property_filter {
+        TagPropertyFilter tag_filter = 1;
+        PartitionPropertyFilter partition_filter = 2;
+        ArtifactPropertyFilter artifact_filter = 3;
+        DatasetPropertyFilter dataset_filter = 4;
+    }
+
+    // as use-cases come up we can add more operators, ex: gte, like, not eq etc.
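To make the filter shape concrete: a single-clause expression matching artifacts tagged "validated", combining the oneof property filter with the (currently EQUALS-only) operator declared just below. Module and variant names follow prost's usual codegen conventions and should be treated as assumptions:

```rust
use flyteidl::datacatalog::{
    single_property_filter, tag_property_filter, FilterExpression, SinglePropertyFilter,
    TagPropertyFilter,
};

/// Filter: tag_name == "validated".
fn tagged_validated() -> FilterExpression {
    FilterExpression {
        filters: vec![SinglePropertyFilter {
            property_filter: Some(single_property_filter::PropertyFilter::TagFilter(
                TagPropertyFilter {
                    property: Some(tag_property_filter::Property::TagName(
                        "validated".to_string(),
                    )),
                },
            )),
            operator: single_property_filter::ComparisonOperator::Equals as i32,
        }],
    }
}
```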
+ enum ComparisonOperator { + EQUALS = 0; + } + + ComparisonOperator operator = 10; // field 10 in case we add more entities to query + // Next field number: 11 +} + +// Artifact properties we can filter by +message ArtifactPropertyFilter { + // oneof because we can add more properties in the future + oneof property { + string artifact_id = 1; + } +} + +// Tag properties we can filter by +message TagPropertyFilter { + oneof property { + string tag_name = 1; + } +} + +// Partition properties we can filter by +message PartitionPropertyFilter { + oneof property { + KeyValuePair key_val = 1; + } +} + +message KeyValuePair { + string key = 1; + string value = 2; +} + +// Dataset properties we can filter by +message DatasetPropertyFilter { + oneof property { + string project = 1; + string name = 2; + string domain = 3; + string version = 4; + // Optional, org key applied to the dataset. + string org = 5; + } +} + +// Pagination options for making list requests +message PaginationOptions { + + // the max number of results to return + uint32 limit = 1; + + // the token to pass to fetch the next page + string token = 2; + + // the property that we want to sort the results by + SortKey sortKey = 3; + + // the sort order of the results + SortOrder sortOrder = 4; + + enum SortOrder { + DESCENDING = 0; + ASCENDING = 1; + } + + enum SortKey { + CREATION_TIME = 0; + } +} diff --git a/flyrs/protos/flyteidl/event/cloudevents.proto b/flyrs/protos/flyteidl/event/cloudevents.proto new file mode 100644 index 0000000000..d02c5ff516 --- /dev/null +++ b/flyrs/protos/flyteidl/event/cloudevents.proto @@ -0,0 +1,73 @@ +syntax = "proto3"; + +package flyteidl.event; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/event"; + +import "flyteidl/event/event.proto"; +import "flyteidl/core/literals.proto"; +import "flyteidl/core/interface.proto"; +import "flyteidl/core/artifact_id.proto"; +import "flyteidl/core/identifier.proto"; +import "google/protobuf/timestamp.proto"; + +// This is the cloud event parallel to the raw WorkflowExecutionEvent message. It's filled in with additional +// information that downstream consumers may find useful. +message CloudEventWorkflowExecution { + event.WorkflowExecutionEvent raw_event = 1; + + core.TypedInterface output_interface = 2; + + // The following are ExecutionMetadata fields + // We can't have the ExecutionMetadata object directly because of import cycle + repeated core.ArtifactID artifact_ids = 3; + core.WorkflowExecutionIdentifier reference_execution = 4; + string principal = 5; + + // The ID of the LP that generated the execution that generated the Artifact. + // Here for provenance information. + // Launch plan IDs are easier to get than workflow IDs so we'll use these for now. + core.Identifier launch_plan_id = 6; +} + +message CloudEventNodeExecution { + event.NodeExecutionEvent raw_event = 1; + + // The relevant task execution if applicable + core.TaskExecutionIdentifier task_exec_id = 2; + + // The typed interface for the task that produced the event. + core.TypedInterface output_interface = 3; + + // The following are ExecutionMetadata fields + // We can't have the ExecutionMetadata object directly because of import cycle + repeated core.ArtifactID artifact_ids = 4; + string principal = 5; + + // The ID of the LP that generated the execution that generated the Artifact. + // Here for provenance information. + // Launch plan IDs are easier to get than workflow IDs so we'll use these for now. 
+ core.Identifier launch_plan_id = 6; +} + +message CloudEventTaskExecution { + event.TaskExecutionEvent raw_event = 1; +} + +// This event is to be sent by Admin after it creates an execution. +message CloudEventExecutionStart { + // The execution created. + core.WorkflowExecutionIdentifier execution_id = 1; + // The launch plan used. + core.Identifier launch_plan_id = 2; + + core.Identifier workflow_id = 3; + + // Artifact inputs to the workflow execution for which we have the full Artifact ID. These are likely the result of artifact queries that are run. + repeated core.ArtifactID artifact_ids = 4; + + // Artifact inputs to the workflow execution for which we only have the tracking bit that's installed into the Literal's metadata by the Artifact service. + repeated string artifact_trackers = 5; + + string principal = 6; +} diff --git a/flyrs/protos/flyteidl/event/event.proto b/flyrs/protos/flyteidl/event/event.proto new file mode 100644 index 0000000000..641a3e4dae --- /dev/null +++ b/flyrs/protos/flyteidl/event/event.proto @@ -0,0 +1,315 @@ +syntax = "proto3"; + +package flyteidl.event; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/event"; + +import "flyteidl/core/literals.proto"; +import "flyteidl/core/compiler.proto"; +import "flyteidl/core/execution.proto"; +import "flyteidl/core/identifier.proto"; +import "flyteidl/core/catalog.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/struct.proto"; + + +message WorkflowExecutionEvent { + // Workflow execution id + core.WorkflowExecutionIdentifier execution_id = 1; + + // the id of the originator (Propeller) of the event + string producer_id = 2; + + core.WorkflowExecution.Phase phase = 3; + + // This timestamp represents when the original event occurred, it is generated + // by the executor of the workflow. + google.protobuf.Timestamp occurred_at = 4; + + oneof output_result { + // URL to the output of the execution, it encodes all the information + // including Cloud source provider. ie., s3://... + string output_uri = 5; + + // Error information for the execution + core.ExecutionError error = 6; + + // Raw output data produced by this workflow execution. + core.LiteralMap output_data = 7; + } +} + +message NodeExecutionEvent { + // Unique identifier for this node execution + core.NodeExecutionIdentifier id = 1; + + // the id of the originator (Propeller) of the event + string producer_id = 2; + + core.NodeExecution.Phase phase = 3; + + // This timestamp represents when the original event occurred, it is generated + // by the executor of the node. + google.protobuf.Timestamp occurred_at = 4; + + oneof input_value { + string input_uri = 5; + + // Raw input data consumed by this node execution. + core.LiteralMap input_data = 20; + } + + oneof output_result { + // URL to the output of the execution, it encodes all the information + // including Cloud source provider. ie., s3://... + string output_uri = 6; + + // Error information for the execution + core.ExecutionError error = 7; + + // Raw output data produced by this node execution. + core.LiteralMap output_data = 15; + } + + // Additional metadata to do with this event's node target based + // on the node type + oneof target_metadata { + WorkflowNodeMetadata workflow_node_metadata = 8; + TaskNodeMetadata task_node_metadata = 14; + } + + // [To be deprecated] Specifies which task (if any) launched this node. + ParentTaskExecutionMetadata parent_task_metadata = 9; + + // Specifies the parent node of the current node execution. 
Node executions at level zero will not have a parent node. + ParentNodeExecutionMetadata parent_node_metadata = 10; + + // Retry group to indicate grouping of nodes by retries + string retry_group = 11; + + // Identifier of the node in the original workflow/graph + // This maps to value of WorkflowTemplate.nodes[X].id + string spec_node_id = 12; + + // Friendly readable name for the node + string node_name = 13; + + int32 event_version = 16; + + // Whether this node launched a subworkflow. + bool is_parent = 17; + + // Whether this node yielded a dynamic workflow. + bool is_dynamic = 18; + + // String location uniquely identifying where the deck HTML file is + // NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar) + string deck_uri = 19; + + // This timestamp represents the instant when the event was reported by the executing framework. For example, + // when first processing a node the `occurred_at` timestamp should be the instant propeller makes progress, so when + // literal inputs are initially copied. The event however will not be sent until after the copy completes. + // Extracting both of these timestamps facilitates a more accurate portrayal of the evaluation time-series. + google.protobuf.Timestamp reported_at = 21; + + // Indicates if this node is an ArrayNode. + bool is_array = 22; +} + +// For Workflow Nodes we need to send information about the workflow that's launched +message WorkflowNodeMetadata { + core.WorkflowExecutionIdentifier execution_id = 1; +} + +message TaskNodeMetadata { + // Captures the status of caching for this execution. + core.CatalogCacheStatus cache_status = 1; + // This structure carries the catalog artifact information + core.CatalogMetadata catalog_key = 2; + // Captures the status of cache reservations for this execution. + core.CatalogReservation.Status reservation_status = 3; + // The latest checkpoint location + string checkpoint_uri = 4; + + // In the case this task launched a dynamic workflow we capture its structure here. + DynamicWorkflowNodeMetadata dynamic_workflow = 16; +} + +// For dynamic workflow nodes we send information about the dynamic workflow definition that gets generated. +message DynamicWorkflowNodeMetadata { + // id represents the unique identifier of the workflow. + core.Identifier id = 1; + + // Represents the compiled representation of the embedded dynamic workflow. + core.CompiledWorkflowClosure compiled_workflow = 2; + + // dynamic_job_spec_uri is the location of the DynamicJobSpec proto message for this DynamicWorkflow. This is + // required to correctly recover partially completed executions where the workflow has already been compiled. + string dynamic_job_spec_uri = 3; +} + +message ParentTaskExecutionMetadata { + core.TaskExecutionIdentifier id = 1; +} + +message ParentNodeExecutionMetadata { + // Unique identifier of the parent node id within the execution + // This is value of core.NodeExecutionIdentifier.node_id of the parent node + string node_id = 1; +} + +message EventReason { + // An explanation for this event + string reason = 1; + + // The time this reason occurred + google.protobuf.Timestamp occurred_at = 2; +} + +// Plugin specific execution event information. For tasks like Python, Hive, Spark, DynamicJob. +message TaskExecutionEvent { + // ID of the task. In combination with the retryAttempt this will indicate + // the task execution uniquely for a given parent node execution. 
+    core.Identifier task_id = 1;
+
+    // A task execution is always kicked off by a node execution, the event consumer
+    // will use the parent_id to relate the task to its parent node execution
+    core.NodeExecutionIdentifier parent_node_execution_id = 2;
+
+    // retry attempt number for this task, ie., 2 for the second attempt
+    uint32 retry_attempt = 3;
+
+    // Phase associated with the event
+    core.TaskExecution.Phase phase = 4;
+
+    // id of the process that sent this event, mainly for trace debugging
+    string producer_id = 5;
+
+    // log information for the task execution
+    repeated core.TaskLog logs = 6;
+
+    // This timestamp represents when the original event occurred, it is generated
+    // by the executor of the task.
+    google.protobuf.Timestamp occurred_at = 7;
+
+    oneof input_value {
+        // URI of the input file, it encodes all the information
+        // including Cloud source provider. ie., s3://...
+        string input_uri = 8;
+
+        // Raw input data consumed by this task execution.
+        core.LiteralMap input_data = 19;
+    }
+
+    oneof output_result {
+        // URI to the output of the execution, it will be in a format that encodes all the information
+        // including Cloud source provider. ie., s3://...
+        string output_uri = 9;
+
+        // Error information for the execution
+        core.ExecutionError error = 10;
+
+        // Raw output data produced by this task execution.
+        core.LiteralMap output_data = 17;
+    }
+
+    // Custom data that the task plugin sends back. This is extensible to allow various plugins in the system.
+    google.protobuf.Struct custom_info = 11;
+
+    // Some phases, like RUNNING, can send multiple events with changed metadata (new logs, additional custom_info, etc)
+    // that should be recorded regardless of the lack of phase change.
+    // The version field should be incremented when metadata changes across the duration of an individual phase.
+    uint32 phase_version = 12;
+
+    // An optional explanation for the phase transition.
+    // Deprecated: Use reasons instead.
+    string reason = 13 [deprecated = true];
+
+    // An optional list of explanations for the phase transition.
+    repeated EventReason reasons = 21;
+
+    // A predefined yet extensible Task type identifier. If the task definition is already registered in flyte admin
+    // this type will be identical, but not all task executions necessarily use pre-registered definitions and this
+    // type is useful to render the task in the UI, filter task executions, etc.
+    string task_type = 14;
+
+    // Metadata around how a task was executed.
+    TaskExecutionMetadata metadata = 16;
+
+    // The event version is used to indicate versioned changes in how data is reported using this
+    // proto message. For example, event_version > 0 means that map tasks report logs using the
+    // TaskExecutionMetadata ExternalResourceInfo fields for each subtask rather than the TaskLog
+    // in this message.
+    int32 event_version = 18;
+
+    // This timestamp represents the instant when the event was reported by the executing framework. For example, a k8s
+    // pod task may be marked completed at (ie. `occurred_at`) the instant the container running user code completes,
+    // but this event will not be reported until the pod is marked as completed. Extracting both of these timestamps
+    // facilitates a more accurate portrayal of the evaluation time-series.
+    google.protobuf.Timestamp reported_at = 20;
+}
+
+// This message contains metadata about external resources produced or used by a specific task execution.
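A sketch of how an executor might populate the TaskExecutionEvent above from Rust (same binding assumptions as earlier; `task_execution::Phase::Running` reflects prost's usual enum handling). Note how occurred_at and reported_at are set separately, per the timestamp comments above:

```rust
use prost_types::Timestamp;

use flyteidl::core::{task_execution, Identifier, NodeExecutionIdentifier};
use flyteidl::event::TaskExecutionEvent;

/// A minimal RUNNING event; everything not set here stays at prost defaults.
fn running_event(
    task_id: Identifier,
    parent: NodeExecutionIdentifier,
    now: Timestamp,
) -> TaskExecutionEvent {
    TaskExecutionEvent {
        task_id: Some(task_id),
        parent_node_execution_id: Some(parent),
        retry_attempt: 0,
        phase: task_execution::Phase::Running as i32,
        producer_id: "propeller".to_string(),
        // `occurred_at` is when progress actually happened; `reported_at`
        // is when the framework emitted the event.
        occurred_at: Some(now.clone()),
        reported_at: Some(now),
        ..Default::default()
    }
}
```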
+message ExternalResourceInfo {
+
+    // Identifier for an external resource created by this task execution, for example Qubole query IDs or Presto query IDs.
+    string external_id = 1;
+
+    // A unique index for the external resource with respect to all external resources for this task. Although the
+    // identifier may change between task reporting events or retries, this will remain the same to enable aggregating
+    // information from multiple reports.
+    uint32 index = 2;
+
+    // Retry attempt number for this external resource, ie., 2 for the second attempt
+    uint32 retry_attempt = 3;
+
+    // Phase associated with the external resource
+    core.TaskExecution.Phase phase = 4;
+
+    // Captures the status of caching for this external resource execution.
+    core.CatalogCacheStatus cache_status = 5;
+
+    // log information for the external resource execution
+    repeated core.TaskLog logs = 6;
+}
+
+
+// This message holds task execution metadata specific to resource allocation used to manage concurrent
+// executions for a project namespace.
+message ResourcePoolInfo {
+    // Unique resource ID used to identify this execution when allocating a token.
+    string allocation_token = 1;
+
+    // Namespace under which this task execution requested an allocation token.
+    string namespace = 2;
+}
+
+// Holds metadata around how a task was executed.
+// As a task transitions across event phases during execution some attributes, such as its generated name, generated external resources,
+// and more may grow in size but not change necessarily based on the phase transition that sparked the event update.
+// Metadata is a container for these attributes across the task execution lifecycle.
+message TaskExecutionMetadata {
+
+    // Unique, generated name for this task execution used by the backend.
+    string generated_name = 1;
+
+    // Additional data on external resources on other back-ends or platforms (e.g. Hive, Qubole, etc) launched by this task execution.
+    repeated ExternalResourceInfo external_resources = 2;
+
+    // Includes additional data on concurrent resource management used during execution.
+    // This is a repeated field because a plugin can request multiple resource allocations during execution.
+    repeated ResourcePoolInfo resource_pool_info = 3;
+
+    // The identifier of the plugin used to execute this task.
+    string plugin_identifier = 4;
+
+    // Includes the broad category of machine used for this specific task execution.
+    enum InstanceClass {
+        // The default instance class configured for the flyte application platform.
+        DEFAULT = 0;
+
+        // The instance class configured for interruptible tasks.
+        INTERRUPTIBLE = 1;
+    }
+    InstanceClass instance_class = 16;
+}
diff --git a/flyrs/protos/flyteidl/plugins/array_job.proto b/flyrs/protos/flyteidl/plugins/array_job.proto
new file mode 100644
index 0000000000..e202316ef5
--- /dev/null
+++ b/flyrs/protos/flyteidl/plugins/array_job.proto
@@ -0,0 +1,30 @@
+syntax = "proto3";
+
+package flyteidl.plugins;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins";
+
+// Describes a job that can process independent pieces of data concurrently. Multiple copies of the runnable component
+// will be executed concurrently.
+message ArrayJob {
+    // Defines the maximum number of instances to bring up concurrently at any given point. Note that this is an
+    // optimistic restriction and that, due to network partitioning or other failures, the actual number of currently
+    // running instances might be more. This has to be a positive number if assigned. Default value is size.
+    int64 parallelism = 1;
+
+    // Defines the number of instances to launch at most. This number should match the size of the input if the job
+    // requires processing of all input data. This has to be a positive number.
+    // In the case this is not defined, the back-end will determine the size at run-time by reading the inputs.
+    int64 size = 2;
+
+    oneof success_criteria {
+        // An absolute number of the minimum number of successful completions of subtasks. As soon as this criterion is met,
+        // the array job will be marked as successful and outputs will be computed. This has to be a non-negative number if
+        // assigned. Default value is size (if specified).
+        int64 min_successes = 3;
+
+        // If the array job size is not known beforehand, the min_success_ratio can instead be used to determine when an array
+        // job can be marked successful.
+        float min_success_ratio = 4;
+    }
+}
diff --git a/flyrs/protos/flyteidl/plugins/dask.proto b/flyrs/protos/flyteidl/plugins/dask.proto
new file mode 100644
index 0000000000..6c5ecd9daf
--- /dev/null
+++ b/flyrs/protos/flyteidl/plugins/dask.proto
@@ -0,0 +1,41 @@
+syntax = "proto3";
+
+import "flyteidl/core/tasks.proto";
+
+package flyteidl.plugins;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins";
+
+
+// Custom Proto for Dask Plugin.
+message DaskJob {
+    // Spec for the scheduler pod.
+    DaskScheduler scheduler = 1;
+
+    // Spec of the default worker group.
+    DaskWorkerGroup workers = 2;
+}
+
+// Specification for the scheduler pod.
+message DaskScheduler {
+    // Optional image to use. If unset, will use the default image.
+    string image = 1;
+
+    // Resources assigned to the scheduler pod.
+    core.Resources resources = 2;
+}
+
+message DaskWorkerGroup {
+    // Number of workers in the group.
+    uint32 number_of_workers = 1;
+
+    // Optional image to use for the pods of the worker group. If unset, will use the default image.
+    string image = 2;
+
+    // Resources assigned to all pods of the worker group.
+    // As per https://kubernetes.dask.org/en/latest/kubecluster.html?highlight=limit#best-practices
+    // it is advised to only set limits. If requests are not explicitly set, the plugin will make
+    // sure to set requests==limits.
+    // The plugin sets `--memory-limit` as well as `--nthreads` for the workers according to the limit.
+    core.Resources resources = 3;
+}
diff --git a/flyrs/protos/flyteidl/plugins/kubeflow/common.proto b/flyrs/protos/flyteidl/plugins/kubeflow/common.proto
new file mode 100644
index 0000000000..bde59e8b32
--- /dev/null
+++ b/flyrs/protos/flyteidl/plugins/kubeflow/common.proto
@@ -0,0 +1,33 @@
+syntax = "proto3";
+
+package flyteidl.plugins.kubeflow;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins";
+
+
+enum RestartPolicy {
+    RESTART_POLICY_NEVER = 0;
+    RESTART_POLICY_ON_FAILURE = 1;
+    RESTART_POLICY_ALWAYS = 2;
+}
+
+enum CleanPodPolicy {
+    CLEANPOD_POLICY_NONE = 0;
+    CLEANPOD_POLICY_RUNNING = 1;
+    CLEANPOD_POLICY_ALL = 2;
+}
+
+message RunPolicy {
+    // Defines the policy for cleaning up pods after the job completes. Defaults to None.
+    CleanPodPolicy clean_pod_policy = 1;
+
+    // TTL to clean up jobs. Defaults to infinite.
+    int32 ttl_seconds_after_finished = 2;
+
+    // Specifies the duration in seconds relative to the startTime that the job may be active
+    // before the system tries to terminate it; the value must be a positive integer.
+    int32 active_deadline_seconds = 3;
+
+    // Number of retries before marking this job failed.
+    int32 backoff_limit = 4;
+}
diff --git a/flyrs/protos/flyteidl/plugins/kubeflow/mpi.proto b/flyrs/protos/flyteidl/plugins/kubeflow/mpi.proto
new file mode 100644
index 0000000000..5da5fb8d6e
--- /dev/null
+++ b/flyrs/protos/flyteidl/plugins/kubeflow/mpi.proto
@@ -0,0 +1,43 @@
+syntax = "proto3";
+
+package flyteidl.plugins.kubeflow;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins";
+
+import "flyteidl/core/tasks.proto";
+import "flyteidl/plugins/kubeflow/common.proto";
+
+// Proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator
+message DistributedMPITrainingTask {
+    // Worker replicas spec
+    DistributedMPITrainingReplicaSpec worker_replicas = 1;
+
+    // Master replicas spec
+    DistributedMPITrainingReplicaSpec launcher_replicas = 2;
+
+    // RunPolicy encapsulates various runtime policies of the distributed training
+    // job, for example how to clean up resources and how long the job can stay
+    // active.
+    RunPolicy run_policy = 3;
+
+    // Number of slots per worker
+    int32 slots = 4;
+}
+
+// Replica specification for distributed MPI training
+message DistributedMPITrainingReplicaSpec {
+    // Number of replicas
+    int32 replicas = 1;
+
+    // Image used for the replica group
+    string image = 2;
+
+    // Resources required for the replica group
+    core.Resources resources = 3;
+
+    // Restart policy determines whether pods will be restarted when they exit
+    RestartPolicy restart_policy = 4;
+
+    // MPI sometimes requires a different command set for different replica groups
+    repeated string command = 5;
+}
diff --git a/flyrs/protos/flyteidl/plugins/kubeflow/pytorch.proto b/flyrs/protos/flyteidl/plugins/kubeflow/pytorch.proto
new file mode 100644
index 0000000000..c6838b2d1b
--- /dev/null
+++ b/flyrs/protos/flyteidl/plugins/kubeflow/pytorch.proto
@@ -0,0 +1,49 @@
+syntax = "proto3";
+
+package flyteidl.plugins.kubeflow;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins";
+
+import "flyteidl/core/tasks.proto";
+import "flyteidl/plugins/kubeflow/common.proto";
+
+// Custom proto for torch elastic config for distributed training using
+// https://github.com/kubeflow/training-operator/blob/master/pkg/apis/kubeflow.org/v1/pytorch_types.go
+message ElasticConfig {
+    string rdzv_backend = 1;
+    int32 min_replicas = 2;
+    int32 max_replicas = 3;
+    int32 nproc_per_node = 4;
+    int32 max_restarts = 5;
+}
+
+// Proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator
+message DistributedPyTorchTrainingTask {
+    // Worker replicas spec
+    DistributedPyTorchTrainingReplicaSpec worker_replicas = 1;
+
+    // Master replicas spec; the master can only have 1 replica
+    DistributedPyTorchTrainingReplicaSpec master_replicas = 2;
+
+    // RunPolicy encapsulates various runtime policies of the distributed training
+    // job, for example how to clean up resources and how long the job can stay
+    // active.
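Putting the kubeflow MPI task together with the RunPolicy from common.proto, a hedged construction sketch (generated type names such as DistributedMpiTrainingTask follow prost's case conversion and are assumptions, as are the image and sizing):

```rust
use flyteidl::plugins::kubeflow::{
    DistributedMpiTrainingReplicaSpec, DistributedMpiTrainingTask, RunPolicy,
};

/// Two workers plus one launcher, cleaned up after completion and capped
/// at one hour of activity. Image and sizing are illustrative only.
fn small_mpi_job() -> DistributedMpiTrainingTask {
    DistributedMpiTrainingTask {
        worker_replicas: Some(DistributedMpiTrainingReplicaSpec {
            replicas: 2,
            image: "ghcr.io/example/mpi-worker:latest".to_string(),
            resources: None,
            restart_policy: 0, // RESTART_POLICY_NEVER
            command: vec![],
        }),
        launcher_replicas: Some(DistributedMpiTrainingReplicaSpec {
            replicas: 1,
            ..Default::default()
        }),
        run_policy: Some(RunPolicy {
            clean_pod_policy: 2, // CLEANPOD_POLICY_ALL
            ttl_seconds_after_finished: 0,
            active_deadline_seconds: 3600,
            backoff_limit: 0,
        }),
        slots: 1,
    }
}
```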
+    RunPolicy run_policy = 3;
+
+    // config for an elastic pytorch job
+    ElasticConfig elastic_config = 4;
+}
+
+message DistributedPyTorchTrainingReplicaSpec {
+    // Number of replicas
+    int32 replicas = 1;
+
+    // Image used for the replica group
+    string image = 2;
+
+    // Resources required for the replica group
+    core.Resources resources = 3;
+
+    // RestartPolicy determines whether pods will be restarted when they exit
+    RestartPolicy restart_policy = 4;
+}
diff --git a/flyrs/protos/flyteidl/plugins/kubeflow/tensorflow.proto b/flyrs/protos/flyteidl/plugins/kubeflow/tensorflow.proto
new file mode 100644
index 0000000000..789666b989
--- /dev/null
+++ b/flyrs/protos/flyteidl/plugins/kubeflow/tensorflow.proto
@@ -0,0 +1,42 @@
+syntax = "proto3";
+
+package flyteidl.plugins.kubeflow;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins";
+
+import "flyteidl/core/tasks.proto";
+import "flyteidl/plugins/kubeflow/common.proto";
+
+// Proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator
+message DistributedTensorflowTrainingTask {
+    // Worker replicas spec
+    DistributedTensorflowTrainingReplicaSpec worker_replicas = 1;
+
+    // Parameter server replicas spec
+    DistributedTensorflowTrainingReplicaSpec ps_replicas = 2;
+
+    // Chief replicas spec
+    DistributedTensorflowTrainingReplicaSpec chief_replicas = 3;
+
+    // RunPolicy encapsulates various runtime policies of the distributed training
+    // job, for example how to clean up resources and how long the job can stay
+    // active.
+    RunPolicy run_policy = 4;
+
+    // Evaluator replicas spec
+    DistributedTensorflowTrainingReplicaSpec evaluator_replicas = 5;
+}
+
+message DistributedTensorflowTrainingReplicaSpec {
+    // Number of replicas
+    int32 replicas = 1;
+
+    // Image used for the replica group
+    string image = 2;
+
+    // Resources required for the replica group
+    core.Resources resources = 3;
+
+    // RestartPolicy determines whether pods will be restarted when they exit
+    RestartPolicy restart_policy = 4;
+}
diff --git a/flyrs/protos/flyteidl/plugins/mpi.proto b/flyrs/protos/flyteidl/plugins/mpi.proto
new file mode 100644
index 0000000000..69945b7a88
--- /dev/null
+++ b/flyrs/protos/flyteidl/plugins/mpi.proto
@@ -0,0 +1,20 @@
+syntax = "proto3";
+
+package flyteidl.plugins;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins";
+
+// MPI operator proposal https://github.com/kubeflow/community/blob/master/proposals/mpi-operator-proposal.md
+// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator
+message DistributedMPITrainingTask {
+    // number of workers spawned in the cluster for this job
+    int32 num_workers = 1;
+
+    // number of launcher replicas spawned in the cluster for this job
+    // The launcher pod invokes mpirun and communicates with worker pods through MPI.
+    int32 num_launcher_replicas = 2;
+
+    // number of slots per worker used in hostfile.
+    // The available slots (GPUs) in each pod.
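Likewise for the TensorFlow task defined in kubeflow/tensorflow.proto above, whose four replica groups all share one spec type; a sketch under the same binding assumptions:

```rust
use flyteidl::plugins::kubeflow::{
    DistributedTensorflowTrainingReplicaSpec, DistributedTensorflowTrainingTask,
};

/// Chief/worker/PS topology for a TFJob; the evaluator group is omitted.
fn tf_job() -> DistributedTensorflowTrainingTask {
    let base = DistributedTensorflowTrainingReplicaSpec {
        replicas: 4,
        image: "ghcr.io/example/tf:latest".to_string(), // illustrative image
        resources: None,
        restart_policy: 1, // RESTART_POLICY_ON_FAILURE
    };
    DistributedTensorflowTrainingTask {
        worker_replicas: Some(base.clone()),
        ps_replicas: Some(DistributedTensorflowTrainingReplicaSpec {
            replicas: 2,
            ..base.clone()
        }),
        chief_replicas: Some(DistributedTensorflowTrainingReplicaSpec {
            replicas: 1,
            ..base
        }),
        run_policy: None,
        evaluator_replicas: None,
    }
}
```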
+ int32 slots = 3; +} diff --git a/flyrs/protos/flyteidl/plugins/presto.proto b/flyrs/protos/flyteidl/plugins/presto.proto new file mode 100644 index 0000000000..5ff3a8a2e0 --- /dev/null +++ b/flyrs/protos/flyteidl/plugins/presto.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package flyteidl.plugins; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; + +// This message works with the 'presto' task type in the SDK and is the object that will be in the 'custom' field +// of a Presto task's TaskTemplate +message PrestoQuery { + string routing_group = 1; + string catalog = 2; + string schema = 3; + string statement = 4; +} diff --git a/flyrs/protos/flyteidl/plugins/pytorch.proto b/flyrs/protos/flyteidl/plugins/pytorch.proto new file mode 100644 index 0000000000..51972f81c4 --- /dev/null +++ b/flyrs/protos/flyteidl/plugins/pytorch.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package flyteidl.plugins; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; + +// Custom proto for torch elastic config for distributed training using +// https://github.com/kubeflow/training-operator/blob/master/pkg/apis/kubeflow.org/v1/pytorch_types.go +message ElasticConfig { + string rdzv_backend = 1; + int32 min_replicas = 2; + int32 max_replicas = 3; + int32 nproc_per_node = 4; + int32 max_restarts = 5; +} + +// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator +message DistributedPyTorchTrainingTask { + // number of worker replicas spawned in the cluster for this job + int32 workers = 1; + + // config for an elastic pytorch job + // + ElasticConfig elastic_config = 2; +} diff --git a/flyrs/protos/flyteidl/plugins/qubole.proto b/flyrs/protos/flyteidl/plugins/qubole.proto new file mode 100644 index 0000000000..b1faada9f3 --- /dev/null +++ b/flyrs/protos/flyteidl/plugins/qubole.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package flyteidl.plugins; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; + +// Defines a query to execute on a hive cluster. +message HiveQuery { + string query = 1; + uint32 timeout_sec = 2; + uint32 retryCount = 3; +} + +// Defines a collection of hive queries. +message HiveQueryCollection { + repeated HiveQuery queries = 2; +} + +// This message works with the 'hive' task type in the SDK and is the object that will be in the 'custom' field +// of a hive task's TaskTemplate +message QuboleHiveJob { + string cluster_label = 1; + HiveQueryCollection query_collection = 2 [deprecated=true]; + repeated string tags = 3; + HiveQuery query = 4; +} diff --git a/flyrs/protos/flyteidl/plugins/ray.proto b/flyrs/protos/flyteidl/plugins/ray.proto new file mode 100644 index 0000000000..1afcee8d93 --- /dev/null +++ b/flyrs/protos/flyteidl/plugins/ray.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; + +package flyteidl.plugins; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; + +// RayJobSpec defines the desired state of RayJob +message RayJob { + // RayClusterSpec is the cluster template to run the job + RayCluster ray_cluster = 1; + // runtime_env is base64 encoded. + // Ray runtime environments: https://docs.ray.io/en/latest/ray-core/handling-dependencies.html#runtime-environments + string runtime_env = 2; + // shutdown_after_job_finishes specifies whether the RayCluster should be deleted after the RayJob finishes. 
+    bool shutdown_after_job_finishes = 3;
+    // ttl_seconds_after_finished specifies the number of seconds after which the RayCluster will be deleted after the RayJob finishes.
+    int32 ttl_seconds_after_finished = 4;
+}
+
+// Defines the desired state of RayCluster
+message RayCluster {
+    // HeadGroupSpec is the spec for the head pod
+    HeadGroupSpec head_group_spec = 1;
+    // WorkerGroupSpecs are the specs for the worker pods
+    repeated WorkerGroupSpec worker_group_spec = 2;
+    // Whether to enable autoscaling.
+    bool enable_autoscaling = 3;
+}
+
+// HeadGroupSpec is the spec for the head pod
+message HeadGroupSpec {
+    // Optional. RayStartParams are the params of the start command: address, object-store-memory.
+    // Refer to https://docs.ray.io/en/latest/ray-core/package-ref.html#ray-start
+    map<string, string> ray_start_params = 1;
+}
+
+// WorkerGroupSpec is the spec for a group of worker pods
+message WorkerGroupSpec {
+    // Required. RayCluster can have multiple worker groups, and it distinguishes them by name
+    string group_name = 1;
+    // Required. Desired replicas of the worker group. Defaults to 1.
+    int32 replicas = 2;
+    // Optional. Min replicas of the worker group. MinReplicas defaults to 1.
+    int32 min_replicas = 3;
+    // Optional. Max replicas of the worker group. MaxReplicas defaults to maxInt32
+    int32 max_replicas = 4;
+    // Optional. RayStartParams are the params of the start command: address, object-store-memory.
+    // Refer to https://docs.ray.io/en/latest/ray-core/package-ref.html#ray-start
+    map<string, string> ray_start_params = 5;
+}
diff --git a/flyrs/protos/flyteidl/plugins/spark.proto b/flyrs/protos/flyteidl/plugins/spark.proto
new file mode 100644
index 0000000000..666ea311b2
--- /dev/null
+++ b/flyrs/protos/flyteidl/plugins/spark.proto
@@ -0,0 +1,34 @@
+syntax = "proto3";
+
+package flyteidl.plugins;
+import "google/protobuf/struct.proto";
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins";
+
+message SparkApplication {
+    enum Type {
+        PYTHON = 0;
+        JAVA = 1;
+        SCALA = 2;
+        R = 3;
+    }
+}
+
+// Custom Proto for Spark Plugin.
+message SparkJob {
+    SparkApplication.Type applicationType = 1;
+    string mainApplicationFile = 2;
+    string mainClass = 3;
+    map<string, string> sparkConf = 4;
+    map<string, string> hadoopConf = 5;
+    string executorPath = 6; // Executor path for Python jobs.
+    // Databricks job configuration.
+    // Config structure can be found here. https://docs.databricks.com/dev-tools/api/2.0/jobs.html#request-structure.
+    google.protobuf.Struct databricksConf = 7;
+    // Databricks access token. https://docs.databricks.com/dev-tools/api/latest/authentication.html
+    // This token can be set in either flytepropeller or flytekit.
+    string databricksToken = 8;
+    // Domain name of your deployment. Use the form <account>.cloud.databricks.com.
+    // This instance name can be set in either flytepropeller or flytekit.
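The map<string, string> ray_start_params above translate directly to HashMaps in the generated Rust. A sketch of assembling the RayJob from ray.proto, with all sizing and params purely illustrative:

```rust
use std::collections::HashMap;

use flyteidl::plugins::{HeadGroupSpec, RayCluster, RayJob, WorkerGroupSpec};

/// A one-head Ray cluster with a single autoscaled worker group that is
/// torn down ten minutes after the job finishes.
fn small_ray_job(runtime_env_b64: String) -> RayJob {
    let mut params = HashMap::new();
    params.insert("object-store-memory".to_string(), "100000000".to_string());
    RayJob {
        ray_cluster: Some(RayCluster {
            head_group_spec: Some(HeadGroupSpec {
                ray_start_params: params.clone(),
            }),
            worker_group_spec: vec![WorkerGroupSpec {
                group_name: "default".to_string(),
                replicas: 2,
                min_replicas: 1,
                max_replicas: 4,
                ray_start_params: params,
            }],
            enable_autoscaling: true,
        }),
        runtime_env: runtime_env_b64, // base64-encoded, per the field comment
        shutdown_after_job_finishes: true,
        ttl_seconds_after_finished: 600,
    }
}
```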
+ string databricksInstance = 9; +} diff --git a/flyrs/protos/flyteidl/plugins/tensorflow.proto b/flyrs/protos/flyteidl/plugins/tensorflow.proto new file mode 100644 index 0000000000..b5f2d04561 --- /dev/null +++ b/flyrs/protos/flyteidl/plugins/tensorflow.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package flyteidl.plugins; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; + +// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator +message DistributedTensorflowTrainingTask { + // number of worker replicas spawned in the cluster for this job + int32 workers = 1; + // PS -> Parameter server + // number of ps replicas spawned in the cluster for this job + int32 ps_replicas = 2; + // number of chief replicas spawned in the cluster for this job + int32 chief_replicas = 3; + // number of evaluator replicas spawned in the cluster for this job + int32 evaluator_replicas = 4; +} diff --git a/flyrs/protos/flyteidl/plugins/waitable.proto b/flyrs/protos/flyteidl/plugins/waitable.proto new file mode 100644 index 0000000000..dd2138d535 --- /dev/null +++ b/flyrs/protos/flyteidl/plugins/waitable.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +import "flyteidl/core/execution.proto"; +import "flyteidl/core/identifier.proto"; + +package flyteidl.plugins; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; + +// Represents an Execution that was launched and could be waited on. +message Waitable { + core.WorkflowExecutionIdentifier wf_exec_id = 1; + core.WorkflowExecution.Phase phase = 2; + string workflow_id = 3; +} diff --git a/flyrs/protos/flyteidl/service/admin.proto b/flyrs/protos/flyteidl/service/admin.proto new file mode 100644 index 0000000000..2004842b0b --- /dev/null +++ b/flyrs/protos/flyteidl/service/admin.proto @@ -0,0 +1,659 @@ +syntax = "proto3"; +package flyteidl.service; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service"; + +import "google/api/annotations.proto"; +import "flyteidl/admin/project.proto"; +import "flyteidl/admin/project_domain_attributes.proto"; +import "flyteidl/admin/project_attributes.proto"; +import "flyteidl/admin/task.proto"; +import "flyteidl/admin/workflow.proto"; +import "flyteidl/admin/workflow_attributes.proto"; +import "flyteidl/admin/launch_plan.proto"; +import "flyteidl/admin/event.proto"; +import "flyteidl/admin/execution.proto"; +import "flyteidl/admin/matchable_resource.proto"; +import "flyteidl/admin/node_execution.proto"; +import "flyteidl/admin/task_execution.proto"; +import "flyteidl/admin/version.proto"; +import "flyteidl/admin/common.proto"; +import "flyteidl/admin/description_entity.proto"; +import "protoc-gen-openapiv2/options/annotations.proto"; + + +// The following defines an RPC service that is also served over HTTP via grpc-gateway. +// Standard response codes for both are defined here: https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/errors.go +service AdminService { + // Create and upload a :ref:`ref_flyteidl.admin.Task` definition + rpc CreateTask (flyteidl.admin.TaskCreateRequest) returns (flyteidl.admin.TaskCreateResponse) { + option (google.api.http) = { + post: "/api/v1/tasks" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Create and register a task definition." + responses: { + key: "400" + value: { + description: "Returned for bad request that may have failed validation." 
+ } + } + responses: { + key: "409" + value: { + description: "Returned for a request that references an identical entity that has already been registered." + } + } + }; + } + + // Fetch a :ref:`ref_flyteidl.admin.Task` definition. + rpc GetTask (flyteidl.admin.ObjectGetRequest) returns (flyteidl.admin.Task) { + option (google.api.http) = { + get: "/api/v1/tasks/{id.project}/{id.domain}/{id.name}/{id.version}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve an existing task definition." + }; + } + + // Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of task objects. + rpc ListTaskIds (flyteidl.admin.NamedEntityIdentifierListRequest) returns (flyteidl.admin.NamedEntityIdentifierList) { + option (google.api.http) = { + get: "/api/v1/task_ids/{project}/{domain}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Fetch existing task definition identifiers matching input filters." + }; + } + + // Fetch a list of :ref:`ref_flyteidl.admin.Task` definitions. + rpc ListTasks (flyteidl.admin.ResourceListRequest) returns (flyteidl.admin.TaskList) { + option (google.api.http) = { + get: "/api/v1/tasks/{id.project}/{id.domain}/{id.name}" + additional_bindings { + get: "/api/v1/tasks/{id.project}/{id.domain}", + } + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Fetch existing task definitions matching input filters." + }; + } + + // Create and upload a :ref:`ref_flyteidl.admin.Workflow` definition + rpc CreateWorkflow (flyteidl.admin.WorkflowCreateRequest) returns (flyteidl.admin.WorkflowCreateResponse) { + option (google.api.http) = { + post: "/api/v1/workflows" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Create and register a workflow definition." + responses: { + key: "400" + value: { + description: "Returned for bad request that may have failed validation." + } + } + responses: { + key: "409" + value: { + description: "Returned for a request that references an identical entity that has already been registered." + } + } + }; + } + + // Fetch a :ref:`ref_flyteidl.admin.Workflow` definition. + rpc GetWorkflow (flyteidl.admin.ObjectGetRequest) returns (flyteidl.admin.Workflow) { + option (google.api.http) = { + get: "/api/v1/workflows/{id.project}/{id.domain}/{id.name}/{id.version}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve an existing workflow definition." + }; + } + + // Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of workflow objects. + rpc ListWorkflowIds (flyteidl.admin.NamedEntityIdentifierListRequest) returns (flyteidl.admin.NamedEntityIdentifierList) { + option (google.api.http) = { + get: "/api/v1/workflow_ids/{project}/{domain}" + }; + // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + // description: "Fetch an existing workflow definition identifiers matching input filters." + // }; + } + + // Fetch a list of :ref:`ref_flyteidl.admin.Workflow` definitions. 
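List rpcs such as ListWorkflows below map straightforwardly onto the tonic client this patch generates. The following is a minimal sketch, assuming flyrs/build.rs compiles these protos with tonic-build into the usual prost module tree and that FlyteAdmin serves gRPC on localhost:8089; the endpoint, project, and domain values are placeholders, not part of this patch:

```rust
// Hypothetical sketch: assumes `build.rs` ran tonic-build over the protos
// above, generating prost/tonic code for the `flyteidl.*` packages.
pub mod flyteidl {
    pub mod core {
        tonic::include_proto!("flyteidl.core");
    }
    pub mod event {
        tonic::include_proto!("flyteidl.event");
    }
    pub mod admin {
        tonic::include_proto!("flyteidl.admin");
    }
    pub mod service {
        tonic::include_proto!("flyteidl.service");
    }
}

use flyteidl::admin::{NamedEntityIdentifier, ResourceListRequest};
use flyteidl::service::admin_service_client::AdminServiceClient;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Endpoint is an assumption; adjust to your FlyteAdmin deployment.
    let mut client = AdminServiceClient::connect("http://localhost:8089").await?;

    // List the first page of workflow versions in one project/domain.
    let req = ResourceListRequest {
        id: Some(NamedEntityIdentifier {
            project: "flytesnacks".into(),
            domain: "development".into(),
            name: String::new(), // empty: list across all workflow names
            ..Default::default()
        }),
        limit: 10,
        ..Default::default()
    };
    let workflows = client.list_workflows(req).await?.into_inner();
    for wf in workflows.workflows {
        println!("{:?}", wf.id);
    }
    Ok(())
}
```

Leaving `name` empty mirrors the two-segment `additional_bindings` route above: the request matches all workflow names within the project/domain rather than the versions of a single workflow.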
+ rpc ListWorkflows (flyteidl.admin.ResourceListRequest) returns (flyteidl.admin.WorkflowList) { + option (google.api.http) = { + get: "/api/v1/workflows/{id.project}/{id.domain}/{id.name}" + additional_bindings { + get: "/api/v1/workflows/{id.project}/{id.domain}", + } + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Fetch existing workflow definitions matching input filters." + }; + } + + // Create and upload a :ref:`ref_flyteidl.admin.LaunchPlan` definition + rpc CreateLaunchPlan (flyteidl.admin.LaunchPlanCreateRequest) returns (flyteidl.admin.LaunchPlanCreateResponse) { + option (google.api.http) = { + post: "/api/v1/launch_plans" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Create and register a launch plan definition." + responses: { + key: "400" + value: { + description: "Returned for bad request that may have failed validation." + } + } + responses: { + key: "409" + value: { + description: "Returned for a request that references an identical entity that has already been registered." + } + } + }; + } + + // Fetch a :ref:`ref_flyteidl.admin.LaunchPlan` definition. + rpc GetLaunchPlan (flyteidl.admin.ObjectGetRequest) returns (flyteidl.admin.LaunchPlan) { + option (google.api.http) = { + get: "/api/v1/launch_plans/{id.project}/{id.domain}/{id.name}/{id.version}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve an existing launch plan definition." + }; + } + + // Fetch the active version of a :ref:`ref_flyteidl.admin.LaunchPlan`. + rpc GetActiveLaunchPlan (flyteidl.admin.ActiveLaunchPlanRequest) returns (flyteidl.admin.LaunchPlan) { + option (google.api.http) = { + get: "/api/v1/active_launch_plans/{id.project}/{id.domain}/{id.name}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve the active launch plan version specified by input request filters." + }; + } + + // List active versions of :ref:`ref_flyteidl.admin.LaunchPlan`. + rpc ListActiveLaunchPlans (flyteidl.admin.ActiveLaunchPlanListRequest) returns (flyteidl.admin.LaunchPlanList) { + option (google.api.http) = { + get: "/api/v1/active_launch_plans/{project}/{domain}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Fetch the active launch plan versions specified by input request filters." + }; + } + + // Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of launch plan objects. + rpc ListLaunchPlanIds (flyteidl.admin.NamedEntityIdentifierListRequest) returns (flyteidl.admin.NamedEntityIdentifierList) { + option (google.api.http) = { + get: "/api/v1/launch_plan_ids/{project}/{domain}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Fetch existing launch plan definition identifiers matching input filters." + }; + } + + // Fetch a list of :ref:`ref_flyteidl.admin.LaunchPlan` definitions. + rpc ListLaunchPlans (flyteidl.admin.ResourceListRequest) returns (flyteidl.admin.LaunchPlanList) { + option (google.api.http) = { + get: "/api/v1/launch_plans/{id.project}/{id.domain}/{id.name}" + additional_bindings { + get: "/api/v1/launch_plans/{id.project}/{id.domain}" + } + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Fetch existing launch plan definitions matching input filters." 
+    };
+  }
+
+  // Updates the status of a registered :ref:`ref_flyteidl.admin.LaunchPlan`.
+  rpc UpdateLaunchPlan (flyteidl.admin.LaunchPlanUpdateRequest) returns (flyteidl.admin.LaunchPlanUpdateResponse) {
+    option (google.api.http) = {
+      put: "/api/v1/launch_plans/{id.project}/{id.domain}/{id.name}/{id.version}"
+      body: "*"
+    };
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      description: "Update the status of an existing launch plan definition. "
+        "At most one launch plan version for a given {project, domain, name} can be active at a time. "
+        "If this call sets a launch plan to active and an existing version is already active, the result of this call will be that the "
+        "formerly active launch plan will be made inactive and the launch plan specified in this request will be made active. "
+        "In the event that the formerly active launch plan had a schedule associated with it, this schedule will be disabled. "
+        "If the reference launch plan in this request is being set to active and has a schedule associated with it, the schedule will be enabled."
+    };
+  }
+
+  // Triggers the creation of a :ref:`ref_flyteidl.admin.Execution`
+  rpc CreateExecution (flyteidl.admin.ExecutionCreateRequest) returns (flyteidl.admin.ExecutionCreateResponse) {
+    option (google.api.http) = {
+      post: "/api/v1/executions"
+      body: "*"
+    };
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      description: "Create a workflow execution."
+    };
+  }
+
+  // Triggers the creation of an identical :ref:`ref_flyteidl.admin.Execution`
+  rpc RelaunchExecution (flyteidl.admin.ExecutionRelaunchRequest) returns (flyteidl.admin.ExecutionCreateResponse) {
+    option (google.api.http) = {
+      post: "/api/v1/executions/relaunch"
+      body: "*"
+    };
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      description: "Relaunch a workflow execution."
+    };
+  }
+
+  // Recreates a previously-run workflow execution that will only start executing from the last known failure point.
+  // In Recover mode, users cannot change any input parameters or update the version of the execution.
+  // This is extremely useful to recover from system errors and byzantine faults like loss of the K8s cluster, bugs in the platform or instability, machine failures,
+  // downstream system failures (downstream services), or simply to recover executions that failed because of retry exhaustion and should complete if tried again.
+  // See :ref:`ref_flyteidl.admin.ExecutionRecoverRequest` for more details.
+  rpc RecoverExecution (flyteidl.admin.ExecutionRecoverRequest) returns (flyteidl.admin.ExecutionCreateResponse) {
+    option (google.api.http) = {
+      post: "/api/v1/executions/recover"
+      body: "*"
+    };
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      description: "Recreates a previously-run workflow execution that will only start executing from the last known failure point. "
+        "In Recover mode, users cannot change any input parameters or update the version of the execution. "
+        "This is extremely useful to recover from system errors and byzantine faults like loss of the K8s cluster, bugs in the platform or instability, machine failures, "
+        "downstream system failures (downstream services), or simply to recover executions that failed because of retry exhaustion and should complete if tried again."
+    };
+  }
+
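From the generated client's perspective, relaunch and recover are plain unary calls. A hedged sketch, with field names assumed from the flyteidl.admin messages and the module layout from the earlier sketch; the execution names are hypothetical:

```rust
use flyteidl::admin::ExecutionRelaunchRequest;
use flyteidl::core::WorkflowExecutionIdentifier;
use flyteidl::service::admin_service_client::AdminServiceClient;
use tonic::transport::Channel;

// Relaunch a finished (or failed) execution under a new, caller-chosen name.
async fn relaunch(
    client: &mut AdminServiceClient<Channel>,
) -> Result<(), tonic::Status> {
    let req = ExecutionRelaunchRequest {
        id: Some(WorkflowExecutionIdentifier {
            project: "flytesnacks".into(),
            domain: "development".into(),
            name: "f8a2e6b0".into(), // hypothetical source execution name
            ..Default::default()
        }),
        name: "f8a2e6b0-relaunch".into(), // name for the new execution
        ..Default::default()
    };
    let created = client.relaunch_execution(req).await?.into_inner();
    println!("new execution: {:?}", created.id);
    Ok(())
}
```

+  // Fetches a :ref:`ref_flyteidl.admin.Execution`.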
+ rpc GetExecution (flyteidl.admin.WorkflowExecutionGetRequest) returns (flyteidl.admin.Execution) { + option (google.api.http) = { + get: "/api/v1/executions/{id.project}/{id.domain}/{id.name}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve an existing workflow execution." + }; + } + + // Update execution belonging to project domain :ref:`ref_flyteidl.admin.Execution`. + rpc UpdateExecution (flyteidl.admin.ExecutionUpdateRequest) returns (flyteidl.admin.ExecutionUpdateResponse) { + option (google.api.http) = { + put: "/api/v1/executions/{id.project}/{id.domain}/{id.name}" + body: "*" + }; + // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + // description: "Update execution belonging to project domain." + // }; + } + + // Fetches input and output data for a :ref:`ref_flyteidl.admin.Execution`. + rpc GetExecutionData (flyteidl.admin.WorkflowExecutionGetDataRequest) returns (flyteidl.admin.WorkflowExecutionGetDataResponse) { + option (google.api.http) = { + get: "/api/v1/data/executions/{id.project}/{id.domain}/{id.name}" + }; + // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + // description: "Retrieve input and output data from an existing workflow execution." + // }; + }; + + // Fetch a list of :ref:`ref_flyteidl.admin.Execution`. + rpc ListExecutions (flyteidl.admin.ResourceListRequest) returns (flyteidl.admin.ExecutionList) { + option (google.api.http) = { + get: "/api/v1/executions/{id.project}/{id.domain}" + }; + // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + // description: "Fetch existing workflow executions matching input filters." + // }; + } + + // Terminates an in-progress :ref:`ref_flyteidl.admin.Execution`. + rpc TerminateExecution (flyteidl.admin.ExecutionTerminateRequest) returns (flyteidl.admin.ExecutionTerminateResponse) { + option (google.api.http) = { + delete: "/api/v1/executions/{id.project}/{id.domain}/{id.name}" + body: "*" + }; + // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + // description: "Terminate the active workflow execution specified in the request." + // }; + } + + // Fetches a :ref:`ref_flyteidl.admin.NodeExecution`. + rpc GetNodeExecution (flyteidl.admin.NodeExecutionGetRequest) returns (flyteidl.admin.NodeExecution) { + option (google.api.http) = { + get: "/api/v1/node_executions/{id.execution_id.project}/{id.execution_id.domain}/{id.execution_id.name}/{id.node_id}" + }; + // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + // description: "Retrieve an existing node execution." + // }; + } + + // Fetches a :ref:`ref_flyteidl.admin.DynamicNodeWorkflowResponse`. + rpc GetDynamicNodeWorkflow (flyteidl.admin.GetDynamicNodeWorkflowRequest) returns (flyteidl.admin.DynamicNodeWorkflowResponse) { + option (google.api.http) = { + get: "/api/v1/node_executions/{id.execution_id.project}/{id.execution_id.domain}/{id.execution_id.name}/{id.node_id}/dynamic_workflow" + }; + // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + // description: "Retrieve a workflow closure from a dynamic node execution." + // }; + } + + // Fetch a list of :ref:`ref_flyteidl.admin.NodeExecution`. 
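The node-execution listing defined next is token-paginated like the other list rpcs: `token` is empty on the first request, and an empty response token marks the last page. A sketch of draining all pages, under the same assumed module layout:

```rust
use flyteidl::admin::NodeExecutionListRequest;
use flyteidl::core::WorkflowExecutionIdentifier;
use flyteidl::service::admin_service_client::AdminServiceClient;
use tonic::transport::Channel;

// Walk every page of node executions for one workflow execution.
async fn list_all_node_executions(
    client: &mut AdminServiceClient<Channel>,
    exec: WorkflowExecutionIdentifier,
) -> Result<(), tonic::Status> {
    let mut token = String::new();
    loop {
        let page = client
            .list_node_executions(NodeExecutionListRequest {
                workflow_execution_id: Some(exec.clone()),
                limit: 100,
                token: token.clone(),
                ..Default::default()
            })
            .await?
            .into_inner();
        for ne in &page.node_executions {
            println!("{:?}", ne.id);
        }
        if page.token.is_empty() {
            break; // no more pages
        }
        token = page.token;
    }
    Ok(())
}
```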
+ rpc ListNodeExecutions (flyteidl.admin.NodeExecutionListRequest) returns (flyteidl.admin.NodeExecutionList) { + option (google.api.http) = { + get: "/api/v1/node_executions/{workflow_execution_id.project}/{workflow_execution_id.domain}/{workflow_execution_id.name}" + }; + // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + // description: "Fetch existing node executions matching input filters." + // }; + } + + // Fetch a list of :ref:`ref_flyteidl.admin.NodeExecution` launched by the reference :ref:`ref_flyteidl.admin.TaskExecution`. + rpc ListNodeExecutionsForTask (flyteidl.admin.NodeExecutionForTaskListRequest) returns (flyteidl.admin.NodeExecutionList) { + option (google.api.http) = { + get: "/api/v1/children/task_executions/{task_execution_id.node_execution_id.execution_id.project}/{task_execution_id.node_execution_id.execution_id.domain}/{task_execution_id.node_execution_id.execution_id.name}/{task_execution_id.node_execution_id.node_id}/{task_execution_id.task_id.project}/{task_execution_id.task_id.domain}/{task_execution_id.task_id.name}/{task_execution_id.task_id.version}/{task_execution_id.retry_attempt}" + }; + // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + // description: "Fetch child node executions launched by the specified task execution." + // }; + } + + // Fetches input and output data for a :ref:`ref_flyteidl.admin.NodeExecution`. + rpc GetNodeExecutionData (flyteidl.admin.NodeExecutionGetDataRequest) returns (flyteidl.admin.NodeExecutionGetDataResponse) { + option (google.api.http) = { + get: "/api/v1/data/node_executions/{id.execution_id.project}/{id.execution_id.domain}/{id.execution_id.name}/{id.node_id}" + }; + // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + // description: "Retrieve input and output data from an existing node execution." + // }; + }; + + // Registers a :ref:`ref_flyteidl.admin.Project` with the Flyte deployment. + rpc RegisterProject (flyteidl.admin.ProjectRegisterRequest) returns (flyteidl.admin.ProjectRegisterResponse) { + option (google.api.http) = { + post: "/api/v1/projects" + body: "*" + }; + // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + // description: "Register a project." + // }; + } + + // Updates an existing :ref:`ref_flyteidl.admin.Project` + // flyteidl.admin.Project should be passed but the domains property should be empty; + // it will be ignored in the handler as domains cannot be updated via this API. + rpc UpdateProject (flyteidl.admin.Project) returns (flyteidl.admin.ProjectUpdateResponse) { + option (google.api.http) = { + put: "/api/v1/projects/{id}" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Update a project." + }; + } + + // Fetches a :ref:`ref_flyteidl.admin.Project` + rpc GetProject (flyteidl.admin.ProjectGetRequest) returns (flyteidl.admin.Project) { + option (google.api.http) = { + get: "/api/v1/projects/{id}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Fetch a registered project." + }; + } + + // Fetches a list of :ref:`ref_flyteidl.admin.Project` + rpc ListProjects (flyteidl.admin.ProjectListRequest) returns (flyteidl.admin.Projects) { + option (google.api.http) = { + get: "/api/v1/projects" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Fetch registered projects." 
+ }; + } + + // Indicates a :ref:`ref_flyteidl.event.WorkflowExecutionEvent` has occurred. + rpc CreateWorkflowEvent (flyteidl.admin.WorkflowExecutionEventRequest) returns (flyteidl.admin.WorkflowExecutionEventResponse) { + option (google.api.http) = { + post: "/api/v1/events/workflows" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Create a workflow execution event recording a phase transition." + }; + } + + // Indicates a :ref:`ref_flyteidl.event.NodeExecutionEvent` has occurred. + rpc CreateNodeEvent (flyteidl.admin.NodeExecutionEventRequest) returns (flyteidl.admin.NodeExecutionEventResponse) { + option (google.api.http) = { + post: "/api/v1/events/nodes" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Create a node execution event recording a phase transition." + }; + } + + // Indicates a :ref:`ref_flyteidl.event.TaskExecutionEvent` has occurred. + rpc CreateTaskEvent (flyteidl.admin.TaskExecutionEventRequest) returns (flyteidl.admin.TaskExecutionEventResponse) { + option (google.api.http) = { + post: "/api/v1/events/tasks" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Create a task execution event recording a phase transition." + }; + } + + // Fetches a :ref:`ref_flyteidl.admin.TaskExecution`. + rpc GetTaskExecution (flyteidl.admin.TaskExecutionGetRequest) returns (flyteidl.admin.TaskExecution) { + option (google.api.http) = { + get: "/api/v1/task_executions/{id.node_execution_id.execution_id.project}/{id.node_execution_id.execution_id.domain}/{id.node_execution_id.execution_id.name}/{id.node_execution_id.node_id}/{id.task_id.project}/{id.task_id.domain}/{id.task_id.name}/{id.task_id.version}/{id.retry_attempt}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve an existing task execution." + }; + } + + // Fetches a list of :ref:`ref_flyteidl.admin.TaskExecution`. + rpc ListTaskExecutions (flyteidl.admin.TaskExecutionListRequest) returns (flyteidl.admin.TaskExecutionList) { + option (google.api.http) = { + get: "/api/v1/task_executions/{node_execution_id.execution_id.project}/{node_execution_id.execution_id.domain}/{node_execution_id.execution_id.name}/{node_execution_id.node_id}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Fetch existing task executions matching input filters." + }; + + } + + // Fetches input and output data for a :ref:`ref_flyteidl.admin.TaskExecution`. + rpc GetTaskExecutionData (flyteidl.admin.TaskExecutionGetDataRequest) returns (flyteidl.admin.TaskExecutionGetDataResponse) { + option (google.api.http) = { + get: "/api/v1/data/task_executions/{id.node_execution_id.execution_id.project}/{id.node_execution_id.execution_id.domain}/{id.node_execution_id.execution_id.name}/{id.node_execution_id.node_id}/{id.task_id.project}/{id.task_id.domain}/{id.task_id.name}/{id.task_id.version}/{id.retry_attempt}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve input and output data from an existing task execution." + }; + } + + // Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. 
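The attribute-update rpcs below take a oneof-wrapped payload rather than flat fields. A sketch of overriding max_parallelism for one project-domain combination, with message shapes assumed from flyteidl/admin/matchable_resource.proto and the prost oneof naming convention:

```rust
use flyteidl::admin::{
    matching_attributes::Target, MatchingAttributes, ProjectDomainAttributes,
    ProjectDomainAttributesUpdateRequest, WorkflowExecutionConfig,
};
use flyteidl::service::admin_service_client::AdminServiceClient;
use tonic::transport::Channel;

// Override the workflow-execution config for one project/domain combination.
async fn set_max_parallelism(
    client: &mut AdminServiceClient<Channel>,
) -> Result<(), tonic::Status> {
    let attrs = ProjectDomainAttributes {
        project: "flytesnacks".into(),
        domain: "development".into(),
        matching_attributes: Some(MatchingAttributes {
            // The oneof selects which kind of matchable attribute is set.
            target: Some(Target::WorkflowExecutionConfig(WorkflowExecutionConfig {
                max_parallelism: 10,
                ..Default::default()
            })),
        }),
        ..Default::default()
    };
    client
        .update_project_domain_attributes(ProjectDomainAttributesUpdateRequest {
            attributes: Some(attrs),
        })
        .await?;
    Ok(())
}
```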
+  rpc UpdateProjectDomainAttributes (flyteidl.admin.ProjectDomainAttributesUpdateRequest) returns (flyteidl.admin.ProjectDomainAttributesUpdateResponse) {
+    option (google.api.http) = {
+      put: "/api/v1/project_domain_attributes/{attributes.project}/{attributes.domain}"
+      body: "*"
+    };
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      description: "Update the customized resource attributes associated with a project-domain combination"
+    };
+  }
+
+  // Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain.
+  rpc GetProjectDomainAttributes (flyteidl.admin.ProjectDomainAttributesGetRequest) returns (flyteidl.admin.ProjectDomainAttributesGetResponse) {
+    option (google.api.http) = {
+      get: "/api/v1/project_domain_attributes/{project}/{domain}"
+    };
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      description: "Retrieve the customized resource attributes associated with a project-domain combination"
+    };
+  }
+
+  // Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain.
+  rpc DeleteProjectDomainAttributes (flyteidl.admin.ProjectDomainAttributesDeleteRequest) returns (flyteidl.admin.ProjectDomainAttributesDeleteResponse) {
+    option (google.api.http) = {
+      delete: "/api/v1/project_domain_attributes/{project}/{domain}"
+      body: "*"
+    };
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      description: "Delete the customized resource attributes associated with a project-domain combination"
+    };
+  }
+
+  // Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` at the project level
+  rpc UpdateProjectAttributes (flyteidl.admin.ProjectAttributesUpdateRequest) returns (flyteidl.admin.ProjectAttributesUpdateResponse) {
+    option (google.api.http) = {
+      put: "/api/v1/project_attributes/{attributes.project}"
+      body: "*"
+    };
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      description: "Update the customized resource attributes associated with a project"
+    };
+  }
+
+  // Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` at the project level.
+  rpc GetProjectAttributes (flyteidl.admin.ProjectAttributesGetRequest) returns (flyteidl.admin.ProjectAttributesGetResponse) {
+    option (google.api.http) = {
+      get: "/api/v1/project_attributes/{project}"
+    };
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      description: "Retrieve the customized resource attributes associated with a project"
+    };
+  }
+
+  // Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` at the project level.
+  rpc DeleteProjectAttributes (flyteidl.admin.ProjectAttributesDeleteRequest) returns (flyteidl.admin.ProjectAttributesDeleteResponse) {
+    option (google.api.http) = {
+      delete: "/api/v1/project_attributes/{project}"
+      body: "*"
+    };
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      description: "Delete the customized resource attributes associated with a project"
+    };
+  }
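And the read side at project scope, again a sketch with assumed enum and field names from flyteidl.admin:

```rust
use flyteidl::admin::{MatchableResource, ProjectAttributesGetRequest};
use flyteidl::service::admin_service_client::AdminServiceClient;
use tonic::transport::Channel;

// Fetch the workflow-execution-config overrides registered at project scope.
async fn get_project_attributes(
    client: &mut AdminServiceClient<Channel>,
) -> Result<(), tonic::Status> {
    let resp = client
        .get_project_attributes(ProjectAttributesGetRequest {
            project: "flytesnacks".into(),
            // Select which matchable resource kind to fetch.
            resource_type: MatchableResource::WorkflowExecutionConfig as i32,
            ..Default::default()
        })
        .await?
        .into_inner();
    println!("{:?}", resp.attributes);
    Ok(())
}
```

+  // Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow.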
+ rpc UpdateWorkflowAttributes (flyteidl.admin.WorkflowAttributesUpdateRequest) returns (flyteidl.admin.WorkflowAttributesUpdateResponse) { + option (google.api.http) = { + put: "/api/v1/workflow_attributes/{attributes.project}/{attributes.domain}/{attributes.workflow}" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Update the customized resource attributes associated with a project, domain and workflow combination" + }; + } + + // Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. + rpc GetWorkflowAttributes (flyteidl.admin.WorkflowAttributesGetRequest) returns (flyteidl.admin.WorkflowAttributesGetResponse) { + option (google.api.http) = { + get: "/api/v1/workflow_attributes/{project}/{domain}/{workflow}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve the customized resource attributes associated with a project, domain and workflow combination" + }; + } + + // Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. + rpc DeleteWorkflowAttributes (flyteidl.admin.WorkflowAttributesDeleteRequest) returns (flyteidl.admin.WorkflowAttributesDeleteResponse) { + option (google.api.http) = { + delete: "/api/v1/workflow_attributes/{project}/{domain}/{workflow}" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Delete the customized resource attributes associated with a project, domain and workflow combination" + }; + } + + // Lists custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a specific resource type. + rpc ListMatchableAttributes (flyteidl.admin.ListMatchableAttributesRequest) returns (flyteidl.admin.ListMatchableAttributesResponse) { + option (google.api.http) = { + get: "/api/v1/matchable_attributes" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve a list of MatchableAttributesConfiguration objects." + }; + } + + // Returns a list of :ref:`ref_flyteidl.admin.NamedEntity` objects. + rpc ListNamedEntities (flyteidl.admin.NamedEntityListRequest) returns (flyteidl.admin.NamedEntityList) { + option (google.api.http) = { + get: "/api/v1/named_entities/{resource_type}/{project}/{domain}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve a list of NamedEntity objects sharing a common resource type, project, and domain." + }; + } + + // Returns a :ref:`ref_flyteidl.admin.NamedEntity` object. + rpc GetNamedEntity (flyteidl.admin.NamedEntityGetRequest) returns (flyteidl.admin.NamedEntity) { + option (google.api.http) = { + get: "/api/v1/named_entities/{resource_type}/{id.project}/{id.domain}/{id.name}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve a NamedEntity object." + }; + } + + // Updates a :ref:`ref_flyteidl.admin.NamedEntity` object. 
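The UpdateNamedEntity rpc below is the call that archiving an entity ultimately goes through. A sketch of archiving a workflow, with the enum variant names assumed from the prost-generated code:

```rust
use flyteidl::admin::{
    NamedEntityIdentifier, NamedEntityMetadata, NamedEntityState, NamedEntityUpdateRequest,
};
use flyteidl::core::ResourceType;
use flyteidl::service::admin_service_client::AdminServiceClient;
use tonic::transport::Channel;

// Archive a workflow's named entity so it stops appearing in default listings.
async fn archive_workflow(
    client: &mut AdminServiceClient<Channel>,
) -> Result<(), tonic::Status> {
    let req = NamedEntityUpdateRequest {
        resource_type: ResourceType::Workflow as i32,
        id: Some(NamedEntityIdentifier {
            project: "flytesnacks".into(),
            domain: "development".into(),
            name: "my_wf".into(), // hypothetical workflow name
            ..Default::default()
        }),
        metadata: Some(NamedEntityMetadata {
            state: NamedEntityState::NamedEntityArchived as i32,
            ..Default::default()
        }),
    };
    client.update_named_entity(req).await?;
    Ok(())
}
```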
+ rpc UpdateNamedEntity (flyteidl.admin.NamedEntityUpdateRequest) returns (flyteidl.admin.NamedEntityUpdateResponse) { + option (google.api.http) = { + put: "/api/v1/named_entities/{resource_type}/{id.project}/{id.domain}/{id.name}" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Update the fields associated with a NamedEntity" + }; + } + + rpc GetVersion (flyteidl.admin.GetVersionRequest) returns (flyteidl.admin.GetVersionResponse) { + option (google.api.http) = { + get: "/api/v1/version" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve the Version (including the Build information) for FlyteAdmin service" + }; + } + + // Fetch a :ref:`ref_flyteidl.admin.DescriptionEntity` object. + rpc GetDescriptionEntity (flyteidl.admin.ObjectGetRequest) returns (flyteidl.admin.DescriptionEntity) { + option (google.api.http) = { + get: "/api/v1/description_entities/{id.resource_type}/{id.project}/{id.domain}/{id.name}/{id.version}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve an existing description entity description." + }; + } + + // Fetch a list of :ref:`ref_flyteidl.admin.DescriptionEntity` definitions. + rpc ListDescriptionEntities (flyteidl.admin.DescriptionEntityListRequest) returns (flyteidl.admin.DescriptionEntityList) { + option (google.api.http) = { + get: "/api/v1/description_entities/{resource_type}/{id.project}/{id.domain}/{id.name}" + additional_bindings { + get: "/api/v1/description_entities/{resource_type}/{id.project}/{id.domain}" + } + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Fetch existing description entity definitions matching input filters." + }; + } + + // Fetches runtime metrics for a :ref:`ref_flyteidl.admin.Execution`. + rpc GetExecutionMetrics (flyteidl.admin.WorkflowExecutionGetMetricsRequest) returns (flyteidl.admin.WorkflowExecutionGetMetricsResponse) { + option (google.api.http) = { + get: "/api/v1/metrics/executions/{id.project}/{id.domain}/{id.name}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve metrics from an existing workflow execution." + }; + }; +} diff --git a/flyrs/protos/flyteidl/service/agent.proto b/flyrs/protos/flyteidl/service/agent.proto new file mode 100644 index 0000000000..cd6b93a972 --- /dev/null +++ b/flyrs/protos/flyteidl/service/agent.proto @@ -0,0 +1,79 @@ +syntax = "proto3"; +package flyteidl.service; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service"; + +import "google/api/annotations.proto"; +import "flyteidl/admin/agent.proto"; + +// SyncAgentService defines an RPC Service that allows propeller to send the request to the agent server synchronously. +service SyncAgentService { + // ExecuteTaskSync streams the create request and inputs to the agent service and streams the outputs back. + rpc ExecuteTaskSync (stream flyteidl.admin.ExecuteTaskSyncRequest) returns (stream flyteidl.admin.ExecuteTaskSyncResponse){ + option (google.api.http) = { + post: "/api/v1/agent/task/stream" + body: "*" + }; + }; +} + +// AsyncAgentService defines an RPC Service that allows propeller to send the request to the agent server asynchronously. +service AsyncAgentService { + // CreateTask sends a task create request to the agent service. 
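CreateTask below hands back an opaque resource_meta handle that subsequent calls key off. A sketch of polling such a task via AsyncAgentService, where the endpoint, task category, and handle are all placeholders and the message shapes are assumed from flyteidl/admin/agent.proto:

```rust
use flyteidl::admin::{GetTaskRequest, TaskCategory};
use flyteidl::service::async_agent_service_client::AsyncAgentServiceClient;

// Poll an agent-managed task by the opaque resource_meta handle returned
// from CreateTask. All concrete values here are placeholders.
async fn poll_agent_task() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = AsyncAgentServiceClient::connect("http://localhost:8000").await?;
    let resp = client
        .get_task(GetTaskRequest {
            task_category: Some(TaskCategory {
                name: "bigquery_query_job_task".into(), // hypothetical category
                version: 0,
            }),
            resource_meta: b"opaque-handle-from-create-task".to_vec(),
            ..Default::default() // leaves the deprecated task_type empty
        })
        .await?
        .into_inner();
    println!("{:?}", resp.resource);
    Ok(())
}
```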
+ rpc CreateTask (flyteidl.admin.CreateTaskRequest) returns (flyteidl.admin.CreateTaskResponse){ + option (google.api.http) = { + post: "/api/v1/agent/task" + body: "*" + }; + }; + + // Get job status. + rpc GetTask (flyteidl.admin.GetTaskRequest) returns (flyteidl.admin.GetTaskResponse){ + option (google.api.http) = { + get: "/api/v1/agent/task/{task_category.name}/{task_category.version}/{resource_meta}" + }; + }; + + // Delete the task resource. + rpc DeleteTask (flyteidl.admin.DeleteTaskRequest) returns (flyteidl.admin.DeleteTaskResponse){ + option (google.api.http) = { + delete: "/api/v1/agent/task_executions/{task_category.name}/{task_category.version}/{resource_meta}" + }; + }; + + // GetTaskMetrics returns one or more task execution metrics, if available. + // + // Errors include + // * OutOfRange if metrics are not available for the specified task time range + // * various other errors + rpc GetTaskMetrics(flyteidl.admin.GetTaskMetricsRequest) returns (flyteidl.admin.GetTaskMetricsResponse){ + option (google.api.http) = { + get: "/api/v1/agent/task/metrics/{task_category.name}/{task_category.version}/{resource_meta}" + }; + }; + + // GetTaskLogs returns task execution logs, if available. + rpc GetTaskLogs(flyteidl.admin.GetTaskLogsRequest) returns (stream flyteidl.admin.GetTaskLogsResponse){ + option (google.api.http) = { + get: "/api/v1/agent/task/logs/{task_category.name}/{task_category.version}/{resource_meta}" + }; + }; +} + +// AgentMetadataService defines an RPC service that is also served over HTTP via grpc-gateway. +// This service allows propeller or users to get the metadata of agents. +service AgentMetadataService { + // Fetch a :ref:`ref_flyteidl.admin.Agent` definition. + rpc GetAgent (flyteidl.admin.GetAgentRequest) returns (flyteidl.admin.GetAgentResponse){ + option (google.api.http) = { + get: "/api/v1/agent/{name}" + }; + }; + + // Fetch a list of :ref:`ref_flyteidl.admin.Agent` definitions. + rpc ListAgents (flyteidl.admin.ListAgentsRequest) returns (flyteidl.admin.ListAgentsResponse){ + option (google.api.http) = { + get: "/api/v1/agents" + }; + }; +} diff --git a/flyrs/protos/flyteidl/service/auth.proto b/flyrs/protos/flyteidl/service/auth.proto new file mode 100644 index 0000000000..a340f05add --- /dev/null +++ b/flyrs/protos/flyteidl/service/auth.proto @@ -0,0 +1,94 @@ +syntax = "proto3"; +package flyteidl.service; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service"; + +import "google/api/annotations.proto"; +import "protoc-gen-openapiv2/options/annotations.proto"; + +message OAuth2MetadataRequest {} + +// OAuth2MetadataResponse defines an RFC-Compliant response for /.well-known/oauth-authorization-server metadata +// as defined in https://tools.ietf.org/html/rfc8414 +message OAuth2MetadataResponse { + // Defines the issuer string in all JWT tokens this server issues. The issuer can be admin itself or an external + // issuer. + string issuer = 1; + + // URL of the authorization server's authorization endpoint [RFC6749]. This is REQUIRED unless no grant types are + // supported that use the authorization endpoint. + string authorization_endpoint = 2; + + // URL of the authorization server's token endpoint [RFC6749]. + string token_endpoint = 3; + + // Array containing a list of the OAuth 2.0 response_type values that this authorization server supports. + repeated string response_types_supported = 4; + + // JSON array containing a list of the OAuth 2.0 [RFC6749] scope values that this authorization server supports. 
+ repeated string scopes_supported = 5; + + // JSON array containing a list of client authentication methods supported by this token endpoint. + repeated string token_endpoint_auth_methods_supported = 6; + + // URL of the authorization server's JWK Set [JWK] document. The referenced document contains the signing key(s) the + // client uses to validate signatures from the authorization server. + string jwks_uri = 7; + + // JSON array containing a list of Proof Key for Code Exchange (PKCE) [RFC7636] code challenge methods supported by + // this authorization server. + repeated string code_challenge_methods_supported = 8; + + // JSON array containing a list of the OAuth 2.0 grant type values that this authorization server supports. + repeated string grant_types_supported = 9; + + // URL of the authorization server's device authorization endpoint, as defined in Section 3.1 of [RFC8628] + string device_authorization_endpoint = 10; +} + +message PublicClientAuthConfigRequest {} + +// FlyteClientResponse encapsulates public information that flyte clients (CLIs... etc.) can use to authenticate users. +message PublicClientAuthConfigResponse { + // client_id to use when initiating OAuth2 authorization requests. + string client_id = 1; + // redirect uri to use when initiating OAuth2 authorization requests. + string redirect_uri = 2; + // scopes to request when initiating OAuth2 authorization requests. + repeated string scopes = 3; + // Authorization Header to use when passing Access Tokens to the server. If not provided, the client should use the + // default http `Authorization` header. + string authorization_metadata_key = 4; + // ServiceHttpEndpoint points to the http endpoint for the backend. If empty, clients can assume the endpoint used + // to configure the gRPC connection can be used for the http one respecting the insecure flag to choose between + // SSL or no SSL connections. + string service_http_endpoint = 5; + // audience to use when initiating OAuth2 authorization requests. + string audience = 6; +} + +// The following defines an RPC service that is also served over HTTP via grpc-gateway. +// Standard response codes for both are defined here: https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/errors.go +// RPCs defined in this service must be anonymously accessible. +service AuthMetadataService { + // Anonymously accessible. Retrieves local or external oauth authorization server metadata. + rpc GetOAuth2Metadata (OAuth2MetadataRequest) returns (OAuth2MetadataResponse) { + option (google.api.http) = { + get: "/.well-known/oauth-authorization-server" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieves OAuth2 authorization server metadata. This endpoint is anonymously accessible." + }; + } + + // Anonymously accessible. Retrieves the client information clients should use when initiating OAuth2 authorization + // requests. + rpc GetPublicClientConfig (PublicClientAuthConfigRequest) returns (PublicClientAuthConfigResponse) { + option (google.api.http) = { + get: "/config/v1/flyte_client" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieves public flyte client info. This endpoint is anonymously accessible." 
+    };
+  }
+}
diff --git a/flyrs/protos/flyteidl/service/dataproxy.proto b/flyrs/protos/flyteidl/service/dataproxy.proto
new file mode 100644
index 0000000000..86c7c4d977
--- /dev/null
+++ b/flyrs/protos/flyteidl/service/dataproxy.proto
@@ -0,0 +1,205 @@
+syntax = "proto3";
+package flyteidl.service;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service";
+
+import "google/api/annotations.proto";
+import "protoc-gen-openapiv2/options/annotations.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "flyteidl/core/identifier.proto";
+import "flyteidl/core/literals.proto";
+
+
+message CreateUploadLocationResponse {
+  // SignedUrl specifies the url to use to upload content to (e.g. https://my-bucket.s3.amazonaws.com/randomstring/suffix.tar?X-...)
+  string signed_url = 1;
+
+  // NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar)
+  string native_url = 2;
+
+  // ExpiresAt defines when the signed URL expires.
+  google.protobuf.Timestamp expires_at = 3;
+
+  // Data proxy generates these headers for the client, and the client has to add these headers to the request when uploading the file.
+  map<string, string> headers = 4;
+}
+
+// CreateUploadLocationRequest specifies the request for the CreateUploadLocation API.
+// The implementation in data proxy service will create the s3 location with some server side configured prefixes,
+// and then:
+// - project/domain/(a deterministic str representation of the content_md5)/filename (if present); OR
+// - project/domain/filename_root (if present)/filename (if present).
+message CreateUploadLocationRequest {
+  // Project to create the upload location for
+  // +required
+  string project = 1;
+
+  // Domain to create the upload location for.
+  // +required
+  string domain = 2;
+
+  // Filename specifies a desired suffix for the generated location. E.g. `file.py` or `pre/fix/file.zip`.
+  // +optional. By default, the service will generate a consistent name based on the provided parameters.
+  string filename = 3;
+
+  // ExpiresIn defines a requested expiration duration for the generated url. The request will be rejected if this
+  // exceeds the platform allowed max.
+  // +optional. The default value comes from a global config.
+  google.protobuf.Duration expires_in = 4;
+
+  // ContentMD5 restricts the upload location to the specific MD5 provided. The ContentMD5 will also appear in the
+  // generated path.
+  // +required
+  bytes content_md5 = 5;
+
+  // If present, data proxy will use this string in lieu of the md5 hash in the path. When the filename is also included
+  // this makes the upload location deterministic. The native url will still be prefixed by the upload location prefix
+  // in data proxy config. This option is useful when uploading multiple files.
+  // +optional
+  string filename_root = 6;
+
+  // If true, the data proxy will add content_md5 to the metadata of the signed URL and
+  // it will force clients to add this metadata to the object.
+  // This makes sure dataproxy is backward compatible with the old flytekit.
+  bool add_content_md5_metadata = 7;
+
+
+  // Optional, org key applied to the resource.
+  string org = 8;
+}
+
+// CreateDownloadLocationRequest specifies the request for the CreateDownloadLocation API.
+message CreateDownloadLocationRequest {
+  option deprecated = true;
+  // NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar)
+  string native_url = 1;
+
+  // ExpiresIn defines a requested expiration duration for the generated url. The request will be rejected if this
+  // exceeds the platform allowed max.
+  // +optional. The default value comes from a global config.
+  google.protobuf.Duration expires_in = 2;
+}
+
+message CreateDownloadLocationResponse {
+  option deprecated = true;
+  // SignedUrl specifies the url to use to download content from (e.g. https://my-bucket.s3.amazonaws.com/randomstring/suffix.tar?X-...)
+  string signed_url = 1;
+  // ExpiresAt defines when the signed URL expires.
+  google.protobuf.Timestamp expires_at = 2;
+}
+
+// ArtifactType
+enum ArtifactType {
+  // ARTIFACT_TYPE_UNDEFINED is the default, often invalid, value for the enum.
+  ARTIFACT_TYPE_UNDEFINED = 0;
+
+  // ARTIFACT_TYPE_DECK refers to the deck html file optionally generated after a task, a workflow or a launch plan
+  // finishes executing.
+  ARTIFACT_TYPE_DECK = 1;
+}
+
+// CreateDownloadLinkRequest defines the request parameters to create a download link (signed url)
+message CreateDownloadLinkRequest {
+  // ArtifactType of the artifact requested.
+  ArtifactType artifact_type = 1;
+
+  // ExpiresIn defines a requested expiration duration for the generated url. The request will be rejected if this
+  // exceeds the platform allowed max.
+  // +optional. The default value comes from a global config.
+  google.protobuf.Duration expires_in = 2;
+
+  oneof source {
+    // NodeId is the unique identifier for the node execution. For a task node, this will retrieve the output of the
+    // most recent attempt of the task.
+    core.NodeExecutionIdentifier node_execution_id = 3;
+  }
+}
+
+// CreateDownloadLinkResponse defines the response for the generated links
+message CreateDownloadLinkResponse {
+  // SignedUrl specifies the url to use to download content from (e.g. https://my-bucket.s3.amazonaws.com/randomstring/suffix.tar?X-...)
+  repeated string signed_url = 1 [deprecated = true];
+
+  // ExpiresAt defines when the signed URL expires.
+  google.protobuf.Timestamp expires_at = 2 [deprecated = true];
+
+  // New wrapper object containing the signed urls and expiration time
+  PreSignedURLs pre_signed_urls = 3;
+}
+
+// Wrapper object since the message is shared across this and the GetDataResponse
+message PreSignedURLs {
+  // SignedUrl specifies the url to use to download content from (e.g. https://my-bucket.s3.amazonaws.com/randomstring/suffix.tar?X-...)
+  repeated string signed_url = 1;
+
+  // ExpiresAt defines when the signed URL expires.
+  google.protobuf.Timestamp expires_at = 2;
+}
+
+// General request to retrieve data from a Flyte artifact url.
+message GetDataRequest {
+  // A unique identifier in the form of flyte:// that uniquely, for a given Flyte
+  // backend, identifies a Flyte artifact ([i]nput, [o]utput, flyte [d]eck, etc.).
+  // e.g. flyte://v1/proj/development/execid/n2/0/i (for 0th task execution attempt input)
+  //      flyte://v1/proj/development/execid/n2/i (for node execution input)
+  //      flyte://v1/proj/development/execid/n2/o/o3 (the o3 output of the second node)
+  string flyte_url = 1;
+}
+
+message GetDataResponse {
+  oneof data {
+    // literal map data will be returned
+    core.LiteralMap literal_map = 1;
+
+    // Flyte deck html will be returned as a signed url users can download
+    PreSignedURLs pre_signed_urls = 2;
+
+    // Single literal will be returned. This is returned when the user/url requests a specific output or input
+    // by name. See the o3 example above.
+    core.Literal literal = 3;
+  }
+}
+
+// DataProxyService defines an RPC Service that allows access to user-data in a controlled manner.
+service DataProxyService {
+  // CreateUploadLocation creates a signed url to upload artifacts to for a given project/domain.
+  rpc CreateUploadLocation (CreateUploadLocationRequest) returns (CreateUploadLocationResponse) {
+    option (google.api.http) = {
+      post: "/api/v1/dataproxy/artifact_urn"
+      body: "*"
+    };
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      description: "Creates a write-only http location that is accessible for tasks at runtime."
+    };
+  }
+
+  // CreateDownloadLocation creates a signed url to download artifacts.
+  rpc CreateDownloadLocation (CreateDownloadLocationRequest) returns (CreateDownloadLocationResponse) {
+    option deprecated = true;
+    option (google.api.http) = {
+      get: "/api/v1/dataproxy/artifact_urn"
+    };
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      description: "Deprecated: Please use CreateDownloadLink instead. Creates a read-only http location that is accessible for tasks at runtime."
+    };
+  }
+
+  // CreateDownloadLink creates a signed url to download artifacts.
+  rpc CreateDownloadLink (CreateDownloadLinkRequest) returns (CreateDownloadLinkResponse) {
+    option (google.api.http) = {
+      post: "/api/v1/dataproxy/artifact_link"
+      body: "*"
+    };
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      description: "Creates a read-only http location that is accessible for tasks at runtime."
+    };
+  }
+
+  rpc GetData (GetDataRequest) returns (GetDataResponse) {
+    // Takes an address like flyte://v1/proj/development/execid/n2/0/i and returns the actual data
+    option (google.api.http) = {
+      get: "/api/v1/data"
+    };
+  }
+}
diff --git a/flyrs/protos/flyteidl/service/external_plugin_service.proto b/flyrs/protos/flyteidl/service/external_plugin_service.proto
new file mode 100644
index 0000000000..a3035290e2
--- /dev/null
+++ b/flyrs/protos/flyteidl/service/external_plugin_service.proto
@@ -0,0 +1,79 @@
+syntax = "proto3";
+package flyteidl.service;
+
+option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service";
+import "flyteidl/core/literals.proto";
+import "flyteidl/core/tasks.proto";
+
+// ExternalPluginService defines an RPC Service that allows propeller to send the request to the backend plugin server.
+service ExternalPluginService {
+  // Send a task create request to the backend plugin server.
+  rpc CreateTask (TaskCreateRequest) returns (TaskCreateResponse){option deprecated = true;};
+  // Get job status.
+  rpc GetTask (TaskGetRequest) returns (TaskGetResponse){option deprecated = true;};
+  // Delete the task resource.
+  rpc DeleteTask (TaskDeleteRequest) returns (TaskDeleteResponse){option deprecated = true;};
+}
+
+// The state of the execution is used to control its visibility in the UI/CLI.
+enum State {
+  option deprecated = true;
+  RETRYABLE_FAILURE = 0;
+  PERMANENT_FAILURE = 1;
+  PENDING = 2;
+  RUNNING = 3;
+  SUCCEEDED = 4;
+}
+
+// Represents a request structure to create a task.
+message TaskCreateRequest {
+  option deprecated = true;
+  // The inputs required to start the execution. All required inputs must be
+  // included in this map. If not required and not provided, defaults apply.
+  // +optional
+  core.LiteralMap inputs = 1;
+  // Template of the task that encapsulates all the metadata of the task.
+  core.TaskTemplate template = 2;
+  // Prefix for where task output data will be written. (e.g. 
s3://my-bucket/randomstring) + string output_prefix = 3; +} + +// Represents a create response structure. +message TaskCreateResponse { + option deprecated = true; + string job_id = 1; +} + +// A message used to fetch a job state from backend plugin server. +message TaskGetRequest { + option deprecated = true; + // A predefined yet extensible Task type identifier. + string task_type = 1; + // The unique id identifying the job. + string job_id = 2; +} + +// Response to get an individual task state. +message TaskGetResponse { + option deprecated = true; + // The state of the execution is used to control its visibility in the UI/CLI. + State state = 1; + // The outputs of the execution. It's typically used by sql task. Flyteplugins service will create a + // Structured dataset pointing to the query result table. + // +optional + core.LiteralMap outputs = 2; +} + +// A message used to delete a task. +message TaskDeleteRequest { + option deprecated = true; + // A predefined yet extensible Task type identifier. + string task_type = 1; + // The unique id identifying the job. + string job_id = 2; +} + +// Response to delete a task. +message TaskDeleteResponse { + option deprecated = true; +} diff --git a/flyrs/protos/flyteidl/service/identity.proto b/flyrs/protos/flyteidl/service/identity.proto new file mode 100644 index 0000000000..244bb9aaeb --- /dev/null +++ b/flyrs/protos/flyteidl/service/identity.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; +package flyteidl.service; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service"; + +import "google/api/annotations.proto"; +import "google/protobuf/struct.proto"; +import "protoc-gen-openapiv2/options/annotations.proto"; + +message UserInfoRequest {} + +// See the OpenID Connect spec at https://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse for more information. +message UserInfoResponse { + // Locally unique and never reassigned identifier within the Issuer for the End-User, which is intended to be consumed + // by the Client. + string subject = 1; + + // Full name + string name = 2; + + // Shorthand name by which the End-User wishes to be referred to + string preferred_username = 3; + + // Given name(s) or first name(s) + string given_name = 4; + + // Surname(s) or last name(s) + string family_name = 5; + + // Preferred e-mail address + string email = 6; + + // Profile picture URL + string picture = 7; + + // Additional claims + google.protobuf.Struct additional_claims = 8; +} + +// IdentityService defines an RPC Service that interacts with user/app identities. +service IdentityService { + // Retrieves user information about the currently logged in user. + rpc UserInfo (UserInfoRequest) returns (UserInfoResponse) { + option (google.api.http) = { + get: "/me" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieves authenticated identity info." + }; + } +} diff --git a/flyrs/protos/flyteidl/service/signal.proto b/flyrs/protos/flyteidl/service/signal.proto new file mode 100644 index 0000000000..b1b927979b --- /dev/null +++ b/flyrs/protos/flyteidl/service/signal.proto @@ -0,0 +1,55 @@ +syntax = "proto3"; +package flyteidl.service; + +option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service"; + +import "google/api/annotations.proto"; +import "flyteidl/admin/signal.proto"; +import "protoc-gen-openapiv2/options/annotations.proto"; + +// SignalService defines an RPC Service that may create, update, and retrieve signal(s). 
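The signal rpcs follow the same client pattern as the admin list calls. A sketch of listing the signals an execution is currently waiting on, with field names assumed from flyteidl/admin/signal.proto and the endpoint and execution name as placeholders:

```rust
use flyteidl::admin::SignalListRequest;
use flyteidl::core::WorkflowExecutionIdentifier;
use flyteidl::service::signal_service_client::SignalServiceClient;

// List the first page of signals attached to a running execution,
// e.g. gate nodes currently waiting for approval.
async fn list_signals() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = SignalServiceClient::connect("http://localhost:8089").await?;
    let signals = client
        .list_signals(SignalListRequest {
            workflow_execution_id: Some(WorkflowExecutionIdentifier {
                project: "flytesnacks".into(),
                domain: "development".into(),
                name: "f8a2e6b0".into(), // hypothetical execution name
                ..Default::default()
            }),
            limit: 20,
            ..Default::default()
        })
        .await?
        .into_inner();
    for s in signals.signals {
        println!("{:?} -> {:?}", s.id, s.value);
    }
    Ok(())
}
```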
+service SignalService { + // Fetches or creates a :ref:`ref_flyteidl.admin.Signal`. + rpc GetOrCreateSignal (flyteidl.admin.SignalGetOrCreateRequest) returns (flyteidl.admin.Signal) { + // Purposefully left out an HTTP API for this RPC call. This is meant to idempotently retrieve + // a signal, meaning the first call will create the signal and all subsequent calls will + // fetch the existing signal. This is only useful during Flyte Workflow execution and therefore + // is not exposed to mitigate unintended behavior. + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Retrieve a signal, creating it if it does not exist." + }; + } + + // Fetch a list of :ref:`ref_flyteidl.admin.Signal` definitions. + rpc ListSignals (flyteidl.admin.SignalListRequest) returns (flyteidl.admin.SignalList) { + option (google.api.http) = { + get: "/api/v1/signals/{workflow_execution_id.project}/{workflow_execution_id.domain}/{workflow_execution_id.name}" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Fetch existing signal definitions matching the input signal id filters." + }; + } + + // Sets the value on a :ref:`ref_flyteidl.admin.Signal` definition + rpc SetSignal (flyteidl.admin.SignalSetRequest) returns (flyteidl.admin.SignalSetResponse) { + option (google.api.http) = { + post: "/api/v1/signals" + body: "*" + }; + option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { + description: "Set a signal value." + responses: { + key: "400" + value: { + description: "Returned for bad request that may have failed validation." + } + } + responses: { + key: "409" + value: { + description: "Returned for a request that references an identical entity that has already been registered." + } + } + }; + } +} diff --git a/flyrs/protos/google/api/annotations.proto b/flyrs/protos/google/api/annotations.proto new file mode 100644 index 0000000000..85c361b47f --- /dev/null +++ b/flyrs/protos/google/api/annotations.proto @@ -0,0 +1,31 @@ +// Copyright (c) 2015, Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/api/http.proto"; +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationsProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // See `HttpRule`. + HttpRule http = 72295728; +} diff --git a/flyrs/protos/google/api/client.proto b/flyrs/protos/google/api/client.proto new file mode 100644 index 0000000000..2102623d30 --- /dev/null +++ b/flyrs/protos/google/api/client.proto @@ -0,0 +1,99 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "ClientProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.MethodOptions { + // A definition of a client library method signature. + // + // In client libraries, each proto RPC corresponds to one or more methods + // which the end user is able to call, and calls the underlying RPC. + // Normally, this method receives a single argument (a struct or instance + // corresponding to the RPC request object). Defining this field will + // add one or more overloads providing flattened or simpler method signatures + // in some languages. + // + // The fields on the method signature are provided as a comma-separated + // string. + // + // For example, the proto RPC and annotation: + // + // rpc CreateSubscription(CreateSubscriptionRequest) + // returns (Subscription) { + // option (google.api.method_signature) = "name,topic"; + // } + // + // Would add the following Java overload (in addition to the method accepting + // the request object): + // + // public final Subscription createSubscription(String name, String topic) + // + // The following backwards-compatibility guidelines apply: + // + // * Adding this annotation to an unannotated method is backwards + // compatible. + // * Adding this annotation to a method which already has existing + // method signature annotations is backwards compatible if and only if + // the new method signature annotation is last in the sequence. + // * Modifying or removing an existing method signature annotation is + // a breaking change. + // * Re-ordering existing method signature annotations is a breaking + // change. + repeated string method_signature = 1051; +} + +extend google.protobuf.ServiceOptions { + // The hostname for this service. + // This should be specified with no prefix or protocol. + // + // Example: + // + // service Foo { + // option (google.api.default_host) = "foo.googleapi.com"; + // ... + // } + string default_host = 1049; + + // OAuth scopes needed for the client. + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform"; + // ... + // } + // + // If there is more than one scope, use a comma-separated string: + // + // Example: + // + // service Foo { + // option (google.api.oauth_scopes) = \ + // "https://www.googleapis.com/auth/cloud-platform," + // "https://www.googleapis.com/auth/monitoring"; + // ... 
+ // } + string oauth_scopes = 1050; +} diff --git a/flyrs/protos/google/api/field_behavior.proto b/flyrs/protos/google/api/field_behavior.proto new file mode 100644 index 0000000000..686667954a --- /dev/null +++ b/flyrs/protos/google/api/field_behavior.proto @@ -0,0 +1,84 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +import "google/protobuf/descriptor.proto"; + +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "FieldBehaviorProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +extend google.protobuf.FieldOptions { + // A designation of a specific field behavior (required, output only, etc.) + // in protobuf messages. + // + // Examples: + // + // string name = 1 [(google.api.field_behavior) = REQUIRED]; + // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + // google.protobuf.Duration ttl = 1 + // [(google.api.field_behavior) = INPUT_ONLY]; + // google.protobuf.Timestamp expire_time = 1 + // [(google.api.field_behavior) = OUTPUT_ONLY, + // (google.api.field_behavior) = IMMUTABLE]; + repeated google.api.FieldBehavior field_behavior = 1052; +} + +// An indicator of the behavior of a given field (for example, that a field +// is required in requests, or given as output but ignored as input). +// This **does not** change the behavior in protocol buffers itself; it only +// denotes the behavior and may affect how API tooling handles the field. +// +// Note: This enum **may** receive new values in the future. +enum FieldBehavior { + // Conventional default for enums. Do not use this. + FIELD_BEHAVIOR_UNSPECIFIED = 0; + + // Specifically denotes a field as optional. + // While all fields in protocol buffers are optional, this may be specified + // for emphasis if appropriate. + OPTIONAL = 1; + + // Denotes a field as required. + // This indicates that the field **must** be provided as part of the request, + // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). + REQUIRED = 2; + + // Denotes a field as output only. + // This indicates that the field is provided in responses, but including the + // field in a request does nothing (the server *must* ignore it and + // *must not* throw an error as a result of the field's presence). + OUTPUT_ONLY = 3; + + // Denotes a field as input only. + // This indicates that the field is provided in requests, and the + // corresponding field is not included in output. + INPUT_ONLY = 4; + + // Denotes a field as immutable. + // This indicates that the field may be set once in a request to create a + // resource, but may not be changed thereafter. + IMMUTABLE = 5; + + // Denotes that a (repeated) field is an unordered list. + // This indicates that the service may provide the elements of the list + // in any arbitrary order, rather than the order the user originally + // provided. 
Additionally, the list's order may or may not be stable. + UNORDERED_LIST = 6; +} diff --git a/flyrs/protos/google/api/http.proto b/flyrs/protos/google/api/http.proto new file mode 100644 index 0000000000..69460cf791 --- /dev/null +++ b/flyrs/protos/google/api/http.proto @@ -0,0 +1,375 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.api; + +option cc_enable_arenas = true; +option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; +option java_multiple_files = true; +option java_outer_classname = "HttpProto"; +option java_package = "com.google.api"; +option objc_class_prefix = "GAPI"; + +// Defines the HTTP configuration for an API service. It contains a list of +// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +// to one or more HTTP REST API methods. +message Http { + // A list of HTTP configuration rules that apply to individual API methods. + // + // **NOTE:** All service configuration rules follow "last one wins" order. + repeated HttpRule rules = 1; + + // When set to true, URL path parameters will be fully URI-decoded except in + // cases of single segment matches in reserved expansion, where "%2F" will be + // left encoded. + // + // The default behavior is to not decode RFC 6570 reserved characters in multi + // segment matches. + bool fully_decode_reserved_expansion = 2; +} + +// # gRPC Transcoding +// +// gRPC Transcoding is a feature for mapping between a gRPC method and one or +// more HTTP REST endpoints. It allows developers to build a single API service +// that supports both gRPC APIs and REST APIs. Many systems, including [Google +// APIs](https://github.com/googleapis/googleapis), +// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC +// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), +// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature +// and use it for large scale production services. +// +// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies +// how different portions of the gRPC request message are mapped to the URL +// path, URL query parameters, and HTTP request body. It also controls how the +// gRPC response message is mapped to the HTTP response body. `HttpRule` is +// typically specified as an `google.api.http` annotation on the gRPC method. +// +// Each mapping specifies a URL path template and an HTTP method. The path +// template may refer to one or more fields in the gRPC request message, as long +// as each field is a non-repeated field with a primitive (non-message) type. +// The path template controls how fields of the request message are mapped to +// the URL path. +// +// Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/{name=messages/*}" +// }; +// } +// } +// message GetMessageRequest { +// string name = 1; // Mapped to URL path. 
+// } +// message Message { +// string text = 1; // The resource content. +// } +// +// This enables an HTTP REST to gRPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` +// +// Any fields in the request message which are not bound by the path template +// automatically become HTTP query parameters if there is no HTTP request body. +// For example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get:"/v1/messages/{message_id}" +// }; +// } +// } +// message GetMessageRequest { +// message SubMessage { +// string subfield = 1; +// } +// string message_id = 1; // Mapped to URL path. +// int64 revision = 2; // Mapped to URL query parameter `revision`. +// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. +// } +// +// This enables a HTTP JSON to RPC mapping as below: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | +// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: +// "foo"))` +// +// Note that fields which are mapped to URL query parameters must have a +// primitive type or a repeated primitive type or a non-repeated message type. +// In the case of a repeated type, the parameter can be repeated in the URL +// as `...?param=A¶m=B`. In the case of a message type, each field of the +// message is mapped to a separate parameter, such as +// `...?foo.a=A&foo.b=B&foo.c=C`. +// +// For HTTP methods that allow a request body, the `body` field +// specifies the mapping. Consider a REST update method on the +// message resource collection: +// +// service Messaging { +// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "message" +// }; +// } +// } +// message UpdateMessageRequest { +// string message_id = 1; // mapped to the URL +// Message message = 2; // mapped to the body +// } +// +// The following HTTP JSON to RPC mapping is enabled, where the +// representation of the JSON in the request body is determined by +// protos JSON encoding: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" message { text: "Hi!" })` +// +// The special name `*` can be used in the body mapping to define that +// every field not bound by the path template should be mapped to the +// request body. This enables the following alternative definition of +// the update method: +// +// service Messaging { +// rpc UpdateMessage(Message) returns (Message) { +// option (google.api.http) = { +// patch: "/v1/messages/{message_id}" +// body: "*" +// }; +// } +// } +// message Message { +// string message_id = 1; +// string text = 2; +// } +// +// +// The following HTTP JSON to RPC mapping is enabled: +// +// HTTP | gRPC +// -----|----- +// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: +// "123456" text: "Hi!")` +// +// Note that when using `*` in the body mapping, it is not possible to +// have HTTP parameters, as all fields not bound by the path end in +// the body. This makes this option more rarely used in practice when +// defining REST APIs. The common usage of `*` is in custom methods +// which don't use the URL at all for transferring data. +// +// It is possible to define multiple HTTP methods for one RPC by using +// the `additional_bindings` option. 
Example: +// +// service Messaging { +// rpc GetMessage(GetMessageRequest) returns (Message) { +// option (google.api.http) = { +// get: "/v1/messages/{message_id}" +// additional_bindings { +// get: "/v1/users/{user_id}/messages/{message_id}" +// } +// }; +// } +// } +// message GetMessageRequest { +// string message_id = 1; +// string user_id = 2; +// } +// +// This enables the following two alternative HTTP JSON to RPC mappings: +// +// HTTP | gRPC +// -----|----- +// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` +// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: +// "123456")` +// +// ## Rules for HTTP mapping +// +// 1. Leaf request fields (recursive expansion nested messages in the request +// message) are classified into three categories: +// - Fields referred by the path template. They are passed via the URL path. +// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP +// request body. +// - All other fields are passed via the URL query parameters, and the +// parameter name is the field path in the request message. A repeated +// field can be represented as multiple query parameters under the same +// name. +// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields +// are passed via URL path and HTTP request body. +// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all +// fields are passed via URL path and URL query parameters. +// +// ### Path template syntax +// +// Template = "/" Segments [ Verb ] ; +// Segments = Segment { "/" Segment } ; +// Segment = "*" | "**" | LITERAL | Variable ; +// Variable = "{" FieldPath [ "=" Segments ] "}" ; +// FieldPath = IDENT { "." IDENT } ; +// Verb = ":" LITERAL ; +// +// The syntax `*` matches a single URL path segment. The syntax `**` matches +// zero or more URL path segments, which must be the last part of the URL path +// except the `Verb`. +// +// The syntax `Variable` matches part of the URL path as specified by its +// template. A variable template must not contain other variables. If a variable +// matches a single path segment, its template may be omitted, e.g. `{var}` +// is equivalent to `{var=*}`. +// +// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +// contains any reserved character, such characters should be percent-encoded +// before the matching. +// +// If a variable contains exactly one path segment, such as `"{var}"` or +// `"{var=*}"`, when such a variable is expanded into a URL path on the client +// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The +// server side does the reverse decoding. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{var}`. +// +// If a variable contains multiple path segments, such as `"{var=foo/*}"` +// or `"{var=**}"`, when such a variable is expanded into a URL path on the +// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. +// The server side does the reverse decoding, except "%2F" and "%2f" are left +// unchanged. Such variables show up in the +// [Discovery +// Document](https://developers.google.com/discovery/v1/reference/apis) as +// `{+var}`. +// +// ## Using gRPC API Service Configuration +// +// gRPC API Service Configuration (service config) is a configuration language +// for configuring a gRPC service to become a user-facing product. 
The
+// service config is simply the YAML representation of the `google.api.Service`
+// proto message.
+//
+// As an alternative to annotating your proto file, you can configure gRPC
+// transcoding in your service config YAML files. You do this by specifying a
+// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same
+// effect as the proto annotation. This can be particularly useful if you
+// have a proto that is reused in multiple services. Note that any transcoding
+// specified in the service config will override any matching transcoding
+// configuration in the proto.
+//
+// Example:
+//
+//     http:
+//       rules:
+//         # Selects a gRPC method and applies HttpRule to it.
+//         - selector: example.v1.Messaging.GetMessage
+//           get: /v1/messages/{message_id}/{sub.subfield}
+//
+// ## Special notes
+//
+// When gRPC Transcoding is used to map gRPC to JSON REST endpoints, the
+// proto to JSON conversion must follow the [proto3
+// specification](https://developers.google.com/protocol-buffers/docs/proto3#json).
+//
+// While the single segment variable follows the semantics of
+// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String
+// Expansion, the multi segment variable **does not** follow RFC 6570 Section
+// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion
+// does not expand special characters like `?` and `#`, which would lead
+// to invalid URLs. As a result, gRPC Transcoding uses a custom encoding
+// for multi segment variables.
+//
+// The path variables **must not** refer to any repeated or mapped field,
+// because client libraries are not capable of handling such variable expansion.
+//
+// The path variables **must not** capture the leading "/" character. The reason
+// is that the most common use case "{var}" does not capture the leading "/"
+// character. For consistency, all path variables must share the same behavior.
+//
+// Repeated message fields must not be mapped to URL query parameters, because
+// no client library can support such complicated mapping.
+//
+// If an API needs to use a JSON array for request or response body, it can map
+// the request or response body to a repeated field. However, some gRPC
+// Transcoding implementations may not support this feature.
+message HttpRule {
+  // Selects a method to which this rule applies.
+  //
+  // Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+  string selector = 1;
+
+  // Determines the URL pattern that is matched by this rule. This pattern can be
+  // used with any of the {get|put|post|delete|patch} methods. A custom method
+  // can be defined using the 'custom' field.
+  oneof pattern {
+    // Maps to HTTP GET. Used for listing and getting information about
+    // resources.
+    string get = 2;
+
+    // Maps to HTTP PUT. Used for replacing a resource.
+    string put = 3;
+
+    // Maps to HTTP POST. Used for creating a resource or performing an action.
+    string post = 4;
+
+    // Maps to HTTP DELETE. Used for deleting a resource.
+    string delete = 5;
+
+    // Maps to HTTP PATCH. Used for updating a resource.
+    string patch = 6;
+
+    // The custom pattern is used for specifying an HTTP method that is not
+    // included in the `pattern` field, such as HEAD, or "*" to leave the
+    // HTTP method unspecified for this rule. The wild-card rule is useful
+    // for services that provide content to Web (HTML) clients.
+    CustomHttpPattern custom = 8;
+  }
+
+  // The name of the request field whose value is mapped to the HTTP request
+  // body, or `*` for mapping all request fields not captured by the path
+  // pattern to the HTTP body, or omitted for not having any HTTP request body.
+  //
+  // NOTE: the referred field must be present at the top-level of the request
+  // message type.
+  string body = 7;
+
+  // Optional. The name of the response field whose value is mapped to the HTTP
+  // response body. When omitted, the entire response message will be used
+  // as the HTTP response body.
+  //
+  // NOTE: The referred field must be present at the top-level of the response
+  // message type.
+  string response_body = 12;
+
+  // Additional HTTP bindings for the selector. Nested bindings must
+  // not contain an `additional_bindings` field themselves (that is,
+  // the nesting may only be one level deep).
+  repeated HttpRule additional_bindings = 11;
+}
+
+// A custom pattern is used for defining a custom HTTP verb.
+message CustomHttpPattern {
+  // The name of this custom HTTP verb.
+  string kind = 1;
+
+  // The path matched by this custom verb.
+  string path = 2;
+}
diff --git a/flyrs/protos/google/api/resource.proto b/flyrs/protos/google/api/resource.proto
new file mode 100644
index 0000000000..fd9ee66def
--- /dev/null
+++ b/flyrs/protos/google/api/resource.proto
@@ -0,0 +1,299 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.api;
+
+import "google/protobuf/descriptor.proto";
+
+option cc_enable_arenas = true;
+option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
+option java_multiple_files = true;
+option java_outer_classname = "ResourceProto";
+option java_package = "com.google.api";
+option objc_class_prefix = "GAPI";
+
+extend google.protobuf.FieldOptions {
+  // An annotation that describes a resource reference, see
+  // [ResourceReference][].
+  google.api.ResourceReference resource_reference = 1055;
+}
+
+extend google.protobuf.FileOptions {
+  // An annotation that describes a resource definition without a corresponding
+  // message; see [ResourceDescriptor][].
+  repeated google.api.ResourceDescriptor resource_definition = 1053;
+}
+
+extend google.protobuf.MessageOptions {
+  // An annotation that describes a resource definition, see
+  // [ResourceDescriptor][].
+  google.api.ResourceDescriptor resource = 1053;
+}
+
+// A simple descriptor of a resource type.
+//
+// ResourceDescriptor annotates a resource message (either by means of a
+// protobuf annotation or use in the service config), and associates the
+// resource's schema, the resource type, and the pattern of the resource name.
+//
+// Example:
+//
+//     message Topic {
+//       // Indicates this message defines a resource schema.
+//       // Declares the resource type in the format of {service}/{kind}.
+//       // For Kubernetes resources, the format is {api group}/{kind}.
+// option (google.api.resource) = { +// type: "pubsub.googleapis.com/Topic" +// name_descriptor: { +// pattern: "projects/{project}/topics/{topic}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// } +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: "pubsub.googleapis.com/Topic" +// name_descriptor: +// - pattern: "projects/{project}/topics/{topic}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// +// Sometimes, resources have multiple patterns, typically because they can +// live under multiple parents. +// +// Example: +// +// message LogEntry { +// option (google.api.resource) = { +// type: "logging.googleapis.com/LogEntry" +// name_descriptor: { +// pattern: "projects/{project}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// } +// name_descriptor: { +// pattern: "folders/{folder}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" +// parent_name_extractor: "folders/{folder}" +// } +// name_descriptor: { +// pattern: "organizations/{organization}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Organization" +// parent_name_extractor: "organizations/{organization}" +// } +// name_descriptor: { +// pattern: "billingAccounts/{billing_account}/logs/{log}" +// parent_type: "billing.googleapis.com/BillingAccount" +// parent_name_extractor: "billingAccounts/{billing_account}" +// } +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: 'logging.googleapis.com/LogEntry' +// name_descriptor: +// - pattern: "projects/{project}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// parent_name_extractor: "projects/{project}" +// - pattern: "folders/{folder}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" +// parent_name_extractor: "folders/{folder}" +// - pattern: "organizations/{organization}/logs/{log}" +// parent_type: "cloudresourcemanager.googleapis.com/Organization" +// parent_name_extractor: "organizations/{organization}" +// - pattern: "billingAccounts/{billing_account}/logs/{log}" +// parent_type: "billing.googleapis.com/BillingAccount" +// parent_name_extractor: "billingAccounts/{billing_account}" +// +// For flexible resources, the resource name doesn't contain parent names, but +// the resource itself has parents for policy evaluation. +// +// Example: +// +// message Shelf { +// option (google.api.resource) = { +// type: "library.googleapis.com/Shelf" +// name_descriptor: { +// pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// } +// name_descriptor: { +// pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" +// } +// }; +// } +// +// The ResourceDescriptor Yaml config will look like: +// +// resources: +// - type: 'library.googleapis.com/Shelf' +// name_descriptor: +// - pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Project" +// - pattern: "shelves/{shelf}" +// parent_type: "cloudresourcemanager.googleapis.com/Folder" +message ResourceDescriptor { + // A description of the historical or future-looking state of the + // resource pattern. + enum History { + // The "unset" value. 
+ HISTORY_UNSPECIFIED = 0; + + // The resource originally had one pattern and launched as such, and + // additional patterns were added later. + ORIGINALLY_SINGLE_PATTERN = 1; + + // The resource has one pattern, but the API owner expects to add more + // later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents + // that from being necessary once there are multiple patterns.) + FUTURE_MULTI_PATTERN = 2; + } + + // A flag representing a specific style that a resource claims to conform to. + enum Style { + // The unspecified value. Do not use. + STYLE_UNSPECIFIED = 0; + + // This resource is intended to be "declarative-friendly". + // + // Declarative-friendly resources must be more strictly consistent, and + // setting this to true communicates to tools that this resource should + // adhere to declarative-friendly expectations. + // + // Note: This is used by the API linter (linter.aip.dev) to enable + // additional checks. + DECLARATIVE_FRIENDLY = 1; + } + + // The resource type. It must be in the format of + // {service_name}/{resource_type_kind}. The `resource_type_kind` must be + // singular and must not include version numbers. + // + // Example: `storage.googleapis.com/Bucket` + // + // The value of the resource_type_kind must follow the regular expression + // /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and + // should use PascalCase (UpperCamelCase). The maximum number of + // characters allowed for the `resource_type_kind` is 100. + string type = 1; + + // Optional. The relative resource name pattern associated with this resource + // type. The DNS prefix of the full resource name shouldn't be specified here. + // + // The path pattern must follow the syntax, which aligns with HTTP binding + // syntax: + // + // Template = Segment { "/" Segment } ; + // Segment = LITERAL | Variable ; + // Variable = "{" LITERAL "}" ; + // + // Examples: + // + // - "projects/{project}/topics/{topic}" + // - "projects/{project}/knowledgeBases/{knowledge_base}" + // + // The components in braces correspond to the IDs for each resource in the + // hierarchy. It is expected that, if multiple patterns are provided, + // the same component name (e.g. "project") refers to IDs of the same + // type of resource. + repeated string pattern = 2; + + // Optional. The field on the resource that designates the resource name + // field. If omitted, this is assumed to be "name". + string name_field = 3; + + // Optional. The historical or future-looking state of the resource pattern. + // + // Example: + // + // // The InspectTemplate message originally only supported resource + // // names with organization, and project was added later. + // message InspectTemplate { + // option (google.api.resource) = { + // type: "dlp.googleapis.com/InspectTemplate" + // pattern: + // "organizations/{organization}/inspectTemplates/{inspect_template}" + // pattern: "projects/{project}/inspectTemplates/{inspect_template}" + // history: ORIGINALLY_SINGLE_PATTERN + // }; + // } + History history = 4; + + // The plural name used in the resource name and permission names, such as + // 'projects' for the resource name of 'projects/{project}' and the permission + // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same + // concept of the `plural` field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // + // Note: The plural form is required even for singleton resources. 
See + // https://aip.dev/156 + string plural = 5; + + // The same concept of the `singular` field in k8s CRD spec + // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ + // Such as "project" for the `resourcemanager.googleapis.com/Project` type. + string singular = 6; + + // Style flag(s) for this resource. + // These indicate that a resource is expected to conform to a given + // style. See the specific style flags for additional information. + repeated Style style = 10; +} + +// Defines a proto annotation that describes a string field that refers to +// an API resource. +message ResourceReference { + // The resource type that the annotated field references. + // + // Example: + // + // message Subscription { + // string topic = 2 [(google.api.resource_reference) = { + // type: "pubsub.googleapis.com/Topic" + // }]; + // } + // + // Occasionally, a field may reference an arbitrary resource. In this case, + // APIs use the special value * in their resource reference. + // + // Example: + // + // message GetIamPolicyRequest { + // string resource = 2 [(google.api.resource_reference) = { + // type: "*" + // }]; + // } + string type = 1; + + // The resource type of a child collection that the annotated field + // references. This is useful for annotating the `parent` field that + // doesn't have a fixed resource type. + // + // Example: + // + // message ListLogEntriesRequest { + // string parent = 1 [(google.api.resource_reference) = { + // child_type: "logging.googleapis.com/LogEntry" + // }; + // } + string child_type = 2; +} diff --git a/flyrs/protos/google/pubsub/v1/pubsub.proto b/flyrs/protos/google/pubsub/v1/pubsub.proto new file mode 100644 index 0000000000..9bc678e3ae --- /dev/null +++ b/flyrs/protos/google/pubsub/v1/pubsub.proto @@ -0,0 +1,1316 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.pubsub.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; +import "google/pubsub/v1/schema.proto"; + +option cc_enable_arenas = true; +option csharp_namespace = "Google.Cloud.PubSub.V1"; +option go_package = "google.golang.org/genproto/googleapis/pubsub/v1;pubsub"; +option java_multiple_files = true; +option java_outer_classname = "PubsubProto"; +option java_package = "com.google.pubsub.v1"; +option php_namespace = "Google\\Cloud\\PubSub\\V1"; +option ruby_package = "Google::Cloud::PubSub::V1"; + +// The service that an application uses to manipulate topics, and to send +// messages to a topic. 
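+//
+// As a sketch only (not part of the upstream file): vendored definitions like
+// this one are typically compiled into Rust with tonic-build from a build
+// script such as flyrs/build.rs; the options and paths below are illustrative
+// assumptions, not necessarily the configuration this patch uses.
+//
+//     // build.rs (sketch): generate client-only Rust bindings
+//     fn main() -> Result<(), Box<dyn std::error::Error>> {
+//         tonic_build::configure()
+//             .build_server(false) // the crate is a client, not a server
+//             .compile(
+//                 &["protos/google/pubsub/v1/pubsub.proto"],
+//                 &["protos"], // include root resolving the google.* imports
+//             )?;
+//         Ok(())
+//     }
+//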
+service Publisher { + option (google.api.default_host) = "pubsub.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/pubsub"; + + // Creates the given topic with the given name. See the [resource name rules] + // (https://cloud.google.com/pubsub/docs/admin#resource_names). + rpc CreateTopic(Topic) returns (Topic) { + option (google.api.http) = { + put: "/v1/{name=projects/*/topics/*}" + body: "*" + }; + option (google.api.method_signature) = "name"; + } + + // Updates an existing topic. Note that certain properties of a + // topic are not modifiable. + rpc UpdateTopic(UpdateTopicRequest) returns (Topic) { + option (google.api.http) = { + patch: "/v1/{topic.name=projects/*/topics/*}" + body: "*" + }; + } + + // Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic + // does not exist. + rpc Publish(PublishRequest) returns (PublishResponse) { + option (google.api.http) = { + post: "/v1/{topic=projects/*/topics/*}:publish" + body: "*" + }; + option (google.api.method_signature) = "topic,messages"; + } + + // Gets the configuration of a topic. + rpc GetTopic(GetTopicRequest) returns (Topic) { + option (google.api.http) = { + get: "/v1/{topic=projects/*/topics/*}" + }; + option (google.api.method_signature) = "topic"; + } + + // Lists matching topics. + rpc ListTopics(ListTopicsRequest) returns (ListTopicsResponse) { + option (google.api.http) = { + get: "/v1/{project=projects/*}/topics" + }; + option (google.api.method_signature) = "project"; + } + + // Lists the names of the attached subscriptions on this topic. + rpc ListTopicSubscriptions(ListTopicSubscriptionsRequest) + returns (ListTopicSubscriptionsResponse) { + option (google.api.http) = { + get: "/v1/{topic=projects/*/topics/*}/subscriptions" + }; + option (google.api.method_signature) = "topic"; + } + + // Lists the names of the snapshots on this topic. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + rpc ListTopicSnapshots(ListTopicSnapshotsRequest) + returns (ListTopicSnapshotsResponse) { + option (google.api.http) = { + get: "/v1/{topic=projects/*/topics/*}/snapshots" + }; + option (google.api.method_signature) = "topic"; + } + + // Deletes the topic with the given name. Returns `NOT_FOUND` if the topic + // does not exist. After a topic is deleted, a new topic may be created with + // the same name; this is an entirely new topic with none of the old + // configuration or subscriptions. Existing subscriptions to this topic are + // not deleted, but their `topic` field is set to `_deleted-topic_`. + rpc DeleteTopic(DeleteTopicRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{topic=projects/*/topics/*}" + }; + option (google.api.method_signature) = "topic"; + } + + // Detaches a subscription from this topic. All messages retained in the + // subscription are dropped. Subsequent `Pull` and `StreamingPull` requests + // will return FAILED_PRECONDITION. If the subscription is a push + // subscription, pushes to the endpoint will stop. 
+  rpc DetachSubscription(DetachSubscriptionRequest)
+      returns (DetachSubscriptionResponse) {
+    option (google.api.http) = {
+      post: "/v1/{subscription=projects/*/subscriptions/*}:detach"
+    };
+  }
+}
+
+// A policy constraining the storage of messages published to the topic.
+message MessageStoragePolicy {
+  // A list of IDs of GCP regions where messages that are published to the topic
+  // may be persisted in storage. Messages published by publishers running in
+  // non-allowed GCP regions (or running outside of GCP altogether) will be
+  // routed for storage in one of the allowed regions. An empty list means that
+  // no regions are allowed, and is not a valid configuration.
+  repeated string allowed_persistence_regions = 1;
+}
+
+// Settings for validating messages published against a schema.
+message SchemaSettings {
+  // Required. The name of the schema that messages published should be
+  // validated against. Format is `projects/{project}/schemas/{schema}`. The
+  // value of this field will be `_deleted-schema_` if the schema has been
+  // deleted.
+  string schema = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "pubsub.googleapis.com/Schema" }
+  ];
+
+  // The encoding of messages validated against `schema`.
+  Encoding encoding = 2;
+}
+
+// A topic resource.
+message Topic {
+  option (google.api.resource) = {
+    type: "pubsub.googleapis.com/Topic"
+    pattern: "projects/{project}/topics/{topic}"
+    pattern: "_deleted-topic_"
+  };
+
+  // Required. The name of the topic. It must have the format
+  // `"projects/{project}/topics/{topic}"`. `{topic}` must start with a letter,
+  // and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`),
+  // underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent
+  // signs (`%`). It must be between 3 and 255 characters in length, and it
+  // must not start with `"goog"`.
+  string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // See [Creating and managing labels]
+  // (https://cloud.google.com/pubsub/docs/labels).
+  map<string, string> labels = 2;
+
+  // Policy constraining the set of Google Cloud Platform regions where messages
+  // published to the topic may be stored. If not present, then no constraints
+  // are in effect.
+  MessageStoragePolicy message_storage_policy = 3;
+
+  // The resource name of the Cloud KMS CryptoKey to be used to protect access
+  // to messages published on this topic.
+  //
+  // The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`.
+  string kms_key_name = 5;
+
+  // Settings for validating messages published against a schema.
+  //
+  // EXPERIMENTAL: Schema support is in development and may not work yet.
+  SchemaSettings schema_settings = 6;
+
+  // Reserved for future use. This field is set only in responses from the
+  // server; it is ignored if it is set in any requests.
+  bool satisfies_pzs = 7;
+}
+
+// A message that is published by publishers and consumed by subscribers. The
+// message must contain either a non-empty data field or at least one attribute.
+// Note that client libraries represent this object differently
+// depending on the language. See the corresponding [client library
+// documentation](https://cloud.google.com/pubsub/docs/reference/libraries) for
+// more information. See [quotas and limits]
+// (https://cloud.google.com/pubsub/quotas) for more information about message
+// limits.
+message PubsubMessage {
+  // The message data field. If this field is empty, the message must contain
+  // at least one attribute.
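+  //
+  // A minimal sketch (not part of the upstream schema) of building and
+  // publishing such a message through prost/tonic-generated bindings; `client`
+  // is assumed to be an already-connected Publisher client, and the topic
+  // name is illustrative:
+  //
+  //     use std::collections::HashMap;
+  //
+  //     let msg = PubsubMessage {
+  //         data: b"hello".to_vec(),   // non-empty data, so attributes may be empty
+  //         attributes: HashMap::new(),
+  //         ..Default::default()       // message_id/publish_time are server-set
+  //     };
+  //     let resp = client
+  //         .publish(PublishRequest {
+  //             topic: "projects/p/topics/t".into(),
+  //             messages: vec![msg],
+  //         })
+  //         .await?
+  //         .into_inner();             // resp.message_ids holds the assigned IDs
+  //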
+  bytes data = 1;
+
+  // Attributes for this message. If this field is empty, the message must
+  // contain non-empty data. This can be used to filter messages on the
+  // subscription.
+  map<string, string> attributes = 2;
+
+  // ID of this message, assigned by the server when the message is published.
+  // Guaranteed to be unique within the topic. This value may be read by a
+  // subscriber that receives a `PubsubMessage` via a `Pull` call or a push
+  // delivery. It must not be populated by the publisher in a `Publish` call.
+  string message_id = 3;
+
+  // The time at which the message was published, populated by the server when
+  // it receives the `Publish` call. It must not be populated by the
+  // publisher in a `Publish` call.
+  google.protobuf.Timestamp publish_time = 4;
+
+  // If non-empty, identifies related messages for which publish order should be
+  // respected. If a `Subscription` has `enable_message_ordering` set to `true`,
+  // messages published with the same non-empty `ordering_key` value will be
+  // delivered to subscribers in the order in which they are received by the
+  // Pub/Sub system. All `PubsubMessage`s published in a given `PublishRequest`
+  // must specify the same `ordering_key` value.
+  string ordering_key = 5;
+}
+
+// Request for the GetTopic method.
+message GetTopicRequest {
+  // Required. The name of the topic to get.
+  // Format is `projects/{project}/topics/{topic}`.
+  string topic = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" }
+  ];
+}
+
+// Request for the UpdateTopic method.
+message UpdateTopicRequest {
+  // Required. The updated topic object.
+  Topic topic = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Indicates which fields in the provided topic to update. Must be
+  // specified and non-empty. Note that if `update_mask` contains
+  // "message_storage_policy" but the `message_storage_policy` is not set in
+  // the `topic` provided above, then the updated value is determined by the
+  // policy configured at the project or organization level.
+  google.protobuf.FieldMask update_mask = 2
+      [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request for the Publish method.
+message PublishRequest {
+  // Required. The messages in the request will be published on this topic.
+  // Format is `projects/{project}/topics/{topic}`.
+  string topic = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" }
+  ];
+
+  // Required. The messages to publish.
+  repeated PubsubMessage messages = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Response for the `Publish` method.
+message PublishResponse {
+  // The server-assigned ID of each published message, in the same order as
+  // the messages in the request. IDs are guaranteed to be unique within
+  // the topic.
+  repeated string message_ids = 1;
+}
+
+// Request for the `ListTopics` method.
+message ListTopicsRequest {
+  // Required. The name of the project in which to list topics.
+  // Format is `projects/{project-id}`.
+  string project = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "cloudresourcemanager.googleapis.com/Project"
+    }
+  ];
+
+  // Maximum number of topics to return.
+  int32 page_size = 2;
+
+  // The value returned by the last `ListTopicsResponse`; indicates that this is
+  // a continuation of a prior `ListTopics` call, and that the system should
+  // return the next page of data.
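+  //
+  // A sketch of the paging contract (assumed tonic-generated client; error
+  // handling elided, project name illustrative):
+  //
+  //     let mut page_token = String::new();
+  //     loop {
+  //         let resp = client
+  //             .list_topics(ListTopicsRequest {
+  //                 project: "projects/my-project".into(),
+  //                 page_size: 100,
+  //                 page_token: page_token.clone(),
+  //             })
+  //             .await?
+  //             .into_inner();
+  //         // ... consume resp.topics ...
+  //         page_token = resp.next_page_token;
+  //         if page_token.is_empty() {
+  //             break; // an empty token means there are no further pages
+  //         }
+  //     }
+  //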
+ string page_token = 3; +} + +// Response for the `ListTopics` method. +message ListTopicsResponse { + // The resulting topics. + repeated Topic topics = 1; + + // If not empty, indicates that there may be more topics that match the + // request; this value should be passed in a new `ListTopicsRequest`. + string next_page_token = 2; +} + +// Request for the `ListTopicSubscriptions` method. +message ListTopicSubscriptionsRequest { + // Required. The name of the topic that subscriptions are attached to. + // Format is `projects/{project}/topics/{topic}`. + string topic = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" } + ]; + + // Maximum number of subscription names to return. + int32 page_size = 2; + + // The value returned by the last `ListTopicSubscriptionsResponse`; indicates + // that this is a continuation of a prior `ListTopicSubscriptions` call, and + // that the system should return the next page of data. + string page_token = 3; +} + +// Response for the `ListTopicSubscriptions` method. +message ListTopicSubscriptionsResponse { + // The names of subscriptions attached to the topic specified in the request. + repeated string subscriptions = 1 [(google.api.resource_reference) = { + type: "pubsub.googleapis.com/Subscription" + }]; + + // If not empty, indicates that there may be more subscriptions that match + // the request; this value should be passed in a new + // `ListTopicSubscriptionsRequest` to get more subscriptions. + string next_page_token = 2; +} + +// Request for the `ListTopicSnapshots` method. +message ListTopicSnapshotsRequest { + // Required. The name of the topic that snapshots are attached to. + // Format is `projects/{project}/topics/{topic}`. + string topic = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" } + ]; + + // Maximum number of snapshot names to return. + int32 page_size = 2; + + // The value returned by the last `ListTopicSnapshotsResponse`; indicates + // that this is a continuation of a prior `ListTopicSnapshots` call, and + // that the system should return the next page of data. + string page_token = 3; +} + +// Response for the `ListTopicSnapshots` method. +message ListTopicSnapshotsResponse { + // The names of the snapshots that match the request. + repeated string snapshots = 1; + + // If not empty, indicates that there may be more snapshots that match + // the request; this value should be passed in a new + // `ListTopicSnapshotsRequest` to get more snapshots. + string next_page_token = 2; +} + +// Request for the `DeleteTopic` method. +message DeleteTopicRequest { + // Required. Name of the topic to delete. + // Format is `projects/{project}/topics/{topic}`. + string topic = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" } + ]; +} + +// Request for the DetachSubscription method. +message DetachSubscriptionRequest { + // Required. The subscription to detach. + // Format is `projects/{project}/subscriptions/{subscription}`. + string subscription = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "pubsub.googleapis.com/Subscription" + } + ]; +} + +// Response for the DetachSubscription method. +// Reserved for future use. 
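+//
+// A sketch of the detach flow (assumed tonic-generated client; the response
+// carries no data, and the subscription name is illustrative):
+//
+//     client
+//         .detach_subscription(DetachSubscriptionRequest {
+//             subscription: "projects/p/subscriptions/s".into(),
+//         })
+//         .await?;
+//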
+message DetachSubscriptionResponse {} + +// The service that an application uses to manipulate subscriptions and to +// consume messages from a subscription via the `Pull` method or by +// establishing a bi-directional stream using the `StreamingPull` method. +service Subscriber { + option (google.api.default_host) = "pubsub.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/pubsub"; + + // Creates a subscription to a given topic. See the [resource name rules] + // (https://cloud.google.com/pubsub/docs/admin#resource_names). + // If the subscription already exists, returns `ALREADY_EXISTS`. + // If the corresponding topic doesn't exist, returns `NOT_FOUND`. + // + // If the name is not provided in the request, the server will assign a random + // name for this subscription on the same project as the topic, conforming + // to the [resource name format] + // (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated + // name is populated in the returned Subscription object. Note that for REST + // API requests, you must specify a name in the request. + rpc CreateSubscription(Subscription) returns (Subscription) { + option (google.api.http) = { + put: "/v1/{name=projects/*/subscriptions/*}" + body: "*" + }; + option (google.api.method_signature) = + "name,topic,push_config,ack_deadline_seconds"; + } + + // Gets the configuration details of a subscription. + rpc GetSubscription(GetSubscriptionRequest) returns (Subscription) { + option (google.api.http) = { + get: "/v1/{subscription=projects/*/subscriptions/*}" + }; + option (google.api.method_signature) = "subscription"; + } + + // Updates an existing subscription. Note that certain properties of a + // subscription, such as its topic, are not modifiable. + rpc UpdateSubscription(UpdateSubscriptionRequest) returns (Subscription) { + option (google.api.http) = { + patch: "/v1/{subscription.name=projects/*/subscriptions/*}" + body: "*" + }; + } + + // Lists matching subscriptions. + rpc ListSubscriptions(ListSubscriptionsRequest) + returns (ListSubscriptionsResponse) { + option (google.api.http) = { + get: "/v1/{project=projects/*}/subscriptions" + }; + option (google.api.method_signature) = "project"; + } + + // Deletes an existing subscription. All messages retained in the subscription + // are immediately dropped. Calls to `Pull` after deletion will return + // `NOT_FOUND`. After a subscription is deleted, a new one may be created with + // the same name, but the new one has no association with the old + // subscription or its topic unless the same topic is specified. + rpc DeleteSubscription(DeleteSubscriptionRequest) + returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{subscription=projects/*/subscriptions/*}" + }; + option (google.api.method_signature) = "subscription"; + } + + // Modifies the ack deadline for a specific message. This method is useful + // to indicate that more time is needed to process a message by the + // subscriber, or to make the message available for redelivery if the + // processing was interrupted. Note that this does not modify the + // subscription-level `ackDeadlineSeconds` used for subsequent messages. 
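+  //
+  // A sketch (assumed tonic-generated client; names illustrative): extending
+  // the deadline of one message by 60 seconds, or nacking it by using 0. The
+  // request fields are defined later in this file:
+  //
+  //     client
+  //         .modify_ack_deadline(ModifyAckDeadlineRequest {
+  //             subscription: "projects/p/subscriptions/s".into(),
+  //             ack_ids: vec![ack_id.clone()],
+  //             ack_deadline_seconds: 60, // 0 requests immediate redelivery
+  //         })
+  //         .await?;
+  //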
+ rpc ModifyAckDeadline(ModifyAckDeadlineRequest) + returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v1/{subscription=projects/*/subscriptions/*}:modifyAckDeadline" + body: "*" + }; + option (google.api.method_signature) = + "subscription,ack_ids,ack_deadline_seconds"; + } + + // Acknowledges the messages associated with the `ack_ids` in the + // `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages + // from the subscription. + // + // Acknowledging a message whose ack deadline has expired may succeed, + // but such a message may be redelivered later. Acknowledging a message more + // than once will not result in an error. + rpc Acknowledge(AcknowledgeRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v1/{subscription=projects/*/subscriptions/*}:acknowledge" + body: "*" + }; + option (google.api.method_signature) = "subscription,ack_ids"; + } + + // Pulls messages from the server. The server may return `UNAVAILABLE` if + // there are too many concurrent pull requests pending for the given + // subscription. + rpc Pull(PullRequest) returns (PullResponse) { + option (google.api.http) = { + post: "/v1/{subscription=projects/*/subscriptions/*}:pull" + body: "*" + }; + option (google.api.method_signature) = + "subscription,return_immediately,max_messages"; + } + + // Establishes a stream with the server, which sends messages down to the + // client. The client streams acknowledgements and ack deadline modifications + // back to the server. The server will close the stream and return the status + // on any error. The server may close the stream with status `UNAVAILABLE` to + // reassign server-side resources, in which case, the client should + // re-establish the stream. Flow control can be achieved by configuring the + // underlying RPC channel. + rpc StreamingPull(stream StreamingPullRequest) + returns (stream StreamingPullResponse) {} + + // Modifies the `PushConfig` for a specified subscription. + // + // This may be used to change a push subscription to a pull one (signified by + // an empty `PushConfig`) or vice versa, or change the endpoint URL and other + // attributes of a push subscription. Messages will accumulate for delivery + // continuously through the call regardless of changes to the `PushConfig`. + rpc ModifyPushConfig(ModifyPushConfigRequest) + returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v1/{subscription=projects/*/subscriptions/*}:modifyPushConfig" + body: "*" + }; + option (google.api.method_signature) = "subscription,push_config"; + } + + // Gets the configuration details of a snapshot. Snapshots are used in + // Seek + // operations, which allow you to manage message acknowledgments in bulk. That + // is, you can set the acknowledgment state of messages in an existing + // subscription to the state captured by a snapshot. + rpc GetSnapshot(GetSnapshotRequest) returns (Snapshot) { + option (google.api.http) = { + get: "/v1/{snapshot=projects/*/snapshots/*}" + }; + option (google.api.method_signature) = "snapshot"; + } + + // Lists the existing snapshots. Snapshots are used in [Seek]( + // https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. 
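+  //
+  // Before the snapshot RPCs below, a sketch of the unary pull/acknowledge
+  // loop described above (assumed tonic-generated client; request and response
+  // fields are defined later in this file, names illustrative):
+  //
+  //     let resp = client
+  //         .pull(PullRequest {
+  //             subscription: "projects/p/subscriptions/s".into(),
+  //             max_messages: 10,
+  //             ..Default::default()
+  //         })
+  //         .await?
+  //         .into_inner();
+  //     let ack_ids: Vec<String> = resp
+  //         .received_messages
+  //         .iter()
+  //         .map(|m| m.ack_id.clone())
+  //         .collect();
+  //     if !ack_ids.is_empty() {
+  //         client
+  //             .acknowledge(AcknowledgeRequest {
+  //                 subscription: "projects/p/subscriptions/s".into(),
+  //                 ack_ids,
+  //             })
+  //             .await?;
+  //     }
+  //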
+ rpc ListSnapshots(ListSnapshotsRequest) returns (ListSnapshotsResponse) { + option (google.api.http) = { + get: "/v1/{project=projects/*}/snapshots" + }; + option (google.api.method_signature) = "project"; + } + + // Creates a snapshot from the requested subscription. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + // If the snapshot already exists, returns `ALREADY_EXISTS`. + // If the requested subscription doesn't exist, returns `NOT_FOUND`. + // If the backlog in the subscription is too old -- and the resulting snapshot + // would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. + // See also the `Snapshot.expire_time` field. If the name is not provided in + // the request, the server will assign a random + // name for this snapshot on the same project as the subscription, conforming + // to the [resource name format] + // (https://cloud.google.com/pubsub/docs/admin#resource_names). The + // generated name is populated in the returned Snapshot object. Note that for + // REST API requests, you must specify a name in the request. + rpc CreateSnapshot(CreateSnapshotRequest) returns (Snapshot) { + option (google.api.http) = { + put: "/v1/{name=projects/*/snapshots/*}" + body: "*" + }; + option (google.api.method_signature) = "name,subscription"; + } + + // Updates an existing snapshot. Snapshots are used in + // Seek + // operations, which allow + // you to manage message acknowledgments in bulk. That is, you can set the + // acknowledgment state of messages in an existing subscription to the state + // captured by a snapshot. + rpc UpdateSnapshot(UpdateSnapshotRequest) returns (Snapshot) { + option (google.api.http) = { + patch: "/v1/{snapshot.name=projects/*/snapshots/*}" + body: "*" + }; + } + + // Removes an existing snapshot. Snapshots are used in [Seek] + // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + // When the snapshot is deleted, all messages retained in the snapshot + // are immediately dropped. After a snapshot is deleted, a new one may be + // created with the same name, but the new one has no association with the old + // snapshot or its subscription, unless the same subscription is specified. + rpc DeleteSnapshot(DeleteSnapshotRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{snapshot=projects/*/snapshots/*}" + }; + option (google.api.method_signature) = "snapshot"; + } + + // Seeks an existing subscription to a point in time or to a given snapshot, + // whichever is provided in the request. Snapshots are used in [Seek] + // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. Note that both the subscription and the + // snapshot must be on the same topic. 
+  rpc Seek(SeekRequest) returns (SeekResponse) {
+    option (google.api.http) = {
+      post: "/v1/{subscription=projects/*/subscriptions/*}:seek"
+      body: "*"
+    };
+  }
+}
+
+// A subscription resource.
+message Subscription {
+  option (google.api.resource) = {
+    type: "pubsub.googleapis.com/Subscription"
+    pattern: "projects/{project}/subscriptions/{subscription}"
+  };
+
+  // Required. The name of the subscription. It must have the format
+  // `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must
+  // start with a letter, and contain only letters (`[A-Za-z]`), numbers
+  // (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`),
+  // plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters
+  // in length, and it must not start with `"goog"`.
+  string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The name of the topic from which this subscription is receiving
+  // messages. Format is `projects/{project}/topics/{topic}`. The value of this
+  // field will be `_deleted-topic_` if the topic has been deleted.
+  string topic = 2 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" }
+  ];
+
+  // If push delivery is used with this subscription, this field is
+  // used to configure it. An empty `pushConfig` signifies that the subscriber
+  // will pull and ack messages using API methods.
+  PushConfig push_config = 4;
+
+  // The approximate amount of time (on a best-effort basis) Pub/Sub waits for
+  // the subscriber to acknowledge receipt before resending the message. In the
+  // interval after the message is delivered and before it is acknowledged, it
+  // is considered to be outstanding. During that time period, the
+  // message will not be redelivered (on a best-effort basis).
+  //
+  // For pull subscriptions, this value is used as the initial value for the ack
+  // deadline. To override this value for a given message, call
+  // `ModifyAckDeadline` with the corresponding `ack_id` if using
+  // non-streaming pull or send the `ack_id` in a
+  // `StreamingModifyAckDeadlineRequest` if using streaming pull.
+  // The minimum custom deadline you can specify is 10 seconds.
+  // The maximum custom deadline you can specify is 600 seconds (10 minutes).
+  // If this parameter is 0, a default value of 10 seconds is used.
+  //
+  // For push delivery, this value is also used to set the request timeout for
+  // the call to the push endpoint.
+  //
+  // If the subscriber never acknowledges the message, the Pub/Sub
+  // system will eventually redeliver the message.
+  int32 ack_deadline_seconds = 5;
+
+  // Indicates whether to retain acknowledged messages. If true, then
+  // messages are not expunged from the subscription's backlog, even if they are
+  // acknowledged, until they fall out of the `message_retention_duration`
+  // window. This must be true if you would like to [Seek to a timestamp]
+  // (https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time).
+  bool retain_acked_messages = 7;
+
+  // How long to retain unacknowledged messages in the subscription's backlog,
+  // from the moment a message is published.
+  // If `retain_acked_messages` is true, then this also configures the retention
+  // of acknowledged messages, and thus configures how far back in time a `Seek`
+  // can be done. Defaults to 7 days. Cannot be more than 7 days or less than 10
+  // minutes.
+  google.protobuf.Duration message_retention_duration = 8;
+
+  // See [Creating and managing labels]
+  // (https://cloud.google.com/pubsub/docs/labels).
+  map<string, string> labels = 9;
+
+  // If true, messages published with the same `ordering_key` in `PubsubMessage`
+  // will be delivered to the subscribers in the order in which they
+  // are received by the Pub/Sub system. Otherwise, they may be delivered in
+  // any order.
+  bool enable_message_ordering = 10;
+
+  // A policy that specifies the conditions for this subscription's expiration.
+  // A subscription is considered active as long as any connected subscriber is
+  // successfully consuming messages from the subscription or is issuing
+  // operations on the subscription. If `expiration_policy` is not set, a
+  // *default policy* with `ttl` of 31 days will be used. The minimum allowed
+  // value for `expiration_policy.ttl` is 1 day.
+  ExpirationPolicy expiration_policy = 11;
+
+  // An expression written in the Pub/Sub [filter
+  // language](https://cloud.google.com/pubsub/docs/filtering). If non-empty,
+  // then only `PubsubMessage`s whose `attributes` field matches the filter are
+  // delivered on this subscription. If empty, then no messages are filtered
+  // out.
+  string filter = 12;
+
+  // A policy that specifies the conditions for dead lettering messages in
+  // this subscription. If dead_letter_policy is not set, dead lettering
+  // is disabled.
+  //
+  // The Cloud Pub/Sub service account associated with this subscription's
+  // parent project (i.e.,
+  // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have
+  // permission to Acknowledge() messages on this subscription.
+  DeadLetterPolicy dead_letter_policy = 13;
+
+  // A policy that specifies how Pub/Sub retries message delivery for this
+  // subscription.
+  //
+  // If not set, the default retry policy is applied. This generally implies
+  // that messages will be retried as soon as possible for healthy subscribers.
+  // RetryPolicy will be triggered on NACKs or acknowledgement deadline
+  // exceeded events for a given message.
+  RetryPolicy retry_policy = 14;
+
+  // Indicates whether the subscription is detached from its topic. Detached
+  // subscriptions don't receive messages from their topic and don't retain any
+  // backlog. `Pull` and `StreamingPull` requests will return
+  // FAILED_PRECONDITION. If the subscription is a push subscription, pushes to
+  // the endpoint will not be made.
+  bool detached = 15;
+}
+
+// A policy that specifies how Cloud Pub/Sub retries message delivery.
+//
+// Retry delay will be exponential based on provided minimum and maximum
+// backoffs. https://en.wikipedia.org/wiki/Exponential_backoff.
+//
+// RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded
+// events for a given message.
+//
+// Retry Policy is implemented on a best effort basis. At times, the delay
+// between consecutive deliveries may not match the configuration. That is,
+// the delay can be more or less than the configured backoff.
+message RetryPolicy {
+  // The minimum delay between consecutive deliveries of a given message.
+  // Value should be between 0 and 600 seconds. Defaults to 10 seconds.
+  google.protobuf.Duration minimum_backoff = 1;
+
+  // The maximum delay between consecutive deliveries of a given message.
+  // Value should be between 0 and 600 seconds. Defaults to 600 seconds.
+  google.protobuf.Duration maximum_backoff = 2;
+}
+
+// Dead lettering is done on a best effort basis. The same message might be
+// dead lettered multiple times.
+//
+// If validation on any of the fields fails at subscription creation or update,
+// the create/update subscription request will fail.
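+//
+// An illustrative policy in proto text format (a sketch; the topic name is
+// a placeholder):
+//
+//     DeadLetterPolicy {
+//       dead_letter_topic: "projects/my-project/topics/my-dead-letters"
+//       max_delivery_attempts: 5
+//     }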
+message DeadLetterPolicy {
+  // The name of the topic to which dead letter messages should be published.
+  // Format is `projects/{project}/topics/{topic}`. The Cloud Pub/Sub service
+  // account associated with the enclosing subscription's parent project (i.e.,
+  // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have
+  // permission to Publish() to this topic.
+  //
+  // The operation will fail if the topic does not exist.
+  // Users should ensure that there is a subscription attached to this topic
+  // since messages published to a topic with no subscriptions are lost.
+  string dead_letter_topic = 1;
+
+  // The maximum number of delivery attempts for any message. The value must be
+  // between 5 and 100.
+  //
+  // The number of delivery attempts is defined as 1 + (the sum of number of
+  // NACKs and number of times the acknowledgement deadline has been exceeded
+  // for the message).
+  //
+  // A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that
+  // client libraries may automatically extend ack_deadlines.
+  //
+  // This field will be honored on a best effort basis.
+  //
+  // If this parameter is 0, a default value of 5 is used.
+  int32 max_delivery_attempts = 2;
+}
+
+// A policy that specifies the conditions for resource expiration (i.e.,
+// automatic resource deletion).
+message ExpirationPolicy {
+  // Specifies the "time-to-live" duration for an associated resource. The
+  // resource expires if it is not active for a period of `ttl`. The definition
+  // of "activity" depends on the type of the associated resource. The minimum
+  // and maximum allowed values for `ttl` depend on the type of the associated
+  // resource, as well. If `ttl` is not set, the associated resource never
+  // expires.
+  google.protobuf.Duration ttl = 1;
+}
+
+// Configuration for a push delivery endpoint.
+message PushConfig {
+  // Contains information needed for generating an
+  // [OpenID Connect
+  // token](https://developers.google.com/identity/protocols/OpenIDConnect).
+  message OidcToken {
+    // [Service account
+    // email](https://cloud.google.com/iam/docs/service-accounts)
+    // to be used for generating the OIDC token. The caller (for
+    // CreateSubscription, UpdateSubscription, and ModifyPushConfig RPCs) must
+    // have the iam.serviceAccounts.actAs permission for the service account.
+    string service_account_email = 1;
+
+    // Audience to be used when generating OIDC token. The audience claim
+    // identifies the recipients that the JWT is intended for. The audience
+    // value is a single case-sensitive string. Having multiple values (array)
+    // for the audience field is not supported. More info about the OIDC JWT
+    // token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3
+    // Note: if not specified, the Push endpoint URL will be used.
+    string audience = 2;
+  }
+
+  // A URL locating the endpoint to which messages should be pushed.
+  // For example, a Webhook endpoint might use `https://example.com/push`.
+  string push_endpoint = 1;
+
+  // Endpoint configuration attributes that can be used to control different
+  // aspects of the message delivery.
+  //
+  // The only currently supported attribute is `x-goog-version`, which you can
+  // use to change the format of the pushed message. This attribute
+  // indicates the version of the data expected by the endpoint. This
+  // controls the shape of the pushed message (i.e., its fields and metadata).
+  //
+  // If not present during the `CreateSubscription` call, it will default to
+  // the version of the Pub/Sub API used to make such call. If not present in a
+  // `ModifyPushConfig` call, its value will not be changed. `GetSubscription`
+  // calls will always return a valid version, even if the subscription was
+  // created without this attribute.
+  //
+  // The only supported values for the `x-goog-version` attribute are:
+  //
+  // * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API.
+  // * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API.
+  //
+  // For example:
+  //
+  //     attributes { "x-goog-version": "v1" }
+  map<string, string> attributes = 2;
+
+  // An authentication method used by push endpoints to verify the source of
+  // push requests. This can be used with push endpoints that are private by
+  // default to allow requests only from the Cloud Pub/Sub system, for example.
+  // This field is optional and should be set only by users interested in
+  // authenticated push.
+  oneof authentication_method {
+    // If specified, Pub/Sub will generate and attach an OIDC JWT token as an
+    // `Authorization` header in the HTTP request for every pushed message.
+    OidcToken oidc_token = 3;
+  }
+}
+
+// A message and its corresponding acknowledgment ID.
+message ReceivedMessage {
+  // This ID can be used to acknowledge the received message.
+  string ack_id = 1;
+
+  // The message.
+  PubsubMessage message = 2;
+
+  // The approximate number of times that Cloud Pub/Sub has attempted to deliver
+  // the associated message to a subscriber.
+  //
+  // More precisely, this is 1 + (number of NACKs) +
+  // (number of ack_deadline exceeds) for this message.
+  //
+  // A NACK is any call to ModifyAckDeadline with a 0 deadline. An ack_deadline
+  // exceeds event is whenever a message is not acknowledged within
+  // ack_deadline. Note that ack_deadline is initially
+  // Subscription.ackDeadlineSeconds, but may get extended automatically by
+  // the client library.
+  //
+  // Upon the first delivery of a given message, `delivery_attempt` will have a
+  // value of 1. The value is calculated at best effort and is approximate.
+  //
+  // If a DeadLetterPolicy is not set on the subscription, this will be 0.
+  int32 delivery_attempt = 3;
+}
+
+// Request for the GetSubscription method.
+message GetSubscriptionRequest {
+  // Required. The name of the subscription to get.
+  // Format is `projects/{project}/subscriptions/{sub}`.
+  string subscription = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "pubsub.googleapis.com/Subscription"
+    }
+  ];
+}
+
+// Request for the UpdateSubscription method.
+message UpdateSubscriptionRequest {
+  // Required. The updated subscription object.
+  Subscription subscription = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Indicates which fields in the provided subscription to update.
+  // Must be specified and non-empty.
+  google.protobuf.FieldMask update_mask = 2
+      [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request for the `ListSubscriptions` method.
+message ListSubscriptionsRequest {
+  // Required. The name of the project in which to list subscriptions.
+  // Format is `projects/{project-id}`.
+  string project = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "cloudresourcemanager.googleapis.com/Project"
+    }
+  ];
+
+  // Maximum number of subscriptions to return.
+  int32 page_size = 2;
+
+  // The value returned by the last `ListSubscriptionsResponse`; indicates that
+  // this is a continuation of a prior `ListSubscriptions` call, and that the
+  // system should return the next page of data.
+  string page_token = 3;
+}
+
+// Response for the `ListSubscriptions` method.
+message ListSubscriptionsResponse {
+  // The subscriptions that match the request.
+  repeated Subscription subscriptions = 1;
+
+  // If not empty, indicates that there may be more subscriptions that match
+  // the request; this value should be passed in a new
+  // `ListSubscriptionsRequest` to get more subscriptions.
+  string next_page_token = 2;
+}
+
+// Request for the DeleteSubscription method.
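+//
+// For example, in proto text format (illustrative; the subscription name is
+// a placeholder):
+//
+//     DeleteSubscriptionRequest {
+//       subscription: "projects/my-project/subscriptions/my-sub"
+//     }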
+message DeleteSubscriptionRequest {
+  // Required. The subscription to delete.
+  // Format is `projects/{project}/subscriptions/{sub}`.
+  string subscription = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "pubsub.googleapis.com/Subscription"
+    }
+  ];
+}
+
+// Request for the ModifyPushConfig method.
+message ModifyPushConfigRequest {
+  // Required. The name of the subscription.
+  // Format is `projects/{project}/subscriptions/{sub}`.
+  string subscription = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "pubsub.googleapis.com/Subscription"
+    }
+  ];
+
+  // Required. The push configuration for future deliveries.
+  //
+  // An empty `pushConfig` indicates that the Pub/Sub system should
+  // stop pushing messages from the given subscription and allow
+  // messages to be pulled and acknowledged - effectively pausing
+  // the subscription if `Pull` or `StreamingPull` is not called.
+  PushConfig push_config = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request for the `Pull` method.
+message PullRequest {
+  // Required. The subscription from which messages should be pulled.
+  // Format is `projects/{project}/subscriptions/{sub}`.
+  string subscription = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "pubsub.googleapis.com/Subscription"
+    }
+  ];
+
+  // Optional. If this field is set to true, the system will respond
+  // immediately even if there are no messages available to return in the
+  // `Pull` response. Otherwise, the system may wait (for a bounded amount of
+  // time) until at least one message is available, rather than returning no
+  // messages. Warning: setting this field to `true` is discouraged because it
+  // adversely impacts the performance of `Pull` operations. We recommend that
+  // users do not set this field.
+  bool return_immediately = 2
+      [deprecated = true, (google.api.field_behavior) = OPTIONAL];
+
+  // Required. The maximum number of messages to return for this request. Must
+  // be a positive integer. The Pub/Sub system may return fewer than the number
+  // specified.
+  int32 max_messages = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Response for the `Pull` method.
+message PullResponse {
+  // Received Pub/Sub messages. The list will be empty if there are no more
+  // messages available in the backlog. For JSON, the response can be entirely
+  // empty. The Pub/Sub system may return fewer than the `maxMessages` requested
+  // even if there are more messages available in the backlog.
+  repeated ReceivedMessage received_messages = 1;
+}
+
+// Request for the ModifyAckDeadline method.
+message ModifyAckDeadlineRequest {
+  // Required. The name of the subscription.
+  // Format is `projects/{project}/subscriptions/{sub}`.
+  string subscription = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "pubsub.googleapis.com/Subscription"
+    }
+  ];
+
+  // Required. List of acknowledgment IDs.
+  repeated string ack_ids = 4 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The new ack deadline with respect to the time this request was
+  // sent to the Pub/Sub system. For example, if the value is 10, the new ack
+  // deadline will expire 10 seconds after the `ModifyAckDeadline` call was
+  // made. Specifying zero might immediately make the message available for
+  // delivery to another subscriber client.
+  // This typically results in an increase in the rate of message redeliveries
+  // (that is, duplicates).
+  // The minimum deadline you can specify is 0 seconds.
+  // The maximum deadline you can specify is 600 seconds (10 minutes).
+  int32 ack_deadline_seconds = 3 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request for the Acknowledge method.
+message AcknowledgeRequest {
+  // Required. The subscription whose message is being acknowledged.
+  // Format is `projects/{project}/subscriptions/{sub}`.
+  string subscription = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "pubsub.googleapis.com/Subscription"
+    }
+  ];
+
+  // Required. The acknowledgment ID for the messages being acknowledged that
+  // was returned by the Pub/Sub system in the `Pull` response. Must not be
+  // empty.
+  repeated string ack_ids = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request for the `StreamingPull` streaming RPC method. This request is used to
+// establish the initial stream as well as to stream acknowledgements and ack
+// deadline modifications from the client to the server.
+message StreamingPullRequest {
+  // Required. The subscription for which to initialize the new stream. This
+  // must be provided in the first request on the stream, and must not be set in
+  // subsequent requests from client to server.
+  // Format is `projects/{project}/subscriptions/{sub}`.
+  string subscription = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "pubsub.googleapis.com/Subscription"
+    }
+  ];
+
+  // List of acknowledgement IDs for acknowledging previously received messages
+  // (received on this stream or a different stream). If an ack ID has expired,
+  // the corresponding message may be redelivered later. Acknowledging a message
+  // more than once will not result in an error. If the acknowledgement ID is
+  // malformed, the stream will be aborted with status `INVALID_ARGUMENT`.
+  repeated string ack_ids = 2;
+
+  // The list of new ack deadlines for the IDs listed in
+  // `modify_deadline_ack_ids`. The size of this list must be the same as the
+  // size of `modify_deadline_ack_ids`. If it differs the stream will be aborted
+  // with `INVALID_ARGUMENT`. Each element in this list is applied to the
+  // element in the same position in `modify_deadline_ack_ids`. The new ack
+  // deadline is with respect to the time this request was sent to the Pub/Sub
+  // system. Must be >= 0. For example, if the value is 10, the new ack deadline
+  // will expire 10 seconds after this request is received. If the value is 0,
+  // the message is immediately made available for another streaming or
+  // non-streaming pull request. If the value is < 0 (an error), the stream will
+  // be aborted with status `INVALID_ARGUMENT`.
+  repeated int32 modify_deadline_seconds = 3;
+
+  // List of acknowledgement IDs whose deadline will be modified based on the
+  // corresponding element in `modify_deadline_seconds`. This field can be used
+  // to indicate that more time is needed to process a message by the
+  // subscriber, or to make the message available for redelivery if the
+  // processing was interrupted.
+  repeated string modify_deadline_ack_ids = 4;
+
+  // Required. The ack deadline to use for the stream. This must be provided in
+  // the first request on the stream, but it can also be updated on subsequent
+  // requests from client to server. The minimum deadline you can specify is 10
+  // seconds.
+  // The maximum deadline you can specify is 600 seconds (10 minutes).
+  int32 stream_ack_deadline_seconds = 5
+      [(google.api.field_behavior) = REQUIRED];
+
+  // A unique identifier that is used to distinguish client instances from each
+  // other. Only needs to be provided on the initial request. When a stream
+  // disconnects and reconnects for the same stream, the client_id should be set
+  // to the same value so that state associated with the old stream can be
+  // transferred to the new stream. The same client_id should not be used for
+  // different client instances.
+  string client_id = 6;
+
+  // Flow control settings for the maximum number of outstanding messages. When
+  // there are `max_outstanding_messages` or more currently sent to the
+  // streaming pull client that have not yet been acked or nacked, the server
+  // stops sending more messages. The sending of messages resumes once the
+  // number of outstanding messages is less than this value. If the value is
+  // <= 0, there is no limit to the number of outstanding messages. This
+  // property can only be set on the initial StreamingPullRequest. If it is set
+  // on a subsequent request, the stream will be aborted with status
+  // `INVALID_ARGUMENT`.
+  int64 max_outstanding_messages = 7;
+
+  // Flow control settings for the maximum number of outstanding bytes. When
+  // there are `max_outstanding_bytes` or more worth of messages currently sent
+  // to the streaming pull client that have not yet been acked or nacked, the
+  // server will stop sending more messages. The sending of messages resumes
+  // once the number of outstanding bytes is less than this value. If the value
+  // is <= 0, there is no limit to the number of outstanding bytes. This
+  // property can only be set on the initial StreamingPullRequest. If it is set
+  // on a subsequent request, the stream will be aborted with status
+  // `INVALID_ARGUMENT`.
+  int64 max_outstanding_bytes = 8;
+}
+
+// Response for the `StreamingPull` method. This response is used to stream
+// messages from the server to the client.
+message StreamingPullResponse {
+  // Received Pub/Sub messages. This will not be empty.
+  repeated ReceivedMessage received_messages = 1;
+}
+
+// Request for the `CreateSnapshot` method.
+message CreateSnapshotRequest {
+  // Required. User-provided name for this snapshot. If the name is not provided
+  // in the request, the server will assign a random name for this snapshot on
+  // the same project as the subscription. Note that for REST API requests, you
+  // must specify a name. See the [resource name
+  // rules](https://cloud.google.com/pubsub/docs/admin#resource_names).
+  // Format is `projects/{project}/snapshots/{snap}`.
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "pubsub.googleapis.com/Snapshot" }
+  ];
+
+  // Required. The subscription whose backlog the snapshot retains.
+  // Specifically, the created snapshot is guaranteed to retain:
+  //  (a) The existing backlog on the subscription. More precisely, this is
+  //      defined as the messages in the subscription's backlog that are
+  //      unacknowledged upon the successful completion of the
+  //      `CreateSnapshot` request; as well as:
+  //  (b) Any messages published to the subscription's topic following the
+  //      successful completion of the CreateSnapshot request.
+  // Format is `projects/{project}/subscriptions/{sub}`.
+  string subscription = 2 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "pubsub.googleapis.com/Subscription"
+    }
+  ];
+
+  // See [Creating and managing
+  // labels](https://cloud.google.com/pubsub/docs/labels).
+  map<string, string> labels = 3;
+}
+
+// Request for the UpdateSnapshot method.
+message UpdateSnapshotRequest {
+  // Required. The updated snapshot object.
+  Snapshot snapshot = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Indicates which fields in the provided snapshot to update.
+  // Must be specified and non-empty.
+  google.protobuf.FieldMask update_mask = 2
+      [(google.api.field_behavior) = REQUIRED];
+}
+
+// A snapshot resource. Snapshots are used in
+// [Seek](https://cloud.google.com/pubsub/docs/replay-overview)
+// operations, which allow you to manage message acknowledgments in bulk. That
+// is, you can set the acknowledgment state of messages in an existing
+// subscription to the state captured by a snapshot.
+message Snapshot {
+  option (google.api.resource) = {
+    type: "pubsub.googleapis.com/Snapshot"
+    pattern: "projects/{project}/snapshots/{snapshot}"
+  };
+
+  // The name of the snapshot.
+  string name = 1;
+
+  // The name of the topic from which this snapshot is retaining messages.
+  string topic = 2 [
+    (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" }
+  ];
+
+  // The snapshot is guaranteed to exist up until this time.
+  // A newly-created snapshot expires no later than 7 days from the time of its
+  // creation. Its exact lifetime is determined at creation by the existing
+  // backlog in the source subscription. Specifically, the lifetime of the
+  // snapshot is `7 days - (age of oldest unacked message in the subscription)`.
+  // For example, consider a subscription whose oldest unacked message is 3 days
+  // old. If a snapshot is created from this subscription, the snapshot -- which
+  // will always capture this 3-day-old backlog as long as the snapshot
+  // exists -- will expire in 4 days. The service will refuse to create a
+  // snapshot that would expire in less than 1 hour after creation.
+  google.protobuf.Timestamp expire_time = 3;
+
+  // See [Creating and managing labels]
+  // (https://cloud.google.com/pubsub/docs/labels).
+  map<string, string> labels = 4;
+}
+
+// Request for the GetSnapshot method.
+message GetSnapshotRequest {
+  // Required. The name of the snapshot to get.
+  // Format is `projects/{project}/snapshots/{snap}`.
+  string snapshot = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = { type: "pubsub.googleapis.com/Snapshot" }
+  ];
+}
+
+// Request for the `ListSnapshots` method.
+message ListSnapshotsRequest {
+  // Required. The name of the project in which to list snapshots.
+  // Format is `projects/{project-id}`.
+  string project = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "cloudresourcemanager.googleapis.com/Project"
+    }
+  ];
+
+  // Maximum number of snapshots to return.
+  int32 page_size = 2;
+
+  // The value returned by the last `ListSnapshotsResponse`; indicates that this
+  // is a continuation of a prior `ListSnapshots` call, and that the system
+  // should return the next page of data.
+  string page_token = 3;
+}
+
+// Response for the `ListSnapshots` method.
+message ListSnapshotsResponse {
+  // The resulting snapshots.
+  repeated Snapshot snapshots = 1;
+
+  // If not empty, indicates that there may be more snapshots that match the
+  // request; this value should be passed in a new `ListSnapshotsRequest`.
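+  //
+  // For example, a follow-up page request in proto text format (a sketch;
+  // the project name and token below are placeholders):
+  //
+  //     ListSnapshotsRequest {
+  //       project: "projects/my-project"
+  //       page_token: "<next_page_token from the previous response>"
+  //     }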
+ string next_page_token = 2; +} + +// Request for the `DeleteSnapshot` method. +message DeleteSnapshotRequest { + // Required. The name of the snapshot to delete. + // Format is `projects/{project}/snapshots/{snap}`. + string snapshot = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "pubsub.googleapis.com/Snapshot" } + ]; +} + +// Request for the `Seek` method. +message SeekRequest { + // Required. The subscription to affect. + string subscription = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "pubsub.googleapis.com/Subscription" + } + ]; + + oneof target { + // The time to seek to. + // Messages retained in the subscription that were published before this + // time are marked as acknowledged, and messages retained in the + // subscription that were published after this time are marked as + // unacknowledged. Note that this operation affects only those messages + // retained in the subscription (configured by the combination of + // `message_retention_duration` and `retain_acked_messages`). For example, + // if `time` corresponds to a point before the message retention + // window (or to a point before the system's notion of the subscription + // creation time), only retained messages will be marked as unacknowledged, + // and already-expunged messages will not be restored. + google.protobuf.Timestamp time = 2; + + // The snapshot to seek to. The snapshot's topic must be the same as that of + // the provided subscription. + // Format is `projects/{project}/snapshots/{snap}`. + string snapshot = 3 [(google.api.resource_reference) = { + type: "pubsub.googleapis.com/Snapshot" + }]; + } +} + +// Response for the `Seek` method (this response is empty). +message SeekResponse {} diff --git a/flyrs/protos/google/pubsub/v1/schema.proto b/flyrs/protos/google/pubsub/v1/schema.proto new file mode 100644 index 0000000000..ae402ac4de --- /dev/null +++ b/flyrs/protos/google/pubsub/v1/schema.proto @@ -0,0 +1,289 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.pubsub.v1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/empty.proto"; + +option cc_enable_arenas = true; +option csharp_namespace = "Google.Cloud.PubSub.V1"; +option go_package = "google.golang.org/genproto/googleapis/pubsub/v1;pubsub"; +option java_multiple_files = true; +option java_outer_classname = "SchemaProto"; +option java_package = "com.google.pubsub.v1"; +option php_namespace = "Google\\Cloud\\PubSub\\V1"; +option ruby_package = "Google::Cloud::PubSub::V1"; + +// Service for doing schema-related operations. +// +// EXPERIMENTAL: The Schema service is in development and may not work yet. 
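+//
+// An illustrative `CreateSchemaRequest` in proto text format (a sketch; the
+// project, schema ID, and definition below are placeholders):
+//
+//     CreateSchemaRequest {
+//       parent: "projects/my-project"
+//       schema_id: "my-schema"
+//       schema { type: AVRO definition: "..." }
+//     }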
+ +service SchemaService { + option (google.api.default_host) = "pubsub.googleapis.com"; + option (google.api.oauth_scopes) = + "https://www.googleapis.com/auth/cloud-platform," + "https://www.googleapis.com/auth/pubsub"; + + // Creates a schema. + rpc CreateSchema(CreateSchemaRequest) returns (Schema) { + option (google.api.http) = { + post: "/v1/{parent=projects/*}/schemas" + body: "schema" + }; + option (google.api.method_signature) = "parent,schema,schema_id"; + } + + // Gets a schema. + rpc GetSchema(GetSchemaRequest) returns (Schema) { + option (google.api.http) = { + get: "/v1/{name=projects/*/schemas/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists schemas in a project. + rpc ListSchemas(ListSchemasRequest) returns (ListSchemasResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*}/schemas" + }; + option (google.api.method_signature) = "parent"; + } + + // Deletes a schema. + rpc DeleteSchema(DeleteSchemaRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/schemas/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Validates a schema. + rpc ValidateSchema(ValidateSchemaRequest) returns (ValidateSchemaResponse) { + option (google.api.http) = { + post: "/v1/{parent=projects/*}/schemas:validate" + body: "*" + }; + option (google.api.method_signature) = "parent,schema"; + } + + // Validates a message against a schema. + rpc ValidateMessage(ValidateMessageRequest) + returns (ValidateMessageResponse) { + option (google.api.http) = { + post: "/v1/{parent=projects/*}/schemas:validateMessage" + body: "*" + }; + } +} + +// A schema resource. +message Schema { + option (google.api.resource) = { + type: "pubsub.googleapis.com/Schema" + pattern: "projects/{project}/schemas/{schema}" + }; + + // Possible schema definition types. + enum Type { + // Default value. This value is unused. + TYPE_UNSPECIFIED = 0; + + // A Protocol Buffer schema definition. + PROTOCOL_BUFFER = 1; + + // An Avro schema definition. + AVRO = 2; + } + + // Required. Name of the schema. + // Format is `projects/{project}/schemas/{schema}`. + string name = 1 [(google.api.field_behavior) = REQUIRED]; + + // The type of the schema definition. + Type type = 2; + + // The definition of the schema. This should contain a string representing + // the full definition of the schema that is a valid schema definition of + // the type specified in `type`. + string definition = 3; +} + +// Request for the CreateSchema method. +message CreateSchemaRequest { + // Required. The name of the project in which to create the schema. + // Format is `projects/{project-id}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + child_type: "pubsub.googleapis.com/Schema" + } + ]; + + // Required. The schema object to create. + // + // This schema's `name` parameter is ignored. The schema object returned + // by CreateSchema will have a `name` made using the given `parent` and + // `schema_id`. + Schema schema = 2 [(google.api.field_behavior) = REQUIRED]; + + // The ID to use for the schema, which will become the final component of + // the schema's resource name. + // + // See https://cloud.google.com/pubsub/docs/admin#resource_names for resource + // name constraints. + string schema_id = 3; +} + +// View of Schema object fields to be returned by GetSchema and ListSchemas. +enum SchemaView { + // The default / unset value. + // The API will default to the BASIC view. 
+ SCHEMA_VIEW_UNSPECIFIED = 0; + + // Include the name and type of the schema, but not the definition. + BASIC = 1; + + // Include all Schema object fields. + FULL = 2; +} + +// Request for the GetSchema method. +message GetSchemaRequest { + // Required. The name of the schema to get. + // Format is `projects/{project}/schemas/{schema}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "pubsub.googleapis.com/Schema" } + ]; + + // The set of fields to return in the response. If not set, returns a Schema + // with `name` and `type`, but not `definition`. Set to `FULL` to retrieve all + // fields. + SchemaView view = 2; +} + +// Request for the `ListSchemas` method. +message ListSchemasRequest { + // Required. The name of the project in which to list schemas. + // Format is `projects/{project-id}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // The set of Schema fields to return in the response. If not set, returns + // Schemas with `name` and `type`, but not `definition`. Set to `FULL` to + // retrieve all fields. + SchemaView view = 2; + + // Maximum number of schemas to return. + int32 page_size = 3; + + // The value returned by the last `ListSchemasResponse`; indicates that + // this is a continuation of a prior `ListSchemas` call, and that the + // system should return the next page of data. + string page_token = 4; +} + +// Response for the `ListSchemas` method. +message ListSchemasResponse { + // The resulting schemas. + repeated Schema schemas = 1; + + // If not empty, indicates that there may be more schemas that match the + // request; this value should be passed in a new `ListSchemasRequest`. + string next_page_token = 2; +} + +// Request for the `DeleteSchema` method. +message DeleteSchemaRequest { + // Required. Name of the schema to delete. + // Format is `projects/{project}/schemas/{schema}`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { type: "pubsub.googleapis.com/Schema" } + ]; +} + +// Request for the `ValidateSchema` method. +message ValidateSchemaRequest { + // Required. The name of the project in which to validate schemas. + // Format is `projects/{project-id}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + // Required. The schema object to validate. + Schema schema = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Response for the `ValidateSchema` method. +message ValidateSchemaResponse {} + +// Request for the `ValidateMessage` method. +message ValidateMessageRequest { + // Required. The name of the project in which to validate schemas. + // Format is `projects/{project-id}`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "cloudresourcemanager.googleapis.com/Project" + } + ]; + + oneof schema_spec { + // Name of the schema against which to validate. + // + // Format is `projects/{project}/schemas/{schema}`. + string name = 2 [ + (google.api.resource_reference) = { type: "pubsub.googleapis.com/Schema" } + ]; + + // Ad-hoc schema against which to validate + Schema schema = 3; + } + + // Message to validate against the provided `schema_spec`. 
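+  //
+  // For example, a JSON-encoded message validated against an existing schema,
+  // in proto text format (illustrative; names and payload are placeholders):
+  //
+  //     ValidateMessageRequest {
+  //       parent: "projects/my-project"
+  //       name: "projects/my-project/schemas/my-schema"
+  //       message: "{\"id\": \"1\"}"
+  //       encoding: JSON
+  //     }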
+ bytes message = 4; + + // The encoding expected for messages + Encoding encoding = 5; +} + +// Response for the `ValidateMessage` method. +message ValidateMessageResponse {} + +// Possible encoding types for messages. +enum Encoding { + // Unspecified + ENCODING_UNSPECIFIED = 0; + + // JSON encoding + JSON = 1; + + // Binary encoding, as defined by the schema type. For some schema types, + // binary encoding may not be available. + BINARY = 2; +} diff --git a/flyrs/protos/protoc-gen-openapiv2/options/annotations.proto b/flyrs/protos/protoc-gen-openapiv2/options/annotations.proto new file mode 100644 index 0000000000..d63d3c87eb --- /dev/null +++ b/flyrs/protos/protoc-gen-openapiv2/options/annotations.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package grpc.gateway.protoc_gen_openapiv2.options; + +import "google/protobuf/descriptor.proto"; +import "protoc-gen-openapiv2/options/openapiv2.proto"; + +option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"; + +extend google.protobuf.FileOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Swagger openapiv2_swagger = 1042; +} +extend google.protobuf.MethodOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Operation openapiv2_operation = 1042; +} +extend google.protobuf.MessageOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Schema openapiv2_schema = 1042; +} +extend google.protobuf.ServiceOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + Tag openapiv2_tag = 1042; +} +extend google.protobuf.FieldOptions { + // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. + // + // All IDs are the same, as assigned. It is okay that they are the same, as they extend + // different descriptor messages. + JSONSchema openapiv2_field = 1042; +} diff --git a/flyrs/protos/protoc-gen-openapiv2/options/openapiv2.proto b/flyrs/protos/protoc-gen-openapiv2/options/openapiv2.proto new file mode 100644 index 0000000000..9a17f021ce --- /dev/null +++ b/flyrs/protos/protoc-gen-openapiv2/options/openapiv2.proto @@ -0,0 +1,720 @@ +syntax = "proto3"; + +package grpc.gateway.protoc_gen_openapiv2.options; + +import "google/protobuf/struct.proto"; + +option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"; + +// Scheme describes the schemes supported by the OpenAPI Swagger +// and Operation objects. +enum Scheme { + UNKNOWN = 0; + HTTP = 1; + HTTPS = 2; + WS = 3; + WSS = 4; +} + +// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. 
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      title: "Echo API";
+//      version: "1.0";
+//      description: "";
+//      contact: {
+//        name: "gRPC-Gateway project";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//        email: "none@example.com";
+//      };
+//      license: {
+//        name: "BSD 3-Clause License";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//      };
+//    };
+//    schemes: HTTPS;
+//    consumes: "application/json";
+//    produces: "application/json";
+//  };
+//
+message Swagger {
+  // Specifies the OpenAPI Specification version being used. It can be
+  // used by the OpenAPI UI and other clients to interpret the API listing. The
+  // value MUST be "2.0".
+  string swagger = 1;
+  // Provides metadata about the API. The metadata can be used by the
+  // clients if needed.
+  Info info = 2;
+  // The host (name or ip) serving the API. This MUST be the host only and does
+  // not include the scheme nor sub-paths. It MAY include a port. If the host is
+  // not included, the host serving the documentation is to be used (including
+  // the port). The host does not support path templating.
+  string host = 3;
+  // The base path on which the API is served, which is relative to the host. If
+  // it is not included, the API is served directly under the host. The value
+  // MUST start with a leading slash (/). The basePath does not support path
+  // templating.
+  // Note that using `base_path` does not change the endpoint paths that are
+  // generated in the resulting OpenAPI file. If you wish to use `base_path`
+  // with relatively generated OpenAPI paths, the `base_path` prefix must be
+  // manually removed from your `google.api.http` paths and your code changed to
+  // serve the API from the `base_path`.
+  string base_path = 4;
+  // The transfer protocol of the API. Values MUST be from the list: "http",
+  // "https", "ws", "wss". If the schemes is not included, the default scheme to
+  // be used is the one used to access the OpenAPI definition itself.
+  repeated Scheme schemes = 5;
+  // A list of MIME types the APIs can consume. This is global to all APIs but
+  // can be overridden on specific API calls. Value MUST be as described under
+  // Mime Types.
+  repeated string consumes = 6;
+  // A list of MIME types the APIs can produce. This is global to all APIs but
+  // can be overridden on specific API calls. Value MUST be as described under
+  // Mime Types.
+  repeated string produces = 7;
+  // field 8 is reserved for 'paths'.
+  reserved 8;
+  // field 9 is reserved for 'definitions', which at this time are already
+  // exposed as and customizable as proto messages.
+  reserved 9;
+  // An object to hold responses that can be used across operations. This
+  // property does not define global responses for all operations.
+  map<string, Response> responses = 10;
+  // Security scheme definitions that can be used across the specification.
+  SecurityDefinitions security_definitions = 11;
+  // A declaration of which security schemes are applied for the API as a whole.
+  // The list of values describes alternative security schemes that can be used
+  // (that is, there is a logical OR between the security requirements).
+  // Individual operations can override this definition.
+  repeated SecurityRequirement security = 12;
+  // A list of tags for API documentation control.
+  // Tags can be used for logical grouping of operations by resources or any
+  // other qualifier.
+  repeated Tag tags = 13;
+  // Additional external documentation.
+  ExternalDocumentation external_docs = 14;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 15;
+}
+
+// `Operation` is a representation of OpenAPI v2 specification's Operation object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
+//
+// Example:
+//
+//  service EchoService {
+//    rpc Echo(SimpleMessage) returns (SimpleMessage) {
+//      option (google.api.http) = {
+//        get: "/v1/example/echo/{id}"
+//      };
+//
+//      option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+//        summary: "Get a message.";
+//        operation_id: "getMessage";
+//        tags: "echo";
+//        responses: {
+//          key: "200"
+//          value: {
+//            description: "OK";
+//          }
+//        }
+//      };
+//    }
+//  }
+message Operation {
+  // A list of tags for API documentation control. Tags can be used for logical
+  // grouping of operations by resources or any other qualifier.
+  repeated string tags = 1;
+  // A short summary of what the operation does. For maximum readability in the
+  // swagger-ui, this field SHOULD be less than 120 characters.
+  string summary = 2;
+  // A verbose explanation of the operation behavior. GFM syntax can be used for
+  // rich text representation.
+  string description = 3;
+  // Additional external documentation for this operation.
+  ExternalDocumentation external_docs = 4;
+  // Unique string used to identify the operation. The id MUST be unique among
+  // all operations described in the API. Tools and libraries MAY use the
+  // operationId to uniquely identify an operation, therefore, it is recommended
+  // to follow common programming naming conventions.
+  string operation_id = 5;
+  // A list of MIME types the operation can consume. This overrides the consumes
+  // definition at the OpenAPI Object. An empty value MAY be used to clear the
+  // global definition. Value MUST be as described under Mime Types.
+  repeated string consumes = 6;
+  // A list of MIME types the operation can produce. This overrides the produces
+  // definition at the OpenAPI Object. An empty value MAY be used to clear the
+  // global definition. Value MUST be as described under Mime Types.
+  repeated string produces = 7;
+  // field 8 is reserved for 'parameters'.
+  reserved 8;
+  // The list of possible responses as they are returned from executing this
+  // operation.
+  map<string, Response> responses = 9;
+  // The transfer protocol for the operation. Values MUST be from the list:
+  // "http", "https", "ws", "wss". The value overrides the OpenAPI Object
+  // schemes definition.
+  repeated Scheme schemes = 10;
+  // Declares this operation to be deprecated. Usage of the declared operation
+  // should be avoided. Default value is false.
+  bool deprecated = 11;
+  // A declaration of which security schemes are applied for this operation. The
+  // list of values describes alternative security schemes that can be used
+  // (that is, there is a logical OR between the security requirements). This
+  // definition overrides any declared top-level security. To remove a top-level
+  // security declaration, an empty array can be used.
+  repeated SecurityRequirement security = 12;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 13;
+  // Custom parameters such as HTTP request headers.
+  // See: https://swagger.io/docs/specification/2-0/describing-parameters/
+  // and https://swagger.io/specification/v2/#parameter-object.
+  Parameters parameters = 14;
+}
+
+// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
+// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only
+// allow header parameters to be set here since we do not want users specifying custom non-header
+// parameters beyond those inferred from the Protobuf schema.
+// See: https://swagger.io/specification/v2/#parameter-object
+message Parameters {
+  // `Headers` is one or more HTTP header parameter.
+  // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
+  repeated HeaderParameter headers = 1;
+}
+
+// `HeaderParameter` is an HTTP header parameter.
+// See: https://swagger.io/specification/v2/#parameter-object
+message HeaderParameter {
+  // `Type` is a supported HTTP header type.
+  // See https://swagger.io/specification/v2/#parameterType.
+  enum Type {
+    UNKNOWN = 0;
+    STRING = 1;
+    NUMBER = 2;
+    INTEGER = 3;
+    BOOLEAN = 4;
+  }
+
+  // `Name` is the header name.
+  string name = 1;
+  // `Description` is a short description of the header.
+  string description = 2;
+  // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+  // See: https://swagger.io/specification/v2/#parameterType.
+  Type type = 3;
+  // `Format` The extending format for the previously mentioned type.
+  string format = 4;
+  // `Required` indicates if the header is required.
+  bool required = 5;
+  // field 6 is reserved for 'items', but in OpenAPI-specific way.
+  reserved 6;
+  // field 7 is reserved for 'collectionFormat', which determines the format
+  // of the array if type array is used.
+  reserved 7;
+}
+
+// `Header` is a representation of OpenAPI v2 specification's Header object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
+//
+message Header {
+  // `Description` is a short description of the header.
+  string description = 1;
+  // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
+  string type = 2;
+  // `Format` The extending format for the previously mentioned type.
+  string format = 3;
+  // field 4 is reserved for 'items', but in OpenAPI-specific way.
+  reserved 4;
+  // field 5 is reserved for 'collectionFormat', which determines the format
+  // of the array if type array is used.
+  reserved 5;
+  // `Default` Declares the value of the header that the server will use if none is provided.
+  // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
+  // Unlike JSON Schema this value MUST conform to the defined type for the header.
+  string default = 6;
+  // field 7 is reserved for 'maximum'.
+  reserved 7;
+  // field 8 is reserved for 'exclusiveMaximum'.
+  reserved 8;
+  // field 9 is reserved for 'minimum'.
+  reserved 9;
+  // field 10 is reserved for 'exclusiveMinimum'.
+  reserved 10;
+  // field 11 is reserved for 'maxLength'.
+  reserved 11;
+  // field 12 is reserved for 'minLength'.
+  reserved 12;
+  // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
+  string pattern = 13;
+  // field 14 is reserved for 'maxItems'.
+  reserved 14;
+  // field 15 is reserved for 'minItems'.
+  reserved 15;
+  // field 16 is reserved for 'uniqueItems'.
+  reserved 16;
+  // field 17 is reserved for 'enum'.
+  reserved 17;
+  // field 18 is reserved for 'multipleOf'.
+  reserved 18;
+}
+
+// `Response` is a representation of OpenAPI v2 specification's Response object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
+//
+message Response {
+  // `Description` is a short description of the response.
+  // GFM syntax can be used for rich text representation.
+  string description = 1;
+  // `Schema` optionally defines the structure of the response.
+  // If `Schema` is not provided, it means there is no content to the response.
+  Schema schema = 2;
+  // `Headers` A list of headers that are sent with the response.
+  // `Header` name is expected to be a string in the canonical format of the MIME header key
+  // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
+  map<string, Header> headers = 3;
+  // `Examples` gives per-mimetype response examples.
+  // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
+  map<string, string> examples = 4;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 5;
+}
+
+// `Info` is a representation of OpenAPI v2 specification's Info object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      title: "Echo API";
+//      version: "1.0";
+//      description: "";
+//      contact: {
+//        name: "gRPC-Gateway project";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway";
+//        email: "none@example.com";
+//      };
+//      license: {
+//        name: "BSD 3-Clause License";
+//        url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
+//      };
+//    };
+//    ...
+//  };
+//
+message Info {
+  // The title of the application.
+  string title = 1;
+  // A short description of the application. GFM syntax can be used for rich
+  // text representation.
+  string description = 2;
+  // The Terms of Service for the API.
+  string terms_of_service = 3;
+  // The contact information for the exposed API.
+  Contact contact = 4;
+  // The license information for the exposed API.
+  License license = 5;
+  // Provides the version of the application API (not to be confused
+  // with the specification version).
+  string version = 6;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 7;
+}
+
+// `Contact` is a representation of OpenAPI v2 specification's Contact object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
+//
+// Example:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    info: {
+//      ...
+// contact: { +// name: "gRPC-Gateway project"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// email: "none@example.com"; +// }; +// ... +// }; +// ... +// }; +// +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. MUST be in the format of a + // URL. + string url = 2; + // The email address of the contact person/organization. MUST be in the format + // of an email address. + string email = 3; +} + +// `License` is a representation of OpenAPI v2 specification's License object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// info: { +// ... +// license: { +// name: "BSD 3-Clause License"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE"; +// }; +// ... +// }; +// ... +// }; +// +message License { + // The license name used for the API. + string name = 1; + // A URL to the license used for the API. MUST be in the format of a URL. + string url = 2; +} + +// `ExternalDocumentation` is a representation of OpenAPI v2 specification's +// ExternalDocumentation object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject +// +// Example: +// +// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +// ... +// external_docs: { +// description: "More about gRPC-Gateway"; +// url: "https://github.com/grpc-ecosystem/grpc-gateway"; +// } +// ... +// }; +// +message ExternalDocumentation { + // A short description of the target documentation. GFM syntax can be used for + // rich text representation. + string description = 1; + // The URL for the target documentation. Value MUST be in the format + // of a URL. + string url = 2; +} + +// `Schema` is a representation of OpenAPI v2 specification's Schema object. +// +// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject +// +message Schema { + JSONSchema json_schema = 1; + // Adds support for polymorphism. The discriminator is the schema property + // name that is used to differentiate between other schema that inherit this + // schema. The property name used MUST be defined at this schema and it MUST + // be in the required property list. When used, the value MUST be the name of + // this schema or any schema that inherits it. + string discriminator = 2; + // Relevant only for Schema "properties" definitions. Declares the property as + // "read only". This means that it MAY be sent as part of a response but MUST + // NOT be sent as part of the request. Properties marked as readOnly being + // true SHOULD NOT be in the required list of the defined schema. Default + // value is false. + bool read_only = 3; + // field 4 is reserved for 'xml'. + reserved 4; + // Additional external documentation for this schema. + ExternalDocumentation external_docs = 5; + // A free-form property to include an example of an instance for this schema in JSON. + // This is copied verbatim to the output. + string example = 6; +} + +// `JSONSchema` represents properties from JSON Schema taken, and as used, in +// the OpenAPI v2 spec. +// +// This includes changes made by OpenAPI v2. 
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+//
+// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
+// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
+//
+// Example:
+//
+//  message SimpleMessage {
+//    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
+//      json_schema: {
+//        title: "SimpleMessage"
+//        description: "A simple message."
+//        required: ["id"]
+//      }
+//    };
+//
+//    // Id represents the message identifier.
+//    string id = 1; [
+//        (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
+//          description: "The unique identifier of the simple message."
+//        }];
+//  }
+//
+message JSONSchema {
+  // field 1 is reserved for '$id', omitted from OpenAPI v2.
+  reserved 1;
+  // field 2 is reserved for '$schema', omitted from OpenAPI v2.
+  reserved 2;
+  // Ref is used to define an external reference to include in the message.
+  // This could be a fully qualified proto message reference, and that type must
+  // be imported into the protofile. If no message is identified, the Ref will
+  // be used verbatim in the output.
+  // For example:
+  //  `ref: ".google.protobuf.Timestamp"`.
+  string ref = 3;
+  // field 4 is reserved for '$comment', omitted from OpenAPI v2.
+  reserved 4;
+  // The title of the schema.
+  string title = 5;
+  // A short description of the schema.
+  string description = 6;
+  string default = 7;
+  bool read_only = 8;
+  // A free-form property to include a JSON example of this field. This is copied
+  // verbatim to the output swagger.json. Quotes must be escaped.
+  // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
+  string example = 9;
+  double multiple_of = 10;
+  // Maximum represents an inclusive upper limit for a numeric instance. The
+  // value MUST be a number.
+  double maximum = 11;
+  bool exclusive_maximum = 12;
+  // minimum represents an inclusive lower limit for a numeric instance. The
+  // value MUST be a number.
+  double minimum = 13;
+  bool exclusive_minimum = 14;
+  uint64 max_length = 15;
+  uint64 min_length = 16;
+  string pattern = 17;
+  // field 18 is reserved for 'additionalItems', omitted from OpenAPI v2.
+  reserved 18;
+  // field 19 is reserved for 'items', but in OpenAPI-specific way.
+  // TODO(ivucica): add 'items'?
+  reserved 19;
+  uint64 max_items = 20;
+  uint64 min_items = 21;
+  bool unique_items = 22;
+  // field 23 is reserved for 'contains', omitted from OpenAPI v2.
+  reserved 23;
+  uint64 max_properties = 24;
+  uint64 min_properties = 25;
+  repeated string required = 26;
+  // field 27 is reserved for 'additionalProperties', but in OpenAPI-specific
+  // way. TODO(ivucica): add 'additionalProperties'?
+  reserved 27;
+  // field 28 is reserved for 'definitions', omitted from OpenAPI v2.
+  reserved 28;
+  // field 29 is reserved for 'properties', but in OpenAPI-specific way.
+  // TODO(ivucica): add 'properties'?
+  reserved 29;
+  // following fields are reserved, as the properties have been omitted from
+  // OpenAPI v2:
+  // patternProperties, dependencies, propertyNames, const
+  reserved 30 to 33;
+  // Items in 'array' must be unique.
+  repeated string array = 34;
+
+  enum JSONSchemaSimpleTypes {
+    UNKNOWN = 0;
+    ARRAY = 1;
+    BOOLEAN = 2;
+    INTEGER = 3;
+    NULL = 4;
+    NUMBER = 5;
+    OBJECT = 6;
+    STRING = 7;
+  }
+
+  repeated JSONSchemaSimpleTypes type = 35;
+  // `Format`
+  string format = 36;
+  // following fields are reserved, as the properties have been omitted from
+  // OpenAPI v2: contentMediaType, contentEncoding, if, then, else
+  reserved 37 to 41;
+  // field 42 is reserved for 'allOf', but in OpenAPI-specific way.
+  // TODO(ivucica): add 'allOf'?
+  reserved 42;
+  // following fields are reserved, as the properties have been omitted from
+  // OpenAPI v2:
+  // anyOf, oneOf, not
+  reserved 43 to 45;
+  // Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
+  repeated string enum = 46;
+
+  // Additional field level properties used when generating the OpenAPI v2 file.
+  FieldConfiguration field_configuration = 1001;
+
+  // 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file.
+  // These properties are not defined by OpenAPIv2, but they are used to control the generation.
+  message FieldConfiguration {
+    // Alternative parameter name when used as path parameter. If set, this will
+    // be used as the complete parameter name when this field is used as a path
+    // parameter. Use this to avoid having auto generated path parameter names
+    // for overlapping paths.
+    string path_param_name = 47;
+  }
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 48;
+}
+
+// `Tag` is a representation of OpenAPI v2 specification's Tag object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
+//
+message Tag {
+  // The name of the tag. Use it to allow override of the name of a
+  // global Tag object, then use that name to reference the tag throughout the
+  // OpenAPI file.
+  string name = 1;
+  // A short description for the tag. GFM syntax can be used for rich text
+  // representation.
+  string description = 2;
+  // Additional external documentation for this tag.
+  ExternalDocumentation external_docs = 3;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 4;
+}
+
+// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
+// Security Definitions object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
+//
+// A declaration of the security schemes available to be used in the
+// specification. This does not enforce the security schemes on the operations
+// and only serves to provide the relevant details for each scheme.
+message SecurityDefinitions {
+  // A single security scheme definition, mapping a "name" to the scheme it
+  // defines.
+  map<string, SecurityScheme> security = 1;
+}
+
+// `SecurityScheme` is a representation of OpenAPI v2 specification's
+// Security Scheme object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
+//
+// Allows the definition of a security scheme that can be used by the
+// operations. Supported schemes are basic authentication, an API key (either as
+// a header or as a query parameter) and OAuth2's common flows (implicit,
+// password, application and access code).
+message SecurityScheme {
+  // The type of the security scheme. Valid values are "basic",
+  // "apiKey" or "oauth2".
+  enum Type {
+    TYPE_INVALID = 0;
+    TYPE_BASIC = 1;
+    TYPE_API_KEY = 2;
+    TYPE_OAUTH2 = 3;
+  }
+
+  // The location of the API key. Valid values are "query" or "header".
+  enum In {
+    IN_INVALID = 0;
+    IN_QUERY = 1;
+    IN_HEADER = 2;
+  }
+
+  // The flow used by the OAuth2 security scheme. Valid values are
+  // "implicit", "password", "application" or "accessCode".
+  enum Flow {
+    FLOW_INVALID = 0;
+    FLOW_IMPLICIT = 1;
+    FLOW_PASSWORD = 2;
+    FLOW_APPLICATION = 3;
+    FLOW_ACCESS_CODE = 4;
+  }
+
+  // The type of the security scheme. Valid values are "basic",
+  // "apiKey" or "oauth2".
+  Type type = 1;
+  // A short description for security scheme.
+  string description = 2;
+  // The name of the header or query parameter to be used.
+  // Valid for apiKey.
+  string name = 3;
+  // The location of the API key. Valid values are "query" or
+  // "header".
+  // Valid for apiKey.
+  In in = 4;
+  // The flow used by the OAuth2 security scheme. Valid values are
+  // "implicit", "password", "application" or "accessCode".
+  // Valid for oauth2.
+  Flow flow = 5;
+  // The authorization URL to be used for this flow. This SHOULD be in
+  // the form of a URL.
+  // Valid for oauth2/implicit and oauth2/accessCode.
+  string authorization_url = 6;
+  // The token URL to be used for this flow. This SHOULD be in the
+  // form of a URL.
+  // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
+  string token_url = 7;
+  // The available scopes for the OAuth2 security scheme.
+  // Valid for oauth2.
+  Scopes scopes = 8;
+  // Custom properties that start with "x-" such as "x-foo" used to describe
+  // extra functionality that is not covered by the standard OpenAPI Specification.
+  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
+  map<string, google.protobuf.Value> extensions = 9;
+}
+
+// `SecurityRequirement` is a representation of OpenAPI v2 specification's
+// Security Requirement object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
+//
+// Lists the required security schemes to execute this operation. The object can
+// have multiple security schemes declared in it which are all required (that
+// is, there is a logical AND between the schemes).
+//
+// The name used for each property MUST correspond to a security scheme
+// declared in the Security Definitions.
+message SecurityRequirement {
+  // If the security scheme is of type "oauth2", then the value is a list of
+  // scope names required for the execution. For other security scheme types,
+  // the array MUST be empty.
+  message SecurityRequirementValue {
+    repeated string scope = 1;
+  }
+  // Each name must correspond to a security scheme which is declared in
+  // the Security Definitions. If the security scheme is of type "oauth2",
+  // then the value is a list of scope names required for the execution.
+  // For other security scheme types, the array MUST be empty.
+  map<string, SecurityRequirementValue> security_requirement = 1;
+}
+
+// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
+//
+// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
+//
+// Lists the available scopes for an OAuth2 security scheme.
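+//
+// A hypothetical usage sketch (the scheme name, URL, and scope strings below are
+// illustrative only, not taken from this repository), using the same swagger
+// option syntax as the examples earlier in this file:
+//
+//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
+//    security_definitions: {
+//      security: {
+//        key: "petstore_auth";
+//        value: {
+//          type: TYPE_OAUTH2;
+//          flow: FLOW_IMPLICIT;
+//          authorization_url: "https://example.com/oauth/authorize";
+//          scopes: {
+//            scope: {
+//              key: "read:pets";
+//              value: "read your pets";
+//            };
+//          };
+//        };
+//      };
+//    };
+//  };
+//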
+message Scopes {
+  // Maps between a name of a scope to a short description of it (as the value
+  // of the property).
+  map<string, string> scope = 1;
+}
diff --git a/flyrs/setup.sh b/flyrs/setup.sh
new file mode 100644
index 0000000000..8bec45dae3
--- /dev/null
+++ b/flyrs/setup.sh
@@ -0,0 +1 @@
+export PB_OUT_DIR=gen/pb_rust/flyteidl/
diff --git a/flyrs/src/lib.rs b/flyrs/src/lib.rs
new file mode 100644
index 0000000000..3732282393
--- /dev/null
+++ b/flyrs/src/lib.rs
@@ -0,0 +1,144 @@
+use prost::Message;
+use pyo3::prelude::*;
+use pyo3::types::PyBytes;
+use tokio::runtime::{Builder, Runtime};
+use tonic::transport::Channel;
+
+// We use the `env!` macro here because Rust cannot evaluate executable code
+// (e.g. `std::env::var("PB_OUT_DIR").is_ok()`) in the global scope outside of function bodies.
+// TODO: better error handling when the environment variable is unset or empty.
+pub mod datacatalog {
+    include!(concat!(env!("PB_OUT_DIR"), "datacatalog.rs"));
+}
+pub mod flyteidl {
+    pub mod admin {
+        include!(concat!(env!("PB_OUT_DIR"), "flyteidl.admin.rs"));
+    }
+    pub mod cache {
+        include!(concat!(env!("PB_OUT_DIR"), "flyteidl.cacheservice.rs"));
+    }
+    pub mod core {
+        include!(concat!(env!("PB_OUT_DIR"), "flyteidl.core.rs"));
+    }
+    pub mod event {
+        include!(concat!(env!("PB_OUT_DIR"), "flyteidl.event.rs"));
+    }
+    pub mod plugins {
+        include!(concat!(env!("PB_OUT_DIR"), "flyteidl.plugins.rs"));
+        pub mod kubeflow {
+            include!(concat!(env!("PB_OUT_DIR"), "flyteidl.plugins.kubeflow.rs"));
+        }
+    }
+    pub mod service {
+        include!(concat!(env!("PB_OUT_DIR"), "flyteidl.service.rs"));
+    }
+}
+
+
+use crate::flyteidl::service::{TaskGetResponse, admin_service_client::AdminServiceClient, signal_service_client, data_proxy_service_client};
+use crate::flyteidl::admin::{Task, ObjectGetRequest, ResourceListRequest, TaskExecutionGetRequest};
+
+// Unlike the normal use case of PyO3, we don't have to add attribute macros such as #[pyclass] or #[pymethods] to all of our flyteidl structs.
+// In this case, we only use PyO3 to expose the client class and its methods to Python (FlyteKit).
+// It would be confusing to maintain two identical data class implementations in two languages.
+// Additionally, it's too complex to get/set every member value of any nested high-level data structure.
+// By design, Rust forbids implementing an external trait for an external struct. This can be tricky when using well-known types from the `prost_types` crate.
+// Furthermore, there's no scalable way to add attribute macros while building protos without resorting to numerous hacky workarounds.
+
+#[pyclass(subclass)]
+pub struct FlyteClient {
+    admin_service: AdminServiceClient<Channel>,
+    runtime: Runtime,
+}
+
+// Using a temporary value (e.g., the endpoint string) in async code is tricky w.r.t. lifetimes.
+// The compiler will complain that the temporary value does not live long enough.
+// TODO: figure out how to pass the required initial args into the constructor in a clean and neat way.
+#[pymethods]
+impl FlyteClient {
+    #[new] // Without this, you cannot construct the underlying class in Python.
+    pub fn new() -> PyResult<Self> {
+        let rt = Builder::new_multi_thread().enable_all().build().unwrap();
+        // TODO: Create a channel once, then bind it to every stub/client instead of connecting every time.
+        let stub = rt.block_on(AdminServiceClient::connect("http://localhost:30080")).unwrap();
+        // TODO: Add more thoughtful error handling
+        Ok(FlyteClient {
+            runtime: rt, // The tokio runtime is only used in a blocking manner for now; plenty of investigation and TODOs remain.
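+            // A possible follow-up for the channel TODO above (a sketch only, not wired in yet):
+            // dial one shared channel and hand clones of it to each generated stub, e.g.
+            //   let channel = rt.block_on(Channel::from_static("http://localhost:30080").connect()).unwrap();
+            //   let admin = AdminServiceClient::new(channel.clone());
+            // Cloning a tonic `Channel` is cheap; clones multiplex over the same underlying connection.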
+            admin_service: stub,
+        })
+    }
+
+    // fn parse_from_bytes(pb2_type, buf: &[u8]) {
+    // }
+    // fn serialize_tobytes(proto) {
+    // }
+
+    pub fn get_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyObject {
+        let bytes = bytes_obj.as_bytes();
+        // `&[u8]` implements `prost::bytes::Buf`, so we can decode without copying into a Vec first.
+        let decoded: ObjectGetRequest = Message::decode(bytes).unwrap();
+        let req = tonic::Request::new(decoded);
+
+        // Interacting with the gRPC server: flyteadmin
+        let res = (self.runtime.block_on(self.admin_service.get_task(req))).unwrap().into_inner();
+
+        let mut buf = vec![];
+        res.encode(&mut buf).unwrap();
+
+        PyBytes::new(py, &buf).into()
+    }
+
+    pub fn list_tasks_paginated(&mut self, py: Python, bytes_obj: &PyBytes) -> PyObject {
+        let bytes = bytes_obj.as_bytes();
+        let decoded: ResourceListRequest = Message::decode(bytes).unwrap();
+        let req = tonic::Request::new(decoded);
+        // Interacting with the gRPC server: flyteadmin
+        let res = (self.runtime.block_on(self.admin_service.list_tasks(req))).unwrap().into_inner();
+
+        let mut buf = vec![];
+        res.encode(&mut buf).unwrap();
+
+        PyBytes::new(py, &buf).into()
+    }
+
+    pub fn echo_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyObject { // PyResult<Vec<u8>>
+        let bytes = bytes_obj.as_bytes();
+        println!("Received bytes: {:?}", bytes);
+        let decoded: Task = Message::decode(bytes).unwrap();
+        println!("Parsed Task: {:?}", decoded);
+        let mut buf = vec![];
+        decoded.encode(&mut buf).unwrap();
+        println!("Serialized Task: {:?}", buf);
+        // Returning the bytes buffer
+        PyBytes::new(py, &buf).into()
+    }
+
+}
+
+
+// Some trials
+// fn tokio() -> &'static tokio::runtime::Runtime {
+//     use std::sync::OnceLock;
+//     static RT: OnceLock<tokio::runtime::Runtime> = OnceLock::new();
+//     RT.get_or_init(|| tokio::runtime::Runtime::new().unwrap())
+// }
+// async fn sleep(seconds: u64) -> u64 {
+//     let sleep = async move { tokio::time::sleep(std::time::Duration::from_secs(seconds)).await };
+//     tokio().spawn(sleep).await.unwrap();
+//     seconds
+// }
+// #[pyfunction]
+// async fn async_sleep_asyncio(seconds: u64) -> PyResult<u64> {
+//     let t = sleep(seconds).await;
+//     Ok(t)
+// }
+// #[pyfunction]
+// fn sum_as_string(a: usize, b: usize) -> PyResult<String> {
+//     Ok((a + b).to_string())
+// }
+#[pymodule]
+fn flyrs(m: &Bound<'_, PyModule>) -> PyResult<()> {
+    // m.add_function(wrap_pyfunction!(sum_as_string, m)?)?;
+    // m.add_function(wrap_pyfunction!(async_sleep_asyncio, m)?)?;
+    m.add_class::<FlyteClient>()?;
+    Ok(())
+}
diff --git a/flyrs/test_flytekit_remote.py b/flyrs/test_flytekit_remote.py
new file mode 100644
index 0000000000..6415226053
--- /dev/null
+++ b/flyrs/test_flytekit_remote.py
@@ -0,0 +1,38 @@
+import timeit
+
+from flytekit.configuration import Config
+from flytekit.remote import FlyteRemote
+
+PROJECT = "flytesnacks"
+DOMAIN = "development"
+
+remote_py = FlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN)
+task_py = remote_py.fetch_task(
+    project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw"
+)
+# print(task_py)
+
+remote_rs = FlyteRemote(Config.auto(), enable_rs=True, default_project=PROJECT, default_domain=DOMAIN)
+task_rs = remote_rs.fetch_task(
+    project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw"
+)
+# print(task_rs)
+
+print(task_py == task_rs)
+
+
+setup = """
+from flytekit.remote import FlyteRemote;
+from flytekit.configuration import Config;
+PROJECT = "flytesnacks";
+DOMAIN = "development";
+remote_py = FlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN);
+remote_rs = FlyteRemote(Config.auto(), enable_rs=True, default_project=PROJECT, default_domain=DOMAIN);
+"""
+
+fetch_task_in_py = """task_py = remote_py.fetch_task(project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw")"""
+fetch_task_in_rs = """task_rs = remote_rs.fetch_task(project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw")"""
+# Python gRPC
+print(sum(timeit.repeat(fetch_task_in_py, setup=setup, repeat=10, number=100)))
+# Rust gRPC
+print(sum(timeit.repeat(fetch_task_in_rs, setup=setup, repeat=10, number=100)))
diff --git a/flytekit/clients/friendly_rs.py b/flytekit/clients/friendly_rs.py
new file mode 100644
index 0000000000..048155bb65
--- /dev/null
+++ b/flytekit/clients/friendly_rs.py
@@ -0,0 +1,1045 @@
+import datetime
+import typing
+
+from flyteidl.admin import common_pb2 as _common_pb2
+from flyteidl.admin import execution_pb2 as _execution_pb2
+from flyteidl.admin import launch_plan_pb2 as _launch_plan_pb2
+from flyteidl.admin import matchable_resource_pb2 as _matchable_resource_pb2
+from flyteidl.admin import node_execution_pb2 as _node_execution_pb2
+from flyteidl.admin import project_domain_attributes_pb2 as _project_domain_attributes_pb2
+from flyteidl.admin import project_pb2 as _project_pb2
+from flyteidl.admin import task_execution_pb2 as _task_execution_pb2
+from flyteidl.admin import task_pb2 as _task_pb2
+from flyteidl.admin import workflow_attributes_pb2 as _workflow_attributes_pb2
+from flyteidl.admin import workflow_pb2 as _workflow_pb2
+from flyteidl.service import dataproxy_pb2 as _data_proxy_pb2
+from google.protobuf.duration_pb2 import Duration
+
+import flyrs
+from flytekit.models import common as _common
+from flytekit.models import execution as _execution
+from flytekit.models import filters as _filters
+from flytekit.models import launch_plan as _launch_plan
+from flytekit.models import node_execution as _node_execution
+from flytekit.models import project as _project
+from flytekit.models import task as _task
+from flytekit.models.admin import common as _admin_common
+from flytekit.models.admin import task_execution as _task_execution
+from flytekit.models.admin import workflow as _workflow
+from flytekit.models.core import identifier as _identifier
+
+# Lots of refactoring remains to be done.
+# Currently only `get_task()`, used by fetch_task() in FlyteRemote, has been refactored.
+
+
+class RustSynchronousFlyteClient(flyrs.FlyteClient):
+    """
+    This is a low-level client that users can use to make direct gRPC service calls to the control plane. See the
+    :std:doc:`service spec <idl:protos/docs/service/index>`. This is a more user-friendly interface than the
+    :py:class:`raw client <flytekit.clients.raw.RawSynchronousFlyteClient>` so users should try to use this class
+    first. Create a client by ::
+
+        RustSynchronousFlyteClient()
+        # The gRPC endpoint is currently hard-coded in the Rust client (see flyrs/src/lib.rs).
+
+    """
+
+    @property
+    def raw(self):
+        """
+        Gives access to the raw client
+        :rtype: flytekit.clients.raw.RawSynchronousFlyteClient
+        """
+        return super(RustSynchronousFlyteClient, self)
+
+    ####################################################################################################################
+    #
+    #  Task Endpoints
+    #
+    ####################################################################################################################
+
+    def create_task(self, task_identifier, task_spec):
+        """
+        This will create a task definition in the Admin database. Once successful, the task object can be
+        retrieved via the client or viewed via the UI or command-line interfaces.
+
+        .. note ::
+
+            Overwrites are not supported so any request for a given project, domain, name, and version that exists in
+            the database must match the existing definition exactly. Furthermore, as long as the request
+            remains identical, calling this method multiple times will result in success.
+
+        :param flytekit.models.core.identifier.Identifier task_identifier: The identifier for this task.
+        :param flytekit.models.task.TaskSpec task_spec: This is the actual definition of the task that
+            should be created.
+        :raises flytekit.common.exceptions.user.FlyteEntityAlreadyExistsException: If an identical version of the
+            task is found, this exception is raised. The client might choose to ignore this exception because the
+            identical task is already registered.
+        :raises grpc.RpcError:
+        """
+        super(RustSynchronousFlyteClient, self).create_task(
+            _task_pb2.TaskCreateRequest(id=task_identifier.to_flyte_idl(), spec=task_spec.to_flyte_idl())
+        )
+
+    def list_task_ids_paginated(self, project, domain, limit=100, token=None, sort_by=None):
+        """
+        This returns a page of identifiers for the tasks for a given project and domain. Filters can also be
+        specified.
+
+        .. note ::
+
+            This is a paginated API. Use the token field in the request to specify a page offset token.
+            The user of the API is responsible for providing this token.
+
+        .. note ::
+
+            If entries are added to the database between requests for different pages, it is possible to receive
+            entries on the second page that also appeared on the first.
+
+        :param Text project: The namespace of the project to list.
+        :param Text domain: The domain space of the project to list.
+        :param int limit: [Optional] The maximum number of entries to return. Must be greater than 0. The maximum
+            page size is determined by the Flyte Admin Service configuration. If limit is greater than the maximum
+            page size, an exception will be raised.
+        :param Text token: [Optional] If specified, this specifies where in the rows of results to skip before reading.
+            If you previously retrieved a page response with token="foo" and you want the next page,
+            specify token="foo". Please see the notes for this function about the caveats of the paginated API.
+        :param flytekit.models.admin.common.Sort sort_by: [Optional] If provided, the results will be sorted.
+        :raises: TODO
+        :rtype: list[flytekit.models.common.NamedEntityIdentifier], Text
+        """
+        identifier_list = super(RustSynchronousFlyteClient, self).list_task_ids_paginated(
+            _common_pb2.NamedEntityIdentifierListRequest(
+                project=project,
+                domain=domain,
+                limit=limit,
+                token=token,
+                sort_by=None if sort_by is None else sort_by.to_flyte_idl(),
+            )
+        )
+        return (
+            [_common.NamedEntityIdentifier.from_flyte_idl(identifier_pb) for identifier_pb in identifier_list.entities],
+            str(identifier_list.token),
+        )
+
+    def list_tasks_paginated(self, identifier, limit=100, token=None, filters=None, sort_by=None):
+        """
+        This returns a page of task metadata for tasks in a given project and domain. Optionally,
+        specifying a name will limit the results to only tasks with that name in the given project and domain.
+
+        .. note ::
+
+            This is a paginated API. Use the token field in the request to specify a page offset token.
+            The user of the API is responsible for providing this token.
+
+        .. note ::
+
+            If entries are added to the database between requests for different pages, it is possible to receive
+            entries on the second page that also appeared on the first.
+
+        :param flytekit.models.common.NamedEntityIdentifier identifier: NamedEntityIdentifier to list.
+        :param int limit: [Optional] The maximum number of entries to return. Must be greater than 0. The maximum
+            page size is determined by the Flyte Admin Service configuration. If limit is greater than the maximum
+            page size, an exception will be raised.
+        :param Text token: [Optional] If specified, this specifies where in the rows of results to skip before reading.
+            If you previously retrieved a page response with token="foo" and you want the next page,
+            specify token="foo". Please see the notes for this function about the caveats of the paginated API.
+        :param list[flytekit.models.filters.Filter] filters: [Optional] If specified, the filters will be applied to
+            the query. If the filter is not supported, an exception will be raised.
+        :param flytekit.models.admin.common.Sort sort_by: [Optional] If provided, the results will be sorted.
+        :raises: TODO
+        :rtype: list[flytekit.models.task.Task], Text
+        """
+        task_list = super(RustSynchronousFlyteClient, self).list_tasks_paginated(
+            resource_list_request=_common_pb2.ResourceListRequest(
+                id=identifier.to_flyte_idl(),
+                limit=limit,
+                token=token,
+                filters=_filters.FilterList(filters or []).to_flyte_idl(),
+                sort_by=None if sort_by is None else sort_by.to_flyte_idl(),
+            )
+        )
+        # TODO: tmp workaround
+        for pb in task_list.tasks:
+            pb.id.resource_type = _identifier.ResourceType.TASK
+        return (
+            [_task.Task.from_flyte_idl(task_pb2) for task_pb2 in task_list.tasks],
+            str(task_list.token),
+        )
+
+    def get_task(self, id):
+        """
+        This returns a single task for a given identifier.
+
+        :param flytekit.models.core.identifier.Identifier id: The ID representing a given task.
+        :raises: TODO
+        :rtype: flytekit.models.task.Task
+        """
+        t = _task_pb2.Task()
+        t.ParseFromString(
+            super(RustSynchronousFlyteClient, self).get_task(
+                _common_pb2.ObjectGetRequest(id=id.to_flyte_idl()).SerializeToString()
+            )
+        )
+        return _task.Task.from_flyte_idl(t)
+
+    ####################################################################################################################
+    #
+    #  Workflow Endpoints
+    #
+    ####################################################################################################################
+
+    def create_workflow(self, workflow_identifier, workflow_spec):
+        """
+        This will create a workflow definition in the Admin database. Once successful, the workflow object can be
+        retrieved via the client or viewed via the UI or command-line interfaces.
+
+        .. note ::
+
+            Overwrites are not supported so any request for a given project, domain, name, and version that exists in
+            the database must match the existing definition exactly. Furthermore, as long as the request
+            remains identical, calling this method multiple times will result in success.
+
+        :param flytekit.models.core.identifier.Identifier workflow_identifier: The identifier for this workflow.
+        :param flytekit.models.admin.workflow.WorkflowSpec workflow_spec: This is the actual definition of the workflow
+            that should be created.
+        :raises flytekit.common.exceptions.user.FlyteEntityAlreadyExistsException: If an identical version of the
+            workflow is found, this exception is raised. The client might choose to ignore this exception because the
+            identical workflow is already registered.
+ :raises grpc.RpcError: + """ + super(RustSynchronousFlyteClient, self).create_workflow( + _workflow_pb2.WorkflowCreateRequest( + id=workflow_identifier.to_flyte_idl(), spec=workflow_spec.to_flyte_idl() + ) + ) + + def list_workflow_ids_paginated(self, project, domain, limit=100, token=None, sort_by=None): + """ + This returns a page of identifiers for the workflows for a given project and domain. Filters can also be + specified. + + .. note :: + + This is a paginated API. Use the token field in the request to specify a page offset token. + The user of the API is responsible for providing this token. + + .. note :: + + If entries are added to the database between requests for different pages, it is possible to receive + entries on the second page that also appeared on the first. + + :param: Text project: The namespace of the project to list. + :param: Text domain: The domain space of the project to list. + :param: int limit: [Optional] The maximum number of entries to return. Must be greater than 0. The maximum + page size is determined by the Flyte Admin Service configuration. If limit is greater than the maximum + page size, an exception will be raised. + :param: int token: [Optional] If specified, this specifies where in the rows of results to skip before reading. + If you previously retrieved a page response with token="foo" and you want the next page, + specify token="foo". Please see the notes for this function about the caveats of the paginated API. + :param flytekit.models.admin.common.Sort sort_by: [Optional] If provided, the results will be sorted. + :raises: TODO + :rtype: list[flytekit.models.common.NamedEntityIdentifier], Text + """ + identifier_list = super(RustSynchronousFlyteClient, self).list_workflow_ids_paginated( + _common_pb2.NamedEntityIdentifierListRequest( + project=project, + domain=domain, + limit=limit, + token=token, + sort_by=None if sort_by is None else sort_by.to_flyte_idl(), + ) + ) + return ( + [_common.NamedEntityIdentifier.from_flyte_idl(identifier_pb) for identifier_pb in identifier_list.entities], + str(identifier_list.token), + ) + + def list_workflows_paginated(self, identifier, limit=100, token=None, filters=None, sort_by=None): + """ + This returns a page of workflow meta-information for workflows in a given project and domain. Optionally, + specifying a name will limit the results to only workflows with that name in the given project and domain. + + .. note :: + + This is a paginated API. Use the token field in the request to specify a page offset token. + The user of the API is responsible for providing this token. + + .. note :: + + If entries are added to the database between requests for different pages, it is possible to receive + entries on the second page that also appeared on the first. + + :param flytekit.models.common.NamedEntityIdentifier identifier: NamedEntityIdentifier to list. + :param int limit: [Optional] The maximum number of entries to return. Must be greater than 0. The maximum + page size is determined by the Flyte Admin Service configuration. If limit is greater than the maximum + page size, an exception will be raised. + :param int token: [Optional] If specified, this specifies where in the rows of results to skip before reading. + If you previously retrieved a page response with token="foo" and you want the next page, + specify token="foo". Please see the notes for this function about the caveats of the paginated API. 
+ :param list[flytekit.models.filters.Filter] filters: [Optional] If specified, the filters will be applied to + the query. If the filter is not supported, an exception will be raised. + :param flytekit.models.admin.common.Sort sort_by: [Optional] If provided, the results will be sorted. + :raises: TODO + :rtype: list[flytekit.models.admin.workflow.Workflow], Text + """ + wf_list = super(RustSynchronousFlyteClient, self).list_workflows_paginated( + resource_list_request=_common_pb2.ResourceListRequest( + id=identifier.to_flyte_idl(), + limit=limit, + token=token, + filters=_filters.FilterList(filters or []).to_flyte_idl(), + sort_by=None if sort_by is None else sort_by.to_flyte_idl(), + ) + ) + # TODO: tmp workaround + for pb in wf_list.workflows: + pb.id.resource_type = _identifier.ResourceType.WORKFLOW + return ( + [_workflow.Workflow.from_flyte_idl(wf_pb2) for wf_pb2 in wf_list.workflows], + str(wf_list.token), + ) + + def get_workflow(self, id): + """ + This returns a single workflow for a given ID. + + :param flytekit.models.core.identifier.Identifier id: The ID representing a given task. + :raises: TODO + :rtype: flytekit.models.admin.workflow.Workflow + """ + return _workflow.Workflow.from_flyte_idl( + super(RustSynchronousFlyteClient, self).get_workflow(_common_pb2.ObjectGetRequest(id=id.to_flyte_idl())) + ) + + #################################################################################################################### + # + # Launch Plan Endpoints + # + #################################################################################################################### + + def create_launch_plan(self, launch_plan_identifer, launch_plan_spec): + """ + This will create a launch plan definition in the Admin database. Once successful, the launch plan object can be + retrieved via the client or viewed via the UI or command-line interfaces. + + .. note :: + + Overwrites are not supported so any request for a given project, domain, name, and version that exists in + the database must match the existing definition exactly. This also means that as long as the request + remains identical, calling this method multiple times will result in success. + + :param: flytekit.models.core.identifier.Identifier launch_plan_identifer: The identifier for this launch plan. + :param: flytekit.models.launch_plan.LaunchPlanSpec launch_plan_spec: This is the actual definition of the + launch plan that should be created. + :raises flytekit.common.exceptions.user.FlyteEntityAlreadyExistsException: If an identical version of the + launch plan is found, this exception is raised. The client might choose to ignore this exception because + the identical launch plan is already registered. + :raises grpc.RpcError: + """ + super(RustSynchronousFlyteClient, self).create_launch_plan( + _launch_plan_pb2.LaunchPlanCreateRequest( + id=launch_plan_identifer.to_flyte_idl(), + spec=launch_plan_spec.to_flyte_idl(), + ) + ) + + def get_launch_plan(self, id): + """ + Retrieves a launch plan entity. + + :param flytekit.models.core.identifier.Identifier id: unique identifier for launch plan to retrieve + :rtype: flytekit.models.launch_plan.LaunchPlan + """ + return _launch_plan.LaunchPlan.from_flyte_idl( + super(RustSynchronousFlyteClient, self).get_launch_plan(_common_pb2.ObjectGetRequest(id=id.to_flyte_idl())) + ) + + def get_active_launch_plan(self, identifier): + """ + Retrieves the active launch plan entity given a named entity identifier (project, domain, name). Raises an + error if no active launch plan exists. 
+ + :param flytekit.models.common.NamedEntityIdentifier identifier: NamedEntityIdentifier to list. + :rtype: flytekit.models.launch_plan.LaunchPlan + """ + return _launch_plan.LaunchPlan.from_flyte_idl( + super(RustSynchronousFlyteClient, self).get_active_launch_plan( + _launch_plan_pb2.ActiveLaunchPlanRequest(id=identifier.to_flyte_idl()) + ) + ) + + def list_launch_plan_ids_paginated(self, project, domain, limit=100, token=None, sort_by=None): + """ + This returns a page of identifiers for the launch plans for a given project and domain. Filters can also be + specified. + + .. note :: + + This is a paginated API. Use the token field in the request to specify a page offset token. + The user of the API is responsible for providing this token. + + .. note :: + + If entries are added to the database between requests for different pages, it is possible to receive + entries on the second page that also appeared on the first. + + :param: Text project: The namespace of the project to list. + :param: Text domain: The domain space of the project to list. + :param: int limit: [Optional] The maximum number of entries to return. Must be greater than 0. The maximum + page size is determined by the Flyte Admin Service configuration. If limit is greater than the maximum + page size, an exception will be raised. + :param: int token: [Optional] If specified, this specifies where in the rows of results to skip before reading. + If you previously retrieved a page response with token="foo" and you want the next page, + specify token="foo". Please see the notes for this function about the caveats of the paginated API. + :param flytekit.models.admin.common.Sort sort_by: [Optional] If provided, the results will be sorted. + :raises: TODO + :rtype: list[flytekit.models.common.NamedEntityIdentifier], Text + """ + identifier_list = super(RustSynchronousFlyteClient, self).list_launch_plan_ids_paginated( + _common_pb2.NamedEntityIdentifierListRequest( + project=project, + domain=domain, + limit=limit, + token=token, + sort_by=None if sort_by is None else sort_by.to_flyte_idl(), + ) + ) + return ( + [_common.NamedEntityIdentifier.from_flyte_idl(identifier_pb) for identifier_pb in identifier_list.entities], + str(identifier_list.token), + ) + + def list_launch_plans_paginated(self, identifier, limit=100, token=None, filters=None, sort_by=None): + """ + This returns a page of launch plan meta-information for launch plans in a given project and domain. Optionally, + specifying a name will limit the results to only workflows with that name in the given project and domain. + + .. note :: + + This is a paginated API. Use the token field in the request to specify a page offset token. + The user of the API is responsible for providing this token. + + .. note :: + + If entries are added to the database between requests for different pages, it is possible to receive + entries on the second page that also appeared on the first. + + :param flytekit.models.common.NamedEntityIdentifier identifier: NamedEntityIdentifier to list. + :param int limit: [Optional] The maximum number of entries to return. Must be greater than 0. The maximum + page size is determined by the Flyte Admin Service configuration. If limit is greater than the maximum + page size, an exception will be raised. + :param int token: [Optional] If specified, this specifies where in the rows of results to skip before reading. + If you previously retrieved a page response with token="foo" and you want the next page, + specify token="foo". 
Please see the notes for this function about the caveats of the paginated API. + :param list[flytekit.models.filters.Filter] filters: [Optional] If specified, the filters will be applied to + the query. If the filter is not supported, an exception will be raised. + :param flytekit.models.admin.common.Sort sort_by: [Optional] If provided, the results will be sorted. + :raises: TODO + :rtype: list[flytekit.models.launch_plan.LaunchPlan], str + """ + lp_list = super(RustSynchronousFlyteClient, self).list_launch_plans_paginated( + resource_list_request=_common_pb2.ResourceListRequest( + id=identifier.to_flyte_idl(), + limit=limit, + token=token, + filters=_filters.FilterList(filters or []).to_flyte_idl(), + sort_by=None if sort_by is None else sort_by.to_flyte_idl(), + ) + ) + # TODO: tmp workaround + for pb in lp_list.launch_plans: + pb.id.resource_type = _identifier.ResourceType.LAUNCH_PLAN + return ( + [_launch_plan.LaunchPlan.from_flyte_idl(pb) for pb in lp_list.launch_plans], + str(lp_list.token), + ) + + def list_active_launch_plans_paginated( + self, project, domain, limit=100, token=None, sort_by=None + ) -> typing.Tuple[typing.List[_launch_plan.LaunchPlan], str]: + """ + This returns a page of currently active launch plan meta-information for launch plans in a given project and + domain. + + .. note :: + + This is a paginated API. Use the token field in the request to specify a page offset token. + The user of the API is responsible for providing this token. + + .. note :: + + If entries are added to the database between requests for different pages, it is possible to receive + entries on the second page that also appeared on the first. + + :param Text project: + :param Text domain: + :param int limit: [Optional] The maximum number of entries to return. Must be greater than 0. The maximum + page size is determined by the Flyte Admin Service configuration. If limit is greater than the maximum + page size, an exception will be raised. + :param int token: [Optional] If specified, this specifies where in the rows of results to skip before reading. + If you previously retrieved a page response with token="foo" and you want the next page, + specify token="foo". Please see the notes for this function about the caveats of the paginated API. + :param flytekit.models.admin.common.Sort sort_by: [Optional] If provided, the results will be sorted. + :raises: TODO + :rtype: list[flytekit.models.launch_plan.LaunchPlan], str + """ + lp_list = super(RustSynchronousFlyteClient, self).list_active_launch_plans_paginated( + _launch_plan_pb2.ActiveLaunchPlanListRequest( + project=project, + domain=domain, + limit=limit, + token=token, + sort_by=None if sort_by is None else sort_by.to_flyte_idl(), + ) + ) + # TODO: tmp workaround + for pb in lp_list.launch_plans: + pb.id.resource_type = _identifier.ResourceType.LAUNCH_PLAN + return ( + [_launch_plan.LaunchPlan.from_flyte_idl(pb) for pb in lp_list.launch_plans], + str(lp_list.token), + ) + + def update_launch_plan(self, id, state): + """ + Updates a launch plan. Currently, this can only be used to update a given launch plan's state (ACTIVE v. + INACTIVE) for schedules. If a launch plan with a given project, domain, and name is set to ACTIVE, + then any other launch plan with the same project, domain, and name that was set to ACTIVE will be switched to + INACTIVE in one transaction. 
+ + :param flytekit.models.core.identifier.Identifier id: identifier for launch plan to update + :param int state: Enum value from flytekit.models.launch_plan.LaunchPlanState + """ + super(RustSynchronousFlyteClient, self).update_launch_plan( + _launch_plan_pb2.LaunchPlanUpdateRequest(id=id.to_flyte_idl(), state=state) + ) + + #################################################################################################################### + # + # Named Entity Endpoints + # + #################################################################################################################### + + def update_named_entity(self, resource_type, id, metadata): + """ + Updates the metadata associated with a named entity. A named entity is designated a resource, e.g. a workflow, + task or launch plan specified by {project, domain, name} across all versions of the resource. + + :param int resource_type: Enum value from flytekit.models.identifier.ResourceType + :param flytekit.models.admin.named_entity.NamedEntityIdentifier id: identifier for named entity to update + :param flytekit.models.admin.named_entity.NamedEntityIdentifierMetadata metadata: + """ + super(RustSynchronousFlyteClient, self).update_named_entity( + _common_pb2.NamedEntityUpdateRequest( + resource_type=resource_type, + id=id.to_flyte_idl(), + metadata=metadata.to_flyte_idl(), + ) + ) + + #################################################################################################################### + # + # Execution Endpoints + # + #################################################################################################################### + + def create_execution(self, project, domain, name, execution_spec, inputs): + """ + This will create an execution for the given execution spec. + :param Text project: + :param Text domain: + :param Text name: + :param flytekit.models.execution.ExecutionSpec execution_spec: This is the specification for the execution. + :param flytekit.models.literals.LiteralMap inputs: The inputs for the execution + :returns: The unique identifier for the execution. + :rtype: flytekit.models.core.identifier.WorkflowExecutionIdentifier + """ + return _identifier.WorkflowExecutionIdentifier.from_flyte_idl( + super(RustSynchronousFlyteClient, self) + .create_execution( + _execution_pb2.ExecutionCreateRequest( + project=project, + domain=domain, + name=name, + spec=execution_spec.to_flyte_idl(), + inputs=inputs.to_flyte_idl(), + ) + ) + .id + ) + + def recover_execution(self, id, name: str = None): + """ + Recreates a previously-run workflow execution that will only start executing from the last known failure point. + :param flytekit.models.core.identifier.WorkflowExecutionIdentifier id: + :param name str: Optional name to assign to the newly created execution. 
+ :rtype: flytekit.models.core.identifier.WorkflowExecutionIdentifier + """ + return _identifier.WorkflowExecutionIdentifier.from_flyte_idl( + super(RustSynchronousFlyteClient, self) + .recover_execution(_execution_pb2.ExecutionRecoverRequest(id=id.to_flyte_idl(), name=name)) + .id + ) + + def get_execution(self, id): + """ + :param flytekit.models.core.identifier.WorkflowExecutionIdentifier id: + :rtype: flytekit.models.execution.Execution + """ + return _execution.Execution.from_flyte_idl( + super(RustSynchronousFlyteClient, self).get_execution( + _execution_pb2.WorkflowExecutionGetRequest(id=id.to_flyte_idl()) + ) + ) + + def get_execution_data(self, id): + """ + Returns signed URLs to LiteralMap blobs for an execution's inputs and outputs (when available). + + :param flytekit.models.core.identifier.WorkflowExecutionIdentifier id: + :rtype: flytekit.models.execution.WorkflowExecutionGetDataResponse + """ + return _execution.WorkflowExecutionGetDataResponse.from_flyte_idl( + super(RustSynchronousFlyteClient, self).get_execution_data( + _execution_pb2.WorkflowExecutionGetDataRequest(id=id.to_flyte_idl()) + ) + ) + + def list_executions_paginated(self, project, domain, limit=100, token=None, filters=None, sort_by=None): + """ + This returns a page of executions in a given project and domain. + + .. note :: + + This is a paginated API. Use the token field in the request to specify a page offset token. + The user of the API is responsible for providing this token. + + .. note :: + + If entries are added to the database between requests for different pages, it is possible to receive + entries on the second page that also appeared on the first. + + :param Text project: Project in which to list executions. + :param Text domain: Project in which to list executions. + :param int limit: [Optional] The maximum number of entries to return. Must be greater than 0. The maximum + page size is determined by the Flyte Admin Service configuration. If limit is greater than the maximum + page size, an exception will be raised. + :param Text token: [Optional] If specified, this specifies where in the rows of results to skip before reading. + If you previously retrieved a page response with token="foo" and you want the next page, + specify token="foo". Please see the notes for this function about the caveats of the paginated API. + :param list[flytekit.models.filters.Filter] filters: [Optional] If specified, the filters will be applied to + the query. If the filter is not supported, an exception will be raised. + :param flytekit.models.admin.common.Sort sort_by: [Optional] If provided, the results will be sorted. 
+ :raises: TODO + :rtype: (list[flytekit.models.execution.Execution], Text) + """ + exec_list = super(RustSynchronousFlyteClient, self).list_executions_paginated( + resource_list_request=_common_pb2.ResourceListRequest( + id=_common_pb2.NamedEntityIdentifier(project=project, domain=domain), + limit=limit, + token=token, + filters=_filters.FilterList(filters or []).to_flyte_idl(), + sort_by=None if sort_by is None else sort_by.to_flyte_idl(), + ) + ) + return ( + [_execution.Execution.from_flyte_idl(pb) for pb in exec_list.executions], + str(exec_list.token), + ) + + def terminate_execution(self, id, cause): + """ + :param flytekit.models.core.identifier.WorkflowExecutionIdentifier id: + :param Text cause: + """ + super(RustSynchronousFlyteClient, self).terminate_execution( + _execution_pb2.ExecutionTerminateRequest(id=id.to_flyte_idl(), cause=cause) + ) + + def relaunch_execution(self, id, name=None): + """ + :param flytekit.models.core.identifier.WorkflowExecutionIdentifier id: + :param Text name: [Optional] name for the new execution. If not specified, a randomly generated name will be + used + :returns: The unique identifier for the new execution. + :rtype: flytekit.models.core.identifier.WorkflowExecutionIdentifier + """ + return _identifier.WorkflowExecutionIdentifier.from_flyte_idl( + super(RustSynchronousFlyteClient, self) + .relaunch_execution(_execution_pb2.ExecutionRelaunchRequest(id=id.to_flyte_idl(), name=name)) + .id + ) + + #################################################################################################################### + # + # Node Execution Endpoints + # + #################################################################################################################### + + def get_node_execution(self, node_execution_identifier): + """ + :param flytekit.models.core.identifier.NodeExecutionIdentifier node_execution_identifier: + :rtype: flytekit.models.node_execution.NodeExecution + """ + return _node_execution.NodeExecution.from_flyte_idl( + super(RustSynchronousFlyteClient, self).get_node_execution( + _node_execution_pb2.NodeExecutionGetRequest(id=node_execution_identifier.to_flyte_idl()) + ) + ) + + def get_node_execution_data(self, node_execution_identifier) -> _execution.NodeExecutionGetDataResponse: + """ + Returns signed URLs to LiteralMap blobs for a node execution's inputs and outputs (when available). + + :param flytekit.models.core.identifier.NodeExecutionIdentifier node_execution_identifier: + """ + return _execution.NodeExecutionGetDataResponse.from_flyte_idl( + super(RustSynchronousFlyteClient, self).get_node_execution_data( + _node_execution_pb2.NodeExecutionGetDataRequest(id=node_execution_identifier.to_flyte_idl()) + ) + ) + + def list_node_executions( + self, + workflow_execution_identifier, + limit: int = 100, + token: typing.Optional[str] = None, + filters: typing.List[_filters.Filter] = None, + sort_by: _admin_common.Sort = None, + unique_parent_id: str = None, + ): + """Get node executions associated with a given workflow execution. + + :param flytekit.models.core.identifier.WorkflowExecutionIdentifier workflow_execution_identifier: + :param limit: Limit the number of items returned in the response. + :param token: If specified, this specifies where in the rows of results to skip before reading. + If you previously retrieved a page response with token="foo" and you want the next page, + specify ``token="foo"``. 
+        :param list[flytekit.models.filters.Filter] filters:
+        :param flytekit.models.admin.common.Sort sort_by: [Optional] If provided, the results will be sorted.
+        :param unique_parent_id: If specified, returns the node executions for the ``unique_parent_id`` node id.
+        :rtype: list[flytekit.models.node_execution.NodeExecution], Text
+        """
+        exec_list = super(RustSynchronousFlyteClient, self).list_node_executions_paginated(
+            _node_execution_pb2.NodeExecutionListRequest(
+                workflow_execution_id=workflow_execution_identifier.to_flyte_idl(),
+                limit=limit,
+                token=token,
+                filters=_filters.FilterList(filters or []).to_flyte_idl(),
+                sort_by=None if sort_by is None else sort_by.to_flyte_idl(),
+                unique_parent_id=unique_parent_id,
+            )
+        )
+        return (
+            [_node_execution.NodeExecution.from_flyte_idl(e) for e in exec_list.node_executions],
+            str(exec_list.token),
+        )
+
+    def list_node_executions_for_task_paginated(
+        self,
+        task_execution_identifier,
+        limit=100,
+        token=None,
+        filters=None,
+        sort_by=None,
+    ):
+        """
+        This returns nodes spawned by a specific task execution. This is generally from things like dynamic tasks.
+        :param flytekit.models.core.identifier.TaskExecutionIdentifier task_execution_identifier:
+        :param int limit: Number to return per page
+        :param Text token: [Optional] If specified, this specifies where in the rows of results to skip before reading.
+            If you previously retrieved a page response with token="foo" and you want the next page,
+            specify token="foo".
+        :param list[flytekit.models.filters.Filter] filters:
+        :param flytekit.models.admin.common.Sort sort_by: [Optional] If provided, the results will be sorted.
+        :rtype: list[flytekit.models.node_execution.NodeExecution], Text
+        """
+        # Call through the flyrs client wrapper like the other endpoints; this class has no raw gRPC `_stub`.
+        exec_list = super(RustSynchronousFlyteClient, self).list_node_executions_for_task_paginated(
+            _node_execution_pb2.NodeExecutionForTaskListRequest(
+                task_execution_id=task_execution_identifier.to_flyte_idl(),
+                limit=limit,
+                token=token,
+                filters=_filters.FilterList(filters or []).to_flyte_idl(),
+                sort_by=None if sort_by is None else sort_by.to_flyte_idl(),
+            )
+        )
+        return (
+            [_node_execution.NodeExecution.from_flyte_idl(e) for e in exec_list.node_executions],
+            str(exec_list.token),
+        )
+
+    ####################################################################################################################
+    #
+    #  Task Execution Endpoints
+    #
+    ####################################################################################################################
+
+    def get_task_execution(self, id):
+        """
+        :param flytekit.models.core.identifier.TaskExecutionIdentifier id:
+        :rtype: flytekit.models.admin.task_execution.TaskExecution
+        """
+        return _task_execution.TaskExecution.from_flyte_idl(
+            super(RustSynchronousFlyteClient, self).get_task_execution(
+                _task_execution_pb2.TaskExecutionGetRequest(id=id.to_flyte_idl())
+            )
+        )
+
+    def get_task_execution_data(self, task_execution_identifier):
+        """
+        Returns signed URLs to LiteralMap blobs for a task execution's inputs and outputs (when available).
+
+        :param flytekit.models.core.identifier.TaskExecutionIdentifier task_execution_identifier:
+        :rtype: flytekit.models.execution.TaskExecutionGetDataResponse
+        """
+        return _execution.TaskExecutionGetDataResponse.from_flyte_idl(
+            super(RustSynchronousFlyteClient, self).get_task_execution_data(
+                _task_execution_pb2.TaskExecutionGetDataRequest(id=task_execution_identifier.to_flyte_idl())
+            )
+        )
+
+    def list_task_executions_paginated(
+        self,
+        node_execution_identifier,
+        limit=100,
+        token=None,
+        filters=None,
+        sort_by=None,
+    ):
+        """
+        :param flytekit.models.core.identifier.NodeExecutionIdentifier node_execution_identifier:
+        :param int limit:
+        :param Text token: [Optional] If specified, this specifies where in the rows of results to skip before reading.
+            If you previously retrieved a page response with token="foo" and you want the next page,
+            specify token="foo".
+        :param list[flytekit.models.filters.Filter] filters:
+        :param flytekit.models.admin.common.Sort sort_by: [Optional] If provided, the results will be sorted.
+        :rtype: (list[flytekit.models.admin.task_execution.TaskExecution], Text)
+        """
+        exec_list = super(RustSynchronousFlyteClient, self).list_task_executions_paginated(
+            _task_execution_pb2.TaskExecutionListRequest(
+                node_execution_id=node_execution_identifier.to_flyte_idl(),
+                limit=limit,
+                token=token,
+                filters=_filters.FilterList(filters or []).to_flyte_idl(),
+                sort_by=None if sort_by is None else sort_by.to_flyte_idl(),
+            )
+        )
+        return (
+            [_task_execution.TaskExecution.from_flyte_idl(e) for e in exec_list.task_executions],
+            str(exec_list.token),
+        )
+
+    ####################################################################################################################
+    #
+    #  Project Endpoints
+    #
+    ####################################################################################################################
+
+    def register_project(self, project):
+        """
+        Registers a project.
+        :param flytekit.models.project.Project project:
+        :rtype: flyteidl.admin.project_pb2.ProjectRegisterResponse
+        """
+        super(RustSynchronousFlyteClient, self).register_project(
+            _project_pb2.ProjectRegisterRequest(
+                project=project.to_flyte_idl(),
+            )
+        )
+
+    def update_project(self, project):
+        """
+        Update an existing project specified by id.
+        :param flytekit.models.project.Project project:
+        :rtype: flyteidl.admin.project_pb2.ProjectUpdateResponse
+        """
+        super(RustSynchronousFlyteClient, self).update_project(project.to_flyte_idl())
+
+    def list_projects_paginated(self, limit=100, token=None, filters=None, sort_by=None):
+        """
+        This returns a page of projects.
+
+        .. note ::
+
+            This is a paginated API. Use the token field in the request to specify a page offset token.
+            The user of the API is responsible for providing this token.
+
+        .. note ::
+
+            If entries are added to the database between requests for different pages, it is possible to receive
+            entries on the second page that also appeared on the first.
+
+        :param int limit: [Optional] The maximum number of entries to return. Must be greater than 0. The maximum
+            page size is determined by the Flyte Admin Service configuration. If limit is greater than the maximum
+            page size, an exception will be raised.
+        :param Text token: [Optional] If specified, this specifies where in the rows of results to skip before reading.
+            If you previously retrieved a page response with token="foo" and you want the next page,
+            specify token="foo". Please see the notes for this function about the caveats of the paginated API.
+ :param list[flytekit.models.filters.Filter] filters: [Optional] If specified, the filters will be applied to + the query. If the filter is not supported, an exception will be raised. + :param flytekit.models.admin.common.Sort sort_by: [Optional] If provided, the results will be sorted. + :raises grpc.RpcError: + :rtype: (list[flytekit.models.Project], Text) + """ + projects = super(RustSynchronousFlyteClient, self).list_projects( + project_list_request=_project_pb2.ProjectListRequest( + limit=limit, + token=token, + filters=_filters.FilterList(filters or []).to_flyte_idl(), + sort_by=None if sort_by is None else sort_by.to_flyte_idl(), + ) + ) + return ( + [_project.Project.from_flyte_idl(pb) for pb in projects.projects], + str(projects.token), + ) + + #################################################################################################################### + # + # Matching Attributes Endpoints + # + #################################################################################################################### + + def update_project_domain_attributes(self, project, domain, matching_attributes): + """ + Sets custom attributes for a project and domain combination. + :param Text project: + :param Text domain: + :param flytekit.models.MatchingAttributes matching_attributes: + :return: + """ + super(RustSynchronousFlyteClient, self).update_project_domain_attributes( + _project_domain_attributes_pb2.ProjectDomainAttributesUpdateRequest( + attributes=_project_domain_attributes_pb2.ProjectDomainAttributes( + project=project, + domain=domain, + matching_attributes=matching_attributes.to_flyte_idl(), + ) + ) + ) + + def update_workflow_attributes(self, project, domain, workflow, matching_attributes): + """ + Sets custom attributes for a project, domain, and workflow combination. + :param Text project: + :param Text domain: + :param Text workflow: + :param flytekit.models.MatchingAttributes matching_attributes: + :return: + """ + super(RustSynchronousFlyteClient, self).update_workflow_attributes( + _workflow_attributes_pb2.WorkflowAttributesUpdateRequest( + attributes=_workflow_attributes_pb2.WorkflowAttributes( + project=project, + domain=domain, + workflow=workflow, + matching_attributes=matching_attributes.to_flyte_idl(), + ) + ) + ) + + def get_project_domain_attributes(self, project, domain, resource_type): + """ + Fetches the custom attributes set for a project and domain combination. + :param Text project: + :param Text domain: + :param flytekit.models.MatchableResource resource_type: + :return: + """ + return super(RustSynchronousFlyteClient, self).get_project_domain_attributes( + _project_domain_attributes_pb2.ProjectDomainAttributesGetRequest( + project=project, + domain=domain, + resource_type=resource_type, + ) + ) + + def get_workflow_attributes(self, project, domain, workflow, resource_type): + """ + Fetches the custom attributes set for a project, domain, and workflow combination. + :param Text project: + :param Text domain: + :param Text workflow: + :param flytekit.models.MatchableResource resource_type: + :return: + """ + return super(RustSynchronousFlyteClient, self).get_workflow_attributes( + _workflow_attributes_pb2.WorkflowAttributesGetRequest( + project=project, + domain=domain, + workflow=workflow, + resource_type=resource_type, + ) + ) + + def list_matchable_attributes(self, resource_type): + """ + Fetches all custom attributes for a resource type. 
+        :param flytekit.models.MatchableResource resource_type:
+        :return:
+        """
+        return super(RustSynchronousFlyteClient, self).list_matchable_attributes(
+            _matchable_resource_pb2.ListMatchableAttributesRequest(
+                resource_type=resource_type,
+            )
+        )
+
+    def get_upload_signed_url(
+        self,
+        project: str,
+        domain: str,
+        content_md5: typing.Optional[bytes] = None,
+        filename: typing.Optional[str] = None,
+        expires_in: typing.Optional[datetime.timedelta] = None,
+        filename_root: typing.Optional[str] = None,
+    ) -> _data_proxy_pb2.CreateUploadLocationResponse:
+        """
+        Get a signed url to be used during fast registration
+
+        :param project: Project to create the upload location for
+        :param domain: Domain to create the upload location for
+        :param content_md5: ContentMD5 restricts the upload location to the specific MD5 provided. The content_md5
+            will also appear in the generated path.
+        :param filename: If provided this specifies a desired suffix for the generated location
+        :param expires_in: If provided this defines a requested expiration duration for
+            the generated url
+        :param filename_root: If provided, will be used as the root of the filename. If not, Admin will use a hash.
+            This option is useful when uploading a series of files that you want to be grouped together.
+        :rtype: flyteidl.service.dataproxy_pb2.CreateUploadLocationResponse
+        """
+        expires_in_pb = None
+        if expires_in:
+            expires_in_pb = Duration()
+            expires_in_pb.FromTimedelta(expires_in)
+        return super(RustSynchronousFlyteClient, self).create_upload_location(
+            _data_proxy_pb2.CreateUploadLocationRequest(
+                project=project,
+                domain=domain,
+                content_md5=content_md5,
+                filename=filename,
+                expires_in=expires_in_pb,
+                filename_root=filename_root,
+            )
+        )
+
+    def get_download_signed_url(
+        self, native_url: str, expires_in: typing.Optional[datetime.timedelta] = None
+    ) -> _data_proxy_pb2.CreateDownloadLocationResponse:
+        """
+        Get a signed url to be used to download data from the given native blob store url.
+
+        :param native_url: Native url (e.g. an s3:// path) to create the download location for
+        :param expires_in: If provided this defines a requested expiration duration for the generated url
+        """
+        expires_in_pb = None
+        if expires_in:
+            expires_in_pb = Duration()
+            expires_in_pb.FromTimedelta(expires_in)
+        return super(RustSynchronousFlyteClient, self).create_download_location(
+            _data_proxy_pb2.CreateDownloadLocationRequest(
+                native_url=native_url,
+                expires_in=expires_in_pb,
+            )
+        )
+
+    def get_data(self, flyte_uri: str) -> _data_proxy_pb2.GetDataResponse:
+        req = _data_proxy_pb2.GetDataRequest(flyte_url=flyte_uri)
+
+        resp = self._dataproxy_stub.GetData(req, metadata=self._metadata)
+        return resp
diff --git a/flytekit/remote/remote.py b/flytekit/remote/remote.py
index 0948de5065..9bd073d1c1 100644
--- a/flytekit/remote/remote.py
+++ b/flytekit/remote/remote.py
@@ -29,6 +29,7 @@ from flytekit import ImageSpec
 from flytekit.clients.friendly import SynchronousFlyteClient
+from flytekit.clients.friendly_rs import RustSynchronousFlyteClient
 from flytekit.clients.helpers import iterate_node_executions, iterate_task_executions
 from flytekit.configuration import Config, FastSerializationSettings, ImageConfig, SerializationSettings
 from flytekit.core import constants, utils
@@ -192,6 +193,7 @@ class FlyteRemote(object):
     def __init__(
         self,
         config: Config,
+        enable_rs: bool = False,
         default_project: typing.Optional[str] = None,
         default_domain: typing.Optional[str] = None,
         data_upload_location: str = "flyte://my-s3-bucket/",
@@ -212,6 +214,7 @@ def __init__(
         if data_upload_location is None:
             data_upload_location = FlyteContext.current_context().file_access.raw_output_prefix
         self._kwargs = kwargs
+        self._enable_rs = enable_rs
         self._client_initialized = False
         self._config = config  # read config files, env vars, host, ssl options for admin client
@@ -234,10 +237,13 @@ def context(self) -> FlyteContext:
         return self._ctx
 
     @property
-    def client(self) -> SynchronousFlyteClient:
+    def client(self) -> typing.Union[SynchronousFlyteClient, RustSynchronousFlyteClient]:
         """Return a SynchronousFlyteClient for additional operations."""
         if not self._client_initialized:
-            self._client = SynchronousFlyteClient(self.config.platform, **self._kwargs)
+            if self._enable_rs:
+                self._client = RustSynchronousFlyteClient()
+            else:
+                self._client = SynchronousFlyteClient(self.config.platform, **self._kwargs)
             self._client_initialized = True
         return self._client
 

From 713b448871f5f5869ba53086a194957bbf2f4578 Mon Sep 17 00:00:00 2001
From: Austin Liu
Date: Fri, 5 Apr 2024 00:58:02 +0800
Subject: [PATCH 02/16] [wip]refactor: add rpc service code

Signed-off-by: Austin Liu
---
 flyrs/src/gen/pb_rust/flyteidl/datacatalog.rs |  957 +++++
 .../gen/pb_rust/flyteidl/flyteidl.admin.rs    | 3341 ++++++++++++++++
 .../pb_rust/flyteidl/flyteidl.cacheservice.rs |  399 ++
 .../src/gen/pb_rust/flyteidl/flyteidl.core.rs | 3162 +++++++++++++++
 .../gen/pb_rust/flyteidl/flyteidl.event.rs    |  398 ++
 .../flyteidl/flyteidl.plugins.kubeflow.rs     |  207 +
 .../gen/pb_rust/flyteidl/flyteidl.plugins.rs  |  346 ++
 .../gen/pb_rust/flyteidl/flyteidl.service.rs  | 3509 +++++++++++++++++
 flyrs/src/gen/pb_rust/flyteidl/google.api.rs  |  367 ++
 ...pc.gateway.protoc_gen_openapiv2.options.rs | 1019 +++++
 10 files changed, 13705 insertions(+)
 create mode 100644 flyrs/src/gen/pb_rust/flyteidl/datacatalog.rs
 create mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.admin.rs
 create mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.cacheservice.rs
 create mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.core.rs
 create mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.event.rs
 create mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.kubeflow.rs
 create mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.rs
 create mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.service.rs
 create mode 100644 flyrs/src/gen/pb_rust/flyteidl/google.api.rs
 create mode 100644 flyrs/src/gen/pb_rust/flyteidl/grpc.gateway.protoc_gen_openapiv2.options.rs

diff --git a/flyrs/src/gen/pb_rust/flyteidl/datacatalog.rs b/flyrs/src/gen/pb_rust/flyteidl/datacatalog.rs
new file mode 100644
index 0000000000..20bfd41962
--- /dev/null
+++ b/flyrs/src/gen/pb_rust/flyteidl/datacatalog.rs
@@ -0,0 +1,957 @@
+///
+/// Request message for creating a Dataset.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateDatasetRequest {
+    #[prost(message, optional, tag = "1")]
+    pub dataset: ::core::option::Option<Dataset>,
+}
+///
+/// Response message for creating a Dataset
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateDatasetResponse {}
+///
+/// Request message for retrieving a Dataset. The Dataset is retrieved by its unique identifier
+/// which is a combination of several fields.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetDatasetRequest {
+    #[prost(message, optional, tag = "1")]
+    pub dataset: ::core::option::Option<DatasetId>,
+}
+///
+/// Response message for retrieving a Dataset. The response will include the metadata for the
+/// Dataset.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetDatasetResponse {
+    #[prost(message, optional, tag = "1")]
+    pub dataset: ::core::option::Option<Dataset>,
+}
+///
+/// Request message for retrieving an Artifact. Retrieve an artifact based on a query handle that
+/// can be one of artifact_id or tag. The result returned will include the artifact data and metadata
+/// associated with the artifact.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetArtifactRequest {
+    #[prost(message, optional, tag = "1")]
+    pub dataset: ::core::option::Option<DatasetId>,
+    #[prost(oneof = "get_artifact_request::QueryHandle", tags = "2, 3")]
+    pub query_handle: ::core::option::Option<get_artifact_request::QueryHandle>,
+}
+/// Nested message and enum types in `GetArtifactRequest`.
+pub mod get_artifact_request {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum QueryHandle {
+        #[prost(string, tag = "2")]
+        ArtifactId(::prost::alloc::string::String),
+        #[prost(string, tag = "3")]
+        TagName(::prost::alloc::string::String),
+    }
+}
+///
+/// Response message for retrieving an Artifact. The result returned will include the artifact data
+/// and metadata associated with the artifact.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetArtifactResponse {
+    #[prost(message, optional, tag = "1")]
+    pub artifact: ::core::option::Option<Artifact>,
+}
+///
+/// Request message for creating an Artifact and its associated artifact Data.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateArtifactRequest {
+    #[prost(message, optional, tag = "1")]
+    pub artifact: ::core::option::Option<Artifact>,
+}
+///
+/// Response message for creating an Artifact.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateArtifactResponse {}
+///
+/// Request message for tagging an Artifact.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AddTagRequest {
+    #[prost(message, optional, tag = "1")]
+    pub tag: ::core::option::Option<Tag>,
+}
+///
+/// Response message for tagging an Artifact.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AddTagResponse {}
+/// List the artifacts that belong to the Dataset, optionally filtered using a filter expression.
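+/// The filter is expressed as a `FilterExpression` (a list of `SinglePropertyFilter`s, defined
+/// below) and results are paged through `PaginationOptions`.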
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ListArtifactsRequest {
+    /// Use a datasetID for which you want to retrieve the artifacts
+    #[prost(message, optional, tag = "1")]
+    pub dataset: ::core::option::Option<DatasetId>,
+    /// Apply the filter expression to this query
+    #[prost(message, optional, tag = "2")]
+    pub filter: ::core::option::Option<FilterExpression>,
+    /// Pagination options to get a page of artifacts
+    #[prost(message, optional, tag = "3")]
+    pub pagination: ::core::option::Option<PaginationOptions>,
+}
+/// Response to list artifacts
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ListArtifactsResponse {
+    /// The list of artifacts
+    #[prost(message, repeated, tag = "1")]
+    pub artifacts: ::prost::alloc::vec::Vec<Artifact>,
+    /// Token to use to request the next page, pass this into the next request's PaginationOptions
+    #[prost(string, tag = "2")]
+    pub next_token: ::prost::alloc::string::String,
+}
+/// List the datasets for the given query
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ListDatasetsRequest {
+    /// Apply the filter expression to this query
+    #[prost(message, optional, tag = "1")]
+    pub filter: ::core::option::Option<FilterExpression>,
+    /// Pagination options to get a page of datasets
+    #[prost(message, optional, tag = "2")]
+    pub pagination: ::core::option::Option<PaginationOptions>,
+}
+/// List the datasets response with token for next pagination
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ListDatasetsResponse {
+    /// The list of datasets
+    #[prost(message, repeated, tag = "1")]
+    pub datasets: ::prost::alloc::vec::Vec<Dataset>,
+    /// Token to use to request the next page, pass this into the next request's PaginationOptions
+    #[prost(string, tag = "2")]
+    pub next_token: ::prost::alloc::string::String,
+}
+///
+/// Request message for updating an Artifact and overwriting its associated ArtifactData.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct UpdateArtifactRequest {
+    /// ID of dataset the artifact is associated with
+    #[prost(message, optional, tag = "1")]
+    pub dataset: ::core::option::Option<DatasetId>,
+    /// List of data to overwrite stored artifact data with. Must contain ALL data for updated Artifact as any missing
+    /// ArtifactData entries will be removed from the underlying blob storage and database.
+    #[prost(message, repeated, tag = "4")]
+    pub data: ::prost::alloc::vec::Vec<ArtifactData>,
+    /// Update execution metadata (including execution domain, name, node, project data) when overwriting cache
+    #[prost(message, optional, tag = "5")]
+    pub metadata: ::core::option::Option<Metadata>,
+    /// Either ID of artifact or name of tag to retrieve existing artifact from
+    #[prost(oneof = "update_artifact_request::QueryHandle", tags = "2, 3")]
+    pub query_handle: ::core::option::Option<update_artifact_request::QueryHandle>,
+}
+/// Nested message and enum types in `UpdateArtifactRequest`.
+pub mod update_artifact_request {
+    /// Either ID of artifact or name of tag to retrieve existing artifact from
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum QueryHandle {
+        #[prost(string, tag = "2")]
+        ArtifactId(::prost::alloc::string::String),
+        #[prost(string, tag = "3")]
+        TagName(::prost::alloc::string::String),
+    }
+}
+///
+/// Response message for updating an Artifact.
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateArtifactResponse { + /// The unique ID of the artifact updated + #[prost(string, tag = "1")] + pub artifact_id: ::prost::alloc::string::String, +} +/// +/// ReservationID message that is composed of several string fields. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReservationId { + /// The unique ID for the reserved dataset + #[prost(message, optional, tag = "1")] + pub dataset_id: ::core::option::Option, + /// The specific artifact tag for the reservation + #[prost(string, tag = "2")] + pub tag_name: ::prost::alloc::string::String, +} +/// Try to acquire or extend an artifact reservation. If an active reservation exists, retrieve that instance. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetOrExtendReservationRequest { + /// The unique ID for the reservation + #[prost(message, optional, tag = "1")] + pub reservation_id: ::core::option::Option, + /// The unique ID of the owner for the reservation + #[prost(string, tag = "2")] + pub owner_id: ::prost::alloc::string::String, + /// Requested reservation extension heartbeat interval + #[prost(message, optional, tag = "3")] + pub heartbeat_interval: ::core::option::Option<::prost_types::Duration>, +} +/// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Reservation { + /// The unique ID for the reservation + #[prost(message, optional, tag = "1")] + pub reservation_id: ::core::option::Option, + /// The unique ID of the owner for the reservation + #[prost(string, tag = "2")] + pub owner_id: ::prost::alloc::string::String, + /// Recommended heartbeat interval to extend reservation + #[prost(message, optional, tag = "3")] + pub heartbeat_interval: ::core::option::Option<::prost_types::Duration>, + /// Expiration timestamp of this reservation + #[prost(message, optional, tag = "4")] + pub expires_at: ::core::option::Option<::prost_types::Timestamp>, + /// Free-form metadata associated with the artifact + #[prost(message, optional, tag = "6")] + pub metadata: ::core::option::Option, +} +/// Response including either a newly minted reservation or the existing reservation +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetOrExtendReservationResponse { + /// The reservation to be acquired or extended + #[prost(message, optional, tag = "1")] + pub reservation: ::core::option::Option, +} +/// Request to release reservation +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReleaseReservationRequest { + /// The unique ID for the reservation + #[prost(message, optional, tag = "1")] + pub reservation_id: ::core::option::Option, + /// The unique ID of the owner for the reservation + #[prost(string, tag = "2")] + pub owner_id: ::prost::alloc::string::String, +} +/// Response to release reservation +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReleaseReservationResponse {} +/// +/// Dataset message. It is uniquely identified by DatasetID. 
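+/// A Dataset is a logical grouping of one or more artifacts.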
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Dataset { + #[prost(message, optional, tag = "1")] + pub id: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub metadata: ::core::option::Option, + #[prost(string, repeated, tag = "3")] + pub partition_keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// +/// An artifact could have multiple partitions and each partition can have an arbitrary string key/value pair +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Partition { + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub value: ::prost::alloc::string::String, +} +/// +/// DatasetID message that is composed of several string fields. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DatasetId { + /// The name of the project + #[prost(string, tag = "1")] + pub project: ::prost::alloc::string::String, + /// The name of the dataset + #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + /// The domain (eg. environment) + #[prost(string, tag = "3")] + pub domain: ::prost::alloc::string::String, + /// Version of the data schema + #[prost(string, tag = "4")] + pub version: ::prost::alloc::string::String, + /// UUID for the dataset (if set the above fields are optional) + #[prost(string, tag = "5")] + pub uuid: ::prost::alloc::string::String, + /// Optional, org key applied to the resource. + #[prost(string, tag = "6")] + pub org: ::prost::alloc::string::String, +} +/// +/// Artifact message. It is composed of several string fields. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Artifact { + /// The unique ID of the artifact + #[prost(string, tag = "1")] + pub id: ::prost::alloc::string::String, + /// The Dataset that the artifact belongs to + #[prost(message, optional, tag = "2")] + pub dataset: ::core::option::Option, + /// A list of data that is associated with the artifact + #[prost(message, repeated, tag = "3")] + pub data: ::prost::alloc::vec::Vec, + /// Free-form metadata associated with the artifact + #[prost(message, optional, tag = "4")] + pub metadata: ::core::option::Option, + #[prost(message, repeated, tag = "5")] + pub partitions: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag = "6")] + pub tags: ::prost::alloc::vec::Vec, + /// creation timestamp of artifact, autogenerated by service + #[prost(message, optional, tag = "7")] + pub created_at: ::core::option::Option<::prost_types::Timestamp>, +} +/// +/// ArtifactData that belongs to an artifact +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ArtifactData { + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub value: ::core::option::Option, +} +/// +/// Tag message that is unique to a Dataset. It is associated to a single artifact and +/// can be retrieved by name later. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Tag { + /// Name of tag + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// The tagged artifact + #[prost(string, tag = "2")] + pub artifact_id: ::prost::alloc::string::String, + /// The Dataset that this tag belongs to + #[prost(message, optional, tag = "3")] + pub dataset: ::core::option::Option, +} +/// +/// Metadata representation for artifacts and datasets +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Metadata { + /// key map is a dictionary of key/val strings that represent metadata + #[prost(map = "string, string", tag = "1")] + pub key_map: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, +} +/// Filter expression that is composed of a combination of single filters +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FilterExpression { + #[prost(message, repeated, tag = "1")] + pub filters: ::prost::alloc::vec::Vec, +} +/// A single property to filter on. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SinglePropertyFilter { + /// field 10 in case we add more entities to query + #[prost(enumeration = "single_property_filter::ComparisonOperator", tag = "10")] + pub operator: i32, + #[prost(oneof = "single_property_filter::PropertyFilter", tags = "1, 2, 3, 4")] + pub property_filter: ::core::option::Option, +} +/// Nested message and enum types in `SinglePropertyFilter`. +pub mod single_property_filter { + /// as use-cases come up we can add more operators, ex: gte, like, not eq etc. + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum ComparisonOperator { + Equals = 0, + } + impl ComparisonOperator { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ComparisonOperator::Equals => "EQUALS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "EQUALS" => Some(Self::Equals), + _ => None, + } + } + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum PropertyFilter { + #[prost(message, tag = "1")] + TagFilter(super::TagPropertyFilter), + #[prost(message, tag = "2")] + PartitionFilter(super::PartitionPropertyFilter), + #[prost(message, tag = "3")] + ArtifactFilter(super::ArtifactPropertyFilter), + #[prost(message, tag = "4")] + DatasetFilter(super::DatasetPropertyFilter), + } +} +/// Artifact properties we can filter by +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ArtifactPropertyFilter { + /// oneof because we can add more properties in the future + #[prost(oneof = "artifact_property_filter::Property", tags = "1")] + pub property: ::core::option::Option, +} +/// Nested message and enum types in `ArtifactPropertyFilter`. 
+pub mod artifact_property_filter { + /// oneof because we can add more properties in the future + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Property { + #[prost(string, tag = "1")] + ArtifactId(::prost::alloc::string::String), + } +} +/// Tag properties we can filter by +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TagPropertyFilter { + #[prost(oneof = "tag_property_filter::Property", tags = "1")] + pub property: ::core::option::Option, +} +/// Nested message and enum types in `TagPropertyFilter`. +pub mod tag_property_filter { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Property { + #[prost(string, tag = "1")] + TagName(::prost::alloc::string::String), + } +} +/// Partition properties we can filter by +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PartitionPropertyFilter { + #[prost(oneof = "partition_property_filter::Property", tags = "1")] + pub property: ::core::option::Option, +} +/// Nested message and enum types in `PartitionPropertyFilter`. +pub mod partition_property_filter { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Property { + #[prost(message, tag = "1")] + KeyVal(super::KeyValuePair), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KeyValuePair { + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub value: ::prost::alloc::string::String, +} +/// Dataset properties we can filter by +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DatasetPropertyFilter { + #[prost(oneof = "dataset_property_filter::Property", tags = "1, 2, 3, 4, 5")] + pub property: ::core::option::Option, +} +/// Nested message and enum types in `DatasetPropertyFilter`. +pub mod dataset_property_filter { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Property { + #[prost(string, tag = "1")] + Project(::prost::alloc::string::String), + #[prost(string, tag = "2")] + Name(::prost::alloc::string::String), + #[prost(string, tag = "3")] + Domain(::prost::alloc::string::String), + #[prost(string, tag = "4")] + Version(::prost::alloc::string::String), + /// Optional, org key applied to the dataset. + #[prost(string, tag = "5")] + Org(::prost::alloc::string::String), + } +} +/// Pagination options for making list requests +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PaginationOptions { + /// the max number of results to return + #[prost(uint32, tag = "1")] + pub limit: u32, + /// the token to pass to fetch the next page + #[prost(string, tag = "2")] + pub token: ::prost::alloc::string::String, + /// the property that we want to sort the results by + #[prost(enumeration = "pagination_options::SortKey", tag = "3")] + pub sort_key: i32, + /// the sort order of the results + #[prost(enumeration = "pagination_options::SortOrder", tag = "4")] + pub sort_order: i32, +} +/// Nested message and enum types in `PaginationOptions`. 
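+/// The zero-valued variants (`DESCENDING`, `CREATION_TIME`) are the proto3 defaults applied when a
+/// request leaves `sort_order`/`sort_key` unset.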
+pub mod pagination_options { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum SortOrder { + Descending = 0, + Ascending = 1, + } + impl SortOrder { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SortOrder::Descending => "DESCENDING", + SortOrder::Ascending => "ASCENDING", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "DESCENDING" => Some(Self::Descending), + "ASCENDING" => Some(Self::Ascending), + _ => None, + } + } + } + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum SortKey { + CreationTime = 0, + } + impl SortKey { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SortKey::CreationTime => "CREATION_TIME", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "CREATION_TIME" => Some(Self::CreationTime), + _ => None, + } + } + } +} +/// Generated client implementations. +pub mod data_catalog_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// + /// Data Catalog service definition + /// Data Catalog is a service for indexing parameterized, strongly-typed data artifacts across revisions. + /// Artifacts are associated with a Dataset, and can be tagged for retrieval. + #[derive(Debug, Clone)] + pub struct DataCatalogClient { + inner: tonic::client::Grpc, + } + impl DataCatalogClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl DataCatalogClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> DataCatalogClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + DataCatalogClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. 
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Create a new Dataset. Datasets are unique based on the DatasetID. Datasets are logical groupings of artifacts. + /// Each dataset can have one or more artifacts + pub async fn create_dataset( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/CreateDataset", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "CreateDataset")); + self.inner.unary(req, path, codec).await + } + /// Get a Dataset by the DatasetID. This returns the Dataset with the associated metadata. + pub async fn get_dataset( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/GetDataset", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "GetDataset")); + self.inner.unary(req, path, codec).await + } + /// Create an artifact and the artifact data associated with it. An artifact can be a hive partition or arbitrary + /// files or data values + pub async fn create_artifact( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/CreateArtifact", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "CreateArtifact")); + self.inner.unary(req, path, codec).await + } + /// Retrieve an artifact by an identifying handle. This returns an artifact along with the artifact data. 
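+        /// The identifying handle is the request's `query_handle` oneof: either an `ArtifactId` or a
+        /// `TagName` (see `GetArtifactRequest`).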
+ pub async fn get_artifact( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/GetArtifact", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "GetArtifact")); + self.inner.unary(req, path, codec).await + } + /// Associate a tag with an artifact. Tags are unique within a Dataset. + pub async fn add_tag( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/AddTag", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "AddTag")); + self.inner.unary(req, path, codec).await + } + /// Return a paginated list of artifacts + pub async fn list_artifacts( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/ListArtifacts", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "ListArtifacts")); + self.inner.unary(req, path, codec).await + } + /// Return a paginated list of datasets + pub async fn list_datasets( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/ListDatasets", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "ListDatasets")); + self.inner.unary(req, path, codec).await + } + /// Updates an existing artifact, overwriting the stored artifact data in the underlying blob storage. + pub async fn update_artifact( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/UpdateArtifact", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("datacatalog.DataCatalog", "UpdateArtifact")); + self.inner.unary(req, path, codec).await + } + /// Attempts to get or extend a reservation for the corresponding artifact. If one already exists + /// (ie. 
another entity owns the reservation) then that reservation is retrieved. + /// Once you acquire a reservation, you need to periodically extend the reservation with an + /// identical call. If the reservation is not extended before the defined expiration, it may be + /// acquired by another task. + /// Note: We may have multiple concurrent tasks with the same signature and the same input that + /// try to populate the same artifact at the same time. Thus with reservation, only one task can + /// run at a time, until the reservation expires. + /// Note: If task A does not extend the reservation in time and the reservation expires, another + /// task B may take over the reservation, resulting in two tasks A and B running in parallel. So + /// a third task C may get the Artifact from A or B, whichever writes last. + pub async fn get_or_extend_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/GetOrExtendReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("datacatalog.DataCatalog", "GetOrExtendReservation"), + ); + self.inner.unary(req, path, codec).await + } + /// Release the reservation when the task holding the spot fails so that the other tasks + /// can grab the spot. + pub async fn release_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/datacatalog.DataCatalog/ReleaseReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("datacatalog.DataCatalog", "ReleaseReservation"), + ); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.admin.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.admin.rs new file mode 100644 index 0000000000..145fd6500b --- /dev/null +++ b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.admin.rs @@ -0,0 +1,3341 @@ +/// Encapsulation of fields that identifies a Flyte resource. +/// A Flyte resource can be a task, workflow or launch plan. +/// A resource can internally have multiple versions and is uniquely identified +/// by project, domain, and name. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NamedEntityIdentifier { + /// Name of the project the resource belongs to. + #[prost(string, tag = "1")] + pub project: ::prost::alloc::string::String, + /// Name of the domain the resource belongs to. + /// A domain can be considered as a subset within a specific project. + #[prost(string, tag = "2")] + pub domain: ::prost::alloc::string::String, + /// User provided value for the resource. + /// The combination of project + domain + name uniquely identifies the resource. + /// +optional - in certain contexts - like 'List API', 'Launch plans' + #[prost(string, tag = "3")] + pub name: ::prost::alloc::string::String, + /// Optional, org key applied to the resource. 
+ #[prost(string, tag = "4")] + pub org: ::prost::alloc::string::String, +} +/// Additional metadata around a named entity. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NamedEntityMetadata { + /// Common description across all versions of the entity + /// +optional + #[prost(string, tag = "1")] + pub description: ::prost::alloc::string::String, + /// Shared state across all version of the entity + /// At this point in time, only workflow entities can have their state archived. + #[prost(enumeration = "NamedEntityState", tag = "2")] + pub state: i32, +} +/// Encapsulates information common to a NamedEntity, a Flyte resource such as a task, +/// workflow or launch plan. A NamedEntity is exclusively identified by its resource type +/// and identifier. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NamedEntity { + /// Resource type of the named entity. One of Task, Workflow or LaunchPlan. + #[prost(enumeration = "super::core::ResourceType", tag = "1")] + pub resource_type: i32, + #[prost(message, optional, tag = "2")] + pub id: ::core::option::Option, + /// Additional metadata around a named entity. + #[prost(message, optional, tag = "3")] + pub metadata: ::core::option::Option, +} +/// Specifies sort ordering in a list request. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Sort { + /// Indicates an attribute to sort the response values. + /// +required + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + /// Indicates the direction to apply sort key for response values. + /// +optional + #[prost(enumeration = "sort::Direction", tag = "2")] + pub direction: i32, +} +/// Nested message and enum types in `Sort`. +pub mod sort { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Direction { + /// By default, fields are sorted in descending order. + Descending = 0, + Ascending = 1, + } + impl Direction { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Direction::Descending => "DESCENDING", + Direction::Ascending => "ASCENDING", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "DESCENDING" => Some(Self::Descending), + "ASCENDING" => Some(Self::Ascending), + _ => None, + } + } + } +} +/// Represents a request structure to list NamedEntityIdentifiers. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NamedEntityIdentifierListRequest { + /// Name of the project that contains the identifiers. + /// +required + #[prost(string, tag = "1")] + pub project: ::prost::alloc::string::String, + /// Name of the domain the identifiers belongs to within the project. + /// +required + #[prost(string, tag = "2")] + pub domain: ::prost::alloc::string::String, + /// Indicates the number of resources to be returned. + /// +required + #[prost(uint32, tag = "3")] + pub limit: u32, + /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page + /// in a query. 
+ /// +optional + #[prost(string, tag = "4")] + pub token: ::prost::alloc::string::String, + /// Specifies how listed entities should be sorted in the response. + /// +optional + #[prost(message, optional, tag = "5")] + pub sort_by: ::core::option::Option, + /// Indicates a list of filters passed as string. + /// +optional + #[prost(string, tag = "6")] + pub filters: ::prost::alloc::string::String, + /// Optional, org key applied to the resource. + #[prost(string, tag = "7")] + pub org: ::prost::alloc::string::String, +} +/// Represents a request structure to list NamedEntity objects +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NamedEntityListRequest { + /// Resource type of the metadata to query. One of Task, Workflow or LaunchPlan. + /// +required + #[prost(enumeration = "super::core::ResourceType", tag = "1")] + pub resource_type: i32, + /// Name of the project that contains the identifiers. + /// +required + #[prost(string, tag = "2")] + pub project: ::prost::alloc::string::String, + /// Name of the domain the identifiers belongs to within the project. + #[prost(string, tag = "3")] + pub domain: ::prost::alloc::string::String, + /// Indicates the number of resources to be returned. + #[prost(uint32, tag = "4")] + pub limit: u32, + /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page + /// in a query. + /// +optional + #[prost(string, tag = "5")] + pub token: ::prost::alloc::string::String, + /// Specifies how listed entities should be sorted in the response. + /// +optional + #[prost(message, optional, tag = "6")] + pub sort_by: ::core::option::Option, + /// Indicates a list of filters passed as string. + /// +optional + #[prost(string, tag = "7")] + pub filters: ::prost::alloc::string::String, + /// Optional, org key applied to the resource. + #[prost(string, tag = "8")] + pub org: ::prost::alloc::string::String, +} +/// Represents a list of NamedEntityIdentifiers. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NamedEntityIdentifierList { + /// A list of identifiers. + #[prost(message, repeated, tag = "1")] + pub entities: ::prost::alloc::vec::Vec, + /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page + /// in a query. If there are no more results, this value will be empty. + #[prost(string, tag = "2")] + pub token: ::prost::alloc::string::String, +} +/// Represents a list of NamedEntityIdentifiers. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NamedEntityList { + /// A list of NamedEntity objects + #[prost(message, repeated, tag = "1")] + pub entities: ::prost::alloc::vec::Vec, + /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page + /// in a query. If there are no more results, this value will be empty. + #[prost(string, tag = "2")] + pub token: ::prost::alloc::string::String, +} +/// A request to retrieve the metadata associated with a NamedEntityIdentifier +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NamedEntityGetRequest { + /// Resource type of the metadata to get. One of Task, Workflow or LaunchPlan. + /// +required + #[prost(enumeration = "super::core::ResourceType", tag = "1")] + pub resource_type: i32, + /// The identifier for the named entity for which to fetch metadata. 
+ /// +required + #[prost(message, optional, tag = "2")] + pub id: ::core::option::Option, +} +/// Request to set the referenced named entity state to the configured value. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NamedEntityUpdateRequest { + /// Resource type of the metadata to update + /// +required + #[prost(enumeration = "super::core::ResourceType", tag = "1")] + pub resource_type: i32, + /// Identifier of the metadata to update + /// +required + #[prost(message, optional, tag = "2")] + pub id: ::core::option::Option, + /// Metadata object to set as the new value + /// +required + #[prost(message, optional, tag = "3")] + pub metadata: ::core::option::Option, +} +/// Purposefully empty, may be populated in the future. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NamedEntityUpdateResponse {} +/// Shared request structure to fetch a single resource. +/// Resources include: Task, Workflow, LaunchPlan +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ObjectGetRequest { + /// Indicates a unique version of resource. + /// +required + #[prost(message, optional, tag = "1")] + pub id: ::core::option::Option, +} +/// Shared request structure to retrieve a list of resources. +/// Resources include: Task, Workflow, LaunchPlan +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResourceListRequest { + /// id represents the unique identifier of the resource. + /// +required + #[prost(message, optional, tag = "1")] + pub id: ::core::option::Option, + /// Indicates the number of resources to be returned. + /// +required + #[prost(uint32, tag = "2")] + pub limit: u32, + /// In the case of multiple pages of results, this server-provided token can be used to fetch the next page + /// in a query. + /// +optional + #[prost(string, tag = "3")] + pub token: ::prost::alloc::string::String, + /// Indicates a list of filters passed as string. + /// More info on constructing filters : + /// +optional + #[prost(string, tag = "4")] + pub filters: ::prost::alloc::string::String, + /// Sort ordering. + /// +optional + #[prost(message, optional, tag = "5")] + pub sort_by: ::core::option::Option, +} +/// Defines an email notification specification. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EmailNotification { + /// The list of email addresses recipients for this notification. + /// +required + #[prost(string, repeated, tag = "1")] + pub recipients_email: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Defines a pager duty notification specification. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PagerDutyNotification { + /// Currently, PagerDuty notifications leverage email to trigger a notification. + /// +required + #[prost(string, repeated, tag = "1")] + pub recipients_email: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Defines a slack notification specification. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SlackNotification { + /// Currently, Slack notifications leverage email to trigger a notification. 
+ /// +required + #[prost(string, repeated, tag = "1")] + pub recipients_email: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// Represents a structure for notifications based on execution status. +/// The notification content is configured within flyte admin but can be templatized. +/// Future iterations could expose configuring notifications with custom content. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Notification { + /// A list of phases to which users can associate the notifications to. + /// +required + #[prost(enumeration = "super::core::workflow_execution::Phase", repeated, tag = "1")] + pub phases: ::prost::alloc::vec::Vec, + /// The type of notification to trigger. + /// +required + #[prost(oneof = "notification::Type", tags = "2, 3, 4")] + pub r#type: ::core::option::Option, +} +/// Nested message and enum types in `Notification`. +pub mod notification { + /// The type of notification to trigger. + /// +required + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Type { + #[prost(message, tag = "2")] + Email(super::EmailNotification), + #[prost(message, tag = "3")] + PagerDuty(super::PagerDutyNotification), + #[prost(message, tag = "4")] + Slack(super::SlackNotification), + } +} +/// Represents a string url and associated metadata used throughout the platform. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UrlBlob { + /// Actual url value. + #[prost(string, tag = "1")] + pub url: ::prost::alloc::string::String, + /// Represents the size of the file accessible at the above url. + #[prost(int64, tag = "2")] + pub bytes: i64, +} +/// Label values to be applied to an execution resource. +/// In the future a mode (e.g. OVERRIDE, APPEND, etc) can be defined +/// to specify how to merge labels defined at registration and execution time. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Labels { + /// Map of custom labels to be applied to the execution resource. + #[prost(map = "string, string", tag = "1")] + pub values: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, +} +/// Annotation values to be applied to an execution resource. +/// In the future a mode (e.g. OVERRIDE, APPEND, etc) can be defined +/// to specify how to merge annotations defined at registration and execution time. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Annotations { + /// Map of custom annotations to be applied to the execution resource. + #[prost(map = "string, string", tag = "1")] + pub values: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, +} +/// Environment variable values to be applied to an execution resource. +/// In the future a mode (e.g. OVERRIDE, APPEND, etc) can be defined +/// to specify how to merge environment variables defined at registration and execution time. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Envs { + /// Map of custom environment variables to be applied to the execution resource. + #[prost(message, repeated, tag = "1")] + pub values: ::prost::alloc::vec::Vec, +} +/// Defines permissions associated with executions created by this launch plan spec. 
+/// Use either of these roles when they have permissions required by your workflow execution. +/// Deprecated. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AuthRole { + /// Defines an optional iam role which will be used for tasks run in executions created with this launch plan. + #[prost(string, tag = "1")] + pub assumable_iam_role: ::prost::alloc::string::String, + /// Defines an optional kubernetes service account which will be used for tasks run in executions created with this launch plan. + #[prost(string, tag = "2")] + pub kubernetes_service_account: ::prost::alloc::string::String, +} +/// Encapsulates user settings pertaining to offloaded data (i.e. Blobs, Schema, query data, etc.). +/// See for more background information. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct RawOutputDataConfig { + /// Prefix for where offloaded data from user workflows will be written + /// e.g. s3://bucket/key or s3://bucket/ + #[prost(string, tag = "1")] + pub output_location_prefix: ::prost::alloc::string::String, +} +/// These URLs are returned as part of node and task execution data requests. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FlyteUrLs { + #[prost(string, tag = "1")] + pub inputs: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub outputs: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub deck: ::prost::alloc::string::String, +} +/// The status of the named entity is used to control its visibility in the UI. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum NamedEntityState { + /// By default, all named entities are considered active and under development. + NamedEntityActive = 0, + /// Archived named entities are no longer visible in the UI. + NamedEntityArchived = 1, + /// System generated entities that aren't explicitly created or managed by a user. + SystemGenerated = 2, +} +impl NamedEntityState { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + NamedEntityState::NamedEntityActive => "NAMED_ENTITY_ACTIVE", + NamedEntityState::NamedEntityArchived => "NAMED_ENTITY_ARCHIVED", + NamedEntityState::SystemGenerated => "SYSTEM_GENERATED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "NAMED_ENTITY_ACTIVE" => Some(Self::NamedEntityActive), + "NAMED_ENTITY_ARCHIVED" => Some(Self::NamedEntityArchived), + "SYSTEM_GENERATED" => Some(Self::SystemGenerated), + _ => None, + } + } +} +/// SignalGetOrCreateRequest represents a request structure to retrieve or create a signal. +/// See :ref:`ref_flyteidl.admin.Signal` for more details +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SignalGetOrCreateRequest { + /// A unique identifier for the requested signal. + #[prost(message, optional, tag = "1")] + pub id: ::core::option::Option, + /// A type denoting the required value type for this signal. 
+/// SignalGetOrCreateRequest represents a request structure to retrieve or create a signal.
+/// See :ref:`ref_flyteidl.admin.Signal` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SignalGetOrCreateRequest {
+ /// A unique identifier for the requested signal.
+ #[prost(message, optional, tag = "1")]
+ pub id: ::core::option::Option<super::core::SignalIdentifier>,
+ /// A type denoting the required value type for this signal.
+ #[prost(message, optional, tag = "2")]
+ pub r#type: ::core::option::Option<super::core::LiteralType>,
+}
+/// SignalListRequest represents a request structure to retrieve a collection of signals.
+/// See :ref:`ref_flyteidl.admin.Signal` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SignalListRequest {
+ /// Indicates the workflow execution to filter by.
+ /// +required
+ #[prost(message, optional, tag = "1")]
+ pub workflow_execution_id: ::core::option::Option<
+ super::core::WorkflowExecutionIdentifier,
+ >,
+ /// Indicates the number of resources to be returned.
+ /// +required
+ #[prost(uint32, tag = "2")]
+ pub limit: u32,
+ /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+ /// in a query.
+ /// +optional
+ #[prost(string, tag = "3")]
+ pub token: ::prost::alloc::string::String,
+ /// Indicates a list of filters passed as string.
+ /// +optional
+ #[prost(string, tag = "4")]
+ pub filters: ::prost::alloc::string::String,
+ /// Sort ordering.
+ /// +optional
+ #[prost(message, optional, tag = "5")]
+ pub sort_by: ::core::option::Option<Sort>,
+}
+/// SignalList represents a collection of signals along with the token of the last result.
+/// See :ref:`ref_flyteidl.admin.Signal` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SignalList {
+ /// A list of signals matching the input filters.
+ #[prost(message, repeated, tag = "1")]
+ pub signals: ::prost::alloc::vec::Vec<Signal>,
+ /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+ /// in a query. If there are no more results, this value will be empty.
+ #[prost(string, tag = "2")]
+ pub token: ::prost::alloc::string::String,
+}
+/// SignalSetRequest represents a request structure to set the value on a signal. Setting a signal
+/// effectively satisfies the signal condition within a Flyte workflow.
+/// See :ref:`ref_flyteidl.admin.Signal` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SignalSetRequest {
+ /// A unique identifier for the requested signal.
+ #[prost(message, optional, tag = "1")]
+ pub id: ::core::option::Option<super::core::SignalIdentifier>,
+ /// The value of this signal; it must match the defining signal type.
+ #[prost(message, optional, tag = "2")]
+ pub value: ::core::option::Option<super::core::Literal>,
+}
+/// SignalSetResponse represents a response structure if signal setting succeeds.
+///
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SignalSetResponse {}
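+// Pagination sketch (illustrative only; `fetch_page` stands in for a real
+// client call and is a hypothetical parameter): keep feeding the returned
+// token back into the request until it comes back empty.
+#[allow(dead_code)]
+fn list_all_signals(
+ execution_id: super::core::WorkflowExecutionIdentifier,
+ mut fetch_page: impl FnMut(&SignalListRequest) -> SignalList,
+) -> Vec<Signal> {
+ let mut request = SignalListRequest {
+ workflow_execution_id: Some(execution_id),
+ limit: 100,
+ token: String::new(),
+ filters: String::new(),
+ sort_by: None,
+ };
+ let mut signals = Vec::new();
+ loop {
+ let page = fetch_page(&request);
+ signals.extend(page.signals);
+ if page.token.is_empty() {
+ break;
+ }
+ request.token = page.token;
+ }
+ signals
+}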
+/// Signal encapsulates a unique identifier, associated metadata, and a value for a single Flyte
+/// signal. Signals may exist either without a set value (representing a signal request) or with a
+/// populated value (indicating the signal has been given).
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Signal {
+ /// A unique identifier for the requested signal.
+ #[prost(message, optional, tag = "1")]
+ pub id: ::core::option::Option<super::core::SignalIdentifier>,
+ /// A type denoting the required value type for this signal.
+ #[prost(message, optional, tag = "2")]
+ pub r#type: ::core::option::Option<super::core::LiteralType>,
+ /// The value of the signal. This is only available if the signal has been "set" and must match
+ /// the defined type.
+ #[prost(message, optional, tag = "3")]
+ pub value: ::core::option::Option<super::core::Literal>,
+}
+/// Represents a subset of runtime task execution metadata that is relevant to external plugins.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskExecutionMetadata {
+ /// ID of the task execution
+ #[prost(message, optional, tag = "1")]
+ pub task_execution_id: ::core::option::Option<super::core::TaskExecutionIdentifier>,
+ /// k8s namespace in which the task is executed
+ #[prost(string, tag = "2")]
+ pub namespace: ::prost::alloc::string::String,
+ /// Labels attached to the task execution
+ #[prost(map = "string, string", tag = "3")]
+ pub labels: ::std::collections::HashMap<
+ ::prost::alloc::string::String,
+ ::prost::alloc::string::String,
+ >,
+ /// Annotations attached to the task execution
+ #[prost(map = "string, string", tag = "4")]
+ pub annotations: ::std::collections::HashMap<
+ ::prost::alloc::string::String,
+ ::prost::alloc::string::String,
+ >,
+ /// k8s service account associated with the task execution
+ #[prost(string, tag = "5")]
+ pub k8s_service_account: ::prost::alloc::string::String,
+ /// Environment variables attached to the task execution
+ #[prost(map = "string, string", tag = "6")]
+ pub environment_variables: ::std::collections::HashMap<
+ ::prost::alloc::string::String,
+ ::prost::alloc::string::String,
+ >,
+ /// Represents the maximum number of attempts allowed for a task.
+ /// If a task fails, it can be retried up to this maximum number of attempts.
+ #[prost(int32, tag = "7")]
+ pub max_attempts: i32,
+ /// Indicates whether the task execution can be interrupted.
+ /// If set to true, the task can be stopped before completion.
+ #[prost(bool, tag = "8")]
+ pub interruptible: bool,
+ /// Specifies the threshold for failure count at which the interruptible property
+ /// will take effect. If the number of consecutive task failures exceeds this threshold,
+ /// interruptible behavior will be activated.
+ #[prost(int32, tag = "9")]
+ pub interruptible_failure_threshold: i32,
+ /// Overrides for specific properties of the task node.
+ /// These overrides can be used to customize the behavior of the task node.
+ #[prost(message, optional, tag = "10")]
+ pub overrides: ::core::option::Option<super::core::TaskNodeOverrides>,
+}
+/// Represents a request structure to create a task.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateTaskRequest {
+ /// The inputs required to start the execution. All required inputs must be
+ /// included in this map. If not required and not provided, defaults apply.
+ /// +optional
+ #[prost(message, optional, tag = "1")]
+ pub inputs: ::core::option::Option<super::core::LiteralMap>,
+ /// Template of the task that encapsulates all the metadata of the task.
+ #[prost(message, optional, tag = "2")]
+ pub template: ::core::option::Option<super::core::TaskTemplate>,
+ /// Prefix for where task output data will be written. (e.g. s3://my-bucket/randomstring)
+ #[prost(string, tag = "3")]
+ pub output_prefix: ::prost::alloc::string::String,
+ /// Subset of runtime task execution metadata.
+ #[prost(message, optional, tag = "4")]
+ pub task_execution_metadata: ::core::option::Option<TaskExecutionMetadata>,
+}
+/// Represents a create response structure.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateTaskResponse {
+ /// ResourceMeta is created by the agent. It could be a string (jobId) or a dict (more complex metadata).
+ #[prost(bytes = "vec", tag = "1")]
+ pub resource_meta: ::prost::alloc::vec::Vec<u8>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateRequestHeader {
+ /// Template of the task that encapsulates all the metadata of the task.
+ #[prost(message, optional, tag = "1")]
+ pub template: ::core::option::Option<super::core::TaskTemplate>,
+ /// Prefix for where task output data will be written. (e.g. s3://my-bucket/randomstring)
+ #[prost(string, tag = "2")]
+ pub output_prefix: ::prost::alloc::string::String,
+ /// Subset of runtime task execution metadata.
+ #[prost(message, optional, tag = "3")]
+ pub task_execution_metadata: ::core::option::Option<TaskExecutionMetadata>,
+ /// MaxDatasetSizeBytes is the maximum size of the dataset that can be generated by the task.
+ #[prost(int64, tag = "4")]
+ pub max_dataset_size_bytes: i64,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecuteTaskSyncRequest {
+ #[prost(oneof = "execute_task_sync_request::Part", tags = "1, 2")]
+ pub part: ::core::option::Option<execute_task_sync_request::Part>,
+}
+/// Nested message and enum types in `ExecuteTaskSyncRequest`.
+pub mod execute_task_sync_request {
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum Part {
+ #[prost(message, tag = "1")]
+ Header(super::CreateRequestHeader),
+ #[prost(message, tag = "2")]
+ Inputs(super::super::core::LiteralMap),
+ }
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecuteTaskSyncResponseHeader {
+ #[prost(message, optional, tag = "1")]
+ pub resource: ::core::option::Option<Resource>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecuteTaskSyncResponse {
+ /// Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata).
+ /// Resource is for synchronous task execution.
+ #[prost(oneof = "execute_task_sync_response::Res", tags = "1, 2")]
+ pub res: ::core::option::Option<execute_task_sync_response::Res>,
+}
+/// Nested message and enum types in `ExecuteTaskSyncResponse`.
+pub mod execute_task_sync_response {
+ /// Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata).
+ /// Resource is for synchronous task execution.
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum Res {
+ #[prost(message, tag = "1")]
+ Header(super::ExecuteTaskSyncResponseHeader),
+ #[prost(message, tag = "2")]
+ Outputs(super::super::core::LiteralMap),
+ }
+}
+/// A message used to fetch a job resource from the flyte agent server.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetTaskRequest {
+ /// A predefined yet extensible Task type identifier.
+ #[deprecated]
+ #[prost(string, tag = "1")]
+ pub task_type: ::prost::alloc::string::String,
+ /// Metadata about the resource to be passed to the agent.
+ #[prost(bytes = "vec", tag = "2")]
+ pub resource_meta: ::prost::alloc::vec::Vec<u8>,
+ /// A predefined yet extensible Task type identifier.
+ #[prost(message, optional, tag = "3")]
+ pub task_category: ::core::option::Option<TaskCategory>,
+}
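+// Sketch (not generated): a sync-execution request stream sends the header
+// part first and the inputs part second, mirroring the `Part` oneof above.
+#[allow(dead_code)]
+fn sync_task_request_parts(
+ header: CreateRequestHeader,
+ inputs: super::core::LiteralMap,
+) -> Vec<ExecuteTaskSyncRequest> {
+ vec![
+ ExecuteTaskSyncRequest {
+ part: Some(execute_task_sync_request::Part::Header(header)),
+ },
+ ExecuteTaskSyncRequest {
+ part: Some(execute_task_sync_request::Part::Inputs(inputs)),
+ },
+ ]
+}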
+/// Response to get an individual task resource.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetTaskResponse {
+ #[prost(message, optional, tag = "1")]
+ pub resource: ::core::option::Option<Resource>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Resource {
+ /// DEPRECATED. The state of the execution is used to control its visibility in the UI/CLI.
+ #[deprecated]
+ #[prost(enumeration = "State", tag = "1")]
+ pub state: i32,
+ /// The outputs of the execution. It's typically used by SQL tasks. The agent service will create a
+ /// structured dataset pointing to the query result table.
+ /// +optional
+ #[prost(message, optional, tag = "2")]
+ pub outputs: ::core::option::Option<super::core::LiteralMap>,
+ /// A descriptive message for the current state. e.g. waiting for cluster.
+ #[prost(string, tag = "3")]
+ pub message: ::prost::alloc::string::String,
+ /// Log information for the task execution.
+ #[prost(message, repeated, tag = "4")]
+ pub log_links: ::prost::alloc::vec::Vec<super::core::TaskLog>,
+ /// The phase of the execution is used to determine the phase of the plugin's execution.
+ #[prost(enumeration = "super::core::task_execution::Phase", tag = "5")]
+ pub phase: i32,
+ /// Custom data specific to the agent.
+ #[prost(message, optional, tag = "6")]
+ pub custom_info: ::core::option::Option<::prost_types::Struct>,
+}
+/// A message used to delete a task.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DeleteTaskRequest {
+ /// A predefined yet extensible Task type identifier.
+ #[deprecated]
+ #[prost(string, tag = "1")]
+ pub task_type: ::prost::alloc::string::String,
+ /// Metadata about the resource to be passed to the agent.
+ #[prost(bytes = "vec", tag = "2")]
+ pub resource_meta: ::prost::alloc::vec::Vec<u8>,
+ /// A predefined yet extensible Task type identifier.
+ #[prost(message, optional, tag = "3")]
+ pub task_category: ::core::option::Option<TaskCategory>,
+}
+/// Response to delete a task.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DeleteTaskResponse {}
+/// A message containing the agent metadata.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Agent {
+ /// Name is the developer-assigned name of the agent.
+ #[prost(string, tag = "1")]
+ pub name: ::prost::alloc::string::String,
+ /// SupportedTaskTypes are the types of the tasks that the agent can handle.
+ #[deprecated]
+ #[prost(string, repeated, tag = "2")]
+ pub supported_task_types: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+ /// IsSync indicates whether this agent is a sync agent. Sync agents are expected to return their
+ /// results synchronously when called by propeller. Given that sync agents can affect the performance
+ /// of the system, it's important to enforce strict timeout policies.
+ /// An async agent, on the other hand, is required to be able to identify jobs by an
+ /// identifier and query for job statuses as jobs progress.
+ #[prost(bool, tag = "3")]
+ pub is_sync: bool,
+ /// SupportedTaskCategories are the categories of the tasks that the agent can handle.
+ #[prost(message, repeated, tag = "4")]
+ pub supported_task_categories: ::prost::alloc::vec::Vec<TaskCategory>,
+}
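+// Illustrative helper (not generated): read the newer `phase` field rather
+// than the deprecated `state` when checking whether a resource succeeded.
+#[allow(dead_code)]
+fn resource_succeeded(resource: &Resource) -> bool {
+ resource.phase == super::core::task_execution::Phase::Succeeded as i32
+}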
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskCategory {
+ /// The name of the task type.
+ #[prost(string, tag = "1")]
+ pub name: ::prost::alloc::string::String,
+ /// The version of the task type.
+ #[prost(int32, tag = "2")]
+ pub version: i32,
+}
+/// A request to get an agent.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetAgentRequest {
+ /// The name of the agent.
+ #[prost(string, tag = "1")]
+ pub name: ::prost::alloc::string::String,
+}
+/// A response containing an agent.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetAgentResponse {
+ #[prost(message, optional, tag = "1")]
+ pub agent: ::core::option::Option<Agent>,
+}
+/// A request to list all agents.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ListAgentsRequest {}
+/// A response containing a list of agents.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ListAgentsResponse {
+ #[prost(message, repeated, tag = "1")]
+ pub agents: ::prost::alloc::vec::Vec<Agent>,
+}
+/// A request to get the metrics from a task execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetTaskMetricsRequest {
+ /// A predefined yet extensible Task type identifier.
+ #[deprecated]
+ #[prost(string, tag = "1")]
+ pub task_type: ::prost::alloc::string::String,
+ /// Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata).
+ #[prost(bytes = "vec", tag = "2")]
+ pub resource_meta: ::prost::alloc::vec::Vec<u8>,
+ /// The metrics to query. If empty, will return a default set of metrics.
+ /// e.g. EXECUTION_METRIC_USED_CPU_AVG or EXECUTION_METRIC_USED_MEMORY_BYTES_AVG
+ #[prost(string, repeated, tag = "3")]
+ pub queries: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+ /// Start timestamp, inclusive.
+ #[prost(message, optional, tag = "4")]
+ pub start_time: ::core::option::Option<::prost_types::Timestamp>,
+ /// End timestamp, inclusive.
+ #[prost(message, optional, tag = "5")]
+ pub end_time: ::core::option::Option<::prost_types::Timestamp>,
+ /// Query resolution step width in duration format or float number of seconds.
+ #[prost(message, optional, tag = "6")]
+ pub step: ::core::option::Option<::prost_types::Duration>,
+ /// A predefined yet extensible Task type identifier.
+ #[prost(message, optional, tag = "7")]
+ pub task_category: ::core::option::Option<TaskCategory>,
+}
+/// A response containing a list of metrics for a task execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetTaskMetricsResponse {
+ /// The execution metric results.
+ #[prost(message, repeated, tag = "1")]
+ pub results: ::prost::alloc::vec::Vec<super::core::ExecutionMetricResult>,
+}
+/// A request to get the log from a task execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetTaskLogsRequest {
+ /// A predefined yet extensible Task type identifier.
+ #[deprecated]
+ #[prost(string, tag = "1")]
+ pub task_type: ::prost::alloc::string::String,
+ /// Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata).
+ #[prost(bytes = "vec", tag = "2")]
+ pub resource_meta: ::prost::alloc::vec::Vec<u8>,
+ /// Number of lines to return.
+ #[prost(uint64, tag = "3")]
+ pub lines: u64,
+ /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+ /// in a query. If there are no more results, this value will be empty.
+ #[prost(string, tag = "4")]
+ pub token: ::prost::alloc::string::String,
+ /// A predefined yet extensible Task type identifier.
+ #[prost(message, optional, tag = "5")]
+ pub task_category: ::core::option::Option<TaskCategory>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetTaskLogsResponseHeader {
+ /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+ /// in a query. If there are no more results, this value will be empty.
+ #[prost(string, tag = "1")]
+ pub token: ::prost::alloc::string::String,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetTaskLogsResponseBody {
+ /// The execution log results.
+ #[prost(string, repeated, tag = "1")]
+ pub results: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+}
+/// A response containing the logs for a task execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetTaskLogsResponse {
+ #[prost(oneof = "get_task_logs_response::Part", tags = "1, 2")]
+ pub part: ::core::option::Option<get_task_logs_response::Part>,
+}
+/// Nested message and enum types in `GetTaskLogsResponse`.
+pub mod get_task_logs_response {
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum Part {
+ #[prost(message, tag = "1")]
+ Header(super::GetTaskLogsResponseHeader),
+ #[prost(message, tag = "2")]
+ Body(super::GetTaskLogsResponseBody),
+ }
+}
+/// The state of the execution is used to control its visibility in the UI/CLI.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum State {
+ RetryableFailure = 0,
+ PermanentFailure = 1,
+ Pending = 2,
+ Running = 3,
+ Succeeded = 4,
+}
+impl State {
+ /// String value of the enum field names used in the ProtoBuf definition.
+ ///
+ /// The values are not transformed in any way and thus are considered stable
+ /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+ pub fn as_str_name(&self) -> &'static str {
+ match self {
+ State::RetryableFailure => "RETRYABLE_FAILURE",
+ State::PermanentFailure => "PERMANENT_FAILURE",
+ State::Pending => "PENDING",
+ State::Running => "RUNNING",
+ State::Succeeded => "SUCCEEDED",
+ }
+ }
+ /// Creates an enum from field names used in the ProtoBuf definition.
+ pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+ match value {
+ "RETRYABLE_FAILURE" => Some(Self::RetryableFailure),
+ "PERMANENT_FAILURE" => Some(Self::PermanentFailure),
+ "PENDING" => Some(Self::Pending),
+ "RUNNING" => Some(Self::Running),
+ "SUCCEEDED" => Some(Self::Succeeded),
+ _ => None,
+ }
+ }
+}
+/// Namespace within a project commonly used to differentiate between different service instances.
+/// e.g. "production", "development", etc.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Domain {
+ /// Globally unique domain name.
+ #[prost(string, tag = "1")]
+ pub id: ::prost::alloc::string::String,
+ /// Display name.
+ #[prost(string, tag = "2")]
+ pub name: ::prost::alloc::string::String,
+}
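+// Sketch (not generated): draining a log-response stream, which interleaves a
+// header part (pagination token) with body parts (log lines).
+#[allow(dead_code)]
+fn collect_task_logs(responses: Vec<GetTaskLogsResponse>) -> (String, Vec<String>) {
+ let mut next_token = String::new();
+ let mut lines = Vec::new();
+ for response in responses {
+ match response.part {
+ Some(get_task_logs_response::Part::Header(header)) => next_token = header.token,
+ Some(get_task_logs_response::Part::Body(body)) => lines.extend(body.results),
+ None => {}
+ }
+ }
+ (next_token, lines)
+}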
+/// Top-level namespace used to classify different entities like workflows and executions.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Project {
+ /// Globally unique project name.
+ #[prost(string, tag = "1")]
+ pub id: ::prost::alloc::string::String,
+ /// Display name.
+ #[prost(string, tag = "2")]
+ pub name: ::prost::alloc::string::String,
+ #[prost(message, repeated, tag = "3")]
+ pub domains: ::prost::alloc::vec::Vec<Domain>,
+ #[prost(string, tag = "4")]
+ pub description: ::prost::alloc::string::String,
+ /// Leverage Labels from flyteidl.admin.common.proto to
+ /// tag projects with ownership information.
+ #[prost(message, optional, tag = "5")]
+ pub labels: ::core::option::Option<Labels>,
+ #[prost(enumeration = "project::ProjectState", tag = "6")]
+ pub state: i32,
+ /// Optional, org key applied to the resource.
+ #[prost(string, tag = "7")]
+ pub org: ::prost::alloc::string::String,
+}
+/// Nested message and enum types in `Project`.
+pub mod project {
+ /// The state of the project is used to control its visibility in the UI and validity.
+ #[derive(
+ Clone,
+ Copy,
+ Debug,
+ PartialEq,
+ Eq,
+ Hash,
+ PartialOrd,
+ Ord,
+ ::prost::Enumeration
+ )]
+ #[repr(i32)]
+ pub enum ProjectState {
+ /// By default, all projects are considered active.
+ Active = 0,
+ /// Archived projects are no longer visible in the UI and no longer valid.
+ Archived = 1,
+ /// System generated projects that aren't explicitly created or managed by a user.
+ SystemGenerated = 2,
+ }
+ impl ProjectState {
+ /// String value of the enum field names used in the ProtoBuf definition.
+ ///
+ /// The values are not transformed in any way and thus are considered stable
+ /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+ pub fn as_str_name(&self) -> &'static str {
+ match self {
+ ProjectState::Active => "ACTIVE",
+ ProjectState::Archived => "ARCHIVED",
+ ProjectState::SystemGenerated => "SYSTEM_GENERATED",
+ }
+ }
+ /// Creates an enum from field names used in the ProtoBuf definition.
+ pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+ match value {
+ "ACTIVE" => Some(Self::Active),
+ "ARCHIVED" => Some(Self::Archived),
+ "SYSTEM_GENERATED" => Some(Self::SystemGenerated),
+ _ => None,
+ }
+ }
+ }
+}
+/// Represents a list of projects.
+/// See :ref:`ref_flyteidl.admin.Project` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Projects {
+ #[prost(message, repeated, tag = "1")]
+ pub projects: ::prost::alloc::vec::Vec<Project>,
+ /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+ /// in a query. If there are no more results, this value will be empty.
+ #[prost(string, tag = "2")]
+ pub token: ::prost::alloc::string::String,
+}
+/// Request to retrieve a list of projects matching specified filters.
+/// See :ref:`ref_flyteidl.admin.Project` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectListRequest {
+ /// Indicates the number of projects to be returned.
+ /// +required
+ #[prost(uint32, tag = "1")]
+ pub limit: u32,
+ /// In the case of multiple pages of results, this server-provided token can be used to fetch the next page
+ /// in a query.
+ /// +optional
+ #[prost(string, tag = "2")]
+ pub token: ::prost::alloc::string::String,
+ /// Indicates a list of filters passed as string.
+ /// More info on constructing filters :
+ /// +optional
+ #[prost(string, tag = "3")]
+ pub filters: ::prost::alloc::string::String,
+ /// Sort ordering.
+ /// +optional
+ #[prost(message, optional, tag = "4")]
+ pub sort_by: ::core::option::Option<Sort>,
+ /// Optional, org filter applied to list project requests.
+ #[prost(string, tag = "5")]
+ pub org: ::prost::alloc::string::String,
+}
+/// Adds a new user-project within the Flyte deployment.
+/// See :ref:`ref_flyteidl.admin.Project` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectRegisterRequest {
+ /// +required
+ #[prost(message, optional, tag = "1")]
+ pub project: ::core::option::Option<Project>,
+}
+/// Purposefully empty, may be updated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectRegisterResponse {}
+/// Purposefully empty, may be updated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectUpdateResponse {}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectGetRequest {
+ /// Indicates a unique project.
+ /// +required
+ #[prost(string, tag = "1")]
+ pub id: ::prost::alloc::string::String,
+ /// Optional, org key applied to the resource.
+ #[prost(string, tag = "2")]
+ pub org: ::prost::alloc::string::String,
+}
+/// Encapsulates specifications for routing an execution onto a specific cluster.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ClusterAssignment {
+ #[prost(string, tag = "3")]
+ pub cluster_pool_name: ::prost::alloc::string::String,
+}
+/// Defines a set of overridable task resource attributes set during task registration.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskResourceSpec {
+ #[prost(string, tag = "1")]
+ pub cpu: ::prost::alloc::string::String,
+ #[prost(string, tag = "2")]
+ pub gpu: ::prost::alloc::string::String,
+ #[prost(string, tag = "3")]
+ pub memory: ::prost::alloc::string::String,
+ #[prost(string, tag = "4")]
+ pub storage: ::prost::alloc::string::String,
+ #[prost(string, tag = "5")]
+ pub ephemeral_storage: ::prost::alloc::string::String,
+}
+/// Defines task resource defaults and limits that will be applied at task registration.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskResourceAttributes {
+ #[prost(message, optional, tag = "1")]
+ pub defaults: ::core::option::Option<TaskResourceSpec>,
+ #[prost(message, optional, tag = "2")]
+ pub limits: ::core::option::Option<TaskResourceSpec>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ClusterResourceAttributes {
+ /// Custom resource attributes which will be applied in cluster resource creation (e.g. quotas).
+ /// Map keys are the *case-sensitive* names of variables in templatized resource files.
+ /// Map values should be the custom values which get substituted during resource creation.
+ #[prost(map = "string, string", tag = "1")]
+ pub attributes: ::std::collections::HashMap<
+ ::prost::alloc::string::String,
+ ::prost::alloc::string::String,
+ >,
+}
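+// Sketch (not generated): defaults and limits, assuming Kubernetes-style
+// quantity strings; the particular values here are made up for illustration.
+#[allow(dead_code)]
+fn example_task_resource_attributes() -> TaskResourceAttributes {
+ TaskResourceAttributes {
+ defaults: Some(TaskResourceSpec {
+ cpu: "500m".to_string(),
+ gpu: String::new(),
+ memory: "1Gi".to_string(),
+ storage: String::new(),
+ ephemeral_storage: String::new(),
+ }),
+ limits: Some(TaskResourceSpec {
+ cpu: "2".to_string(),
+ gpu: String::new(),
+ memory: "4Gi".to_string(),
+ storage: String::new(),
+ ephemeral_storage: String::new(),
+ }),
+ }
+}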
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionQueueAttributes {
+ /// Tags used for assigning execution queues for tasks defined within this project.
+ #[prost(string, repeated, tag = "1")]
+ pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionClusterLabel {
+ /// Label value to determine where the execution will be run
+ #[prost(string, tag = "1")]
+ pub value: ::prost::alloc::string::String,
+}
+/// This MatchableAttribute configures selecting alternate plugin implementations for a given task type.
+/// In addition to an override implementation, a selection of fallbacks can be provided or other modes
+/// for handling cases where the desired plugin override is not enabled in a given Flyte deployment.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PluginOverride {
+ /// A predefined yet extensible Task type identifier.
+ #[prost(string, tag = "1")]
+ pub task_type: ::prost::alloc::string::String,
+ /// A set of plugin ids which should handle tasks of this type instead of the default registered plugin. The list will be tried in order until a plugin is found with that id.
+ #[prost(string, repeated, tag = "2")]
+ pub plugin_id: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+ /// Defines the behavior when no plugin from the plugin_id list is found.
+ #[prost(enumeration = "plugin_override::MissingPluginBehavior", tag = "4")]
+ pub missing_plugin_behavior: i32,
+}
+/// Nested message and enum types in `PluginOverride`.
+pub mod plugin_override {
+ #[derive(
+ Clone,
+ Copy,
+ Debug,
+ PartialEq,
+ Eq,
+ Hash,
+ PartialOrd,
+ Ord,
+ ::prost::Enumeration
+ )]
+ #[repr(i32)]
+ pub enum MissingPluginBehavior {
+ /// By default, if this plugin is not enabled for a Flyte deployment then execution will fail.
+ Fail = 0,
+ /// Uses the system-configured default implementation.
+ UseDefault = 1,
+ }
+ impl MissingPluginBehavior {
+ /// String value of the enum field names used in the ProtoBuf definition.
+ ///
+ /// The values are not transformed in any way and thus are considered stable
+ /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+ pub fn as_str_name(&self) -> &'static str {
+ match self {
+ MissingPluginBehavior::Fail => "FAIL",
+ MissingPluginBehavior::UseDefault => "USE_DEFAULT",
+ }
+ }
+ /// Creates an enum from field names used in the ProtoBuf definition.
+ pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+ match value {
+ "FAIL" => Some(Self::Fail),
+ "USE_DEFAULT" => Some(Self::UseDefault),
+ _ => None,
+ }
+ }
+ }
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PluginOverrides {
+ #[prost(message, repeated, tag = "1")]
+ pub overrides: ::prost::alloc::vec::Vec<PluginOverride>,
+}
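+// Sketch (not generated): route a hypothetical "spark" task type to an
+// alternate plugin id, falling back to the system default when it is absent.
+#[allow(dead_code)]
+fn example_plugin_override() -> PluginOverrides {
+ PluginOverrides {
+ overrides: vec![PluginOverride {
+ task_type: "spark".to_string(),
+ plugin_id: vec!["spark_v2".to_string()],
+ missing_plugin_behavior: plugin_override::MissingPluginBehavior::UseDefault as i32,
+ }],
+ }
+}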
+/// Adds defaults for customizable workflow-execution specifications and overrides.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowExecutionConfig {
+ /// Can be used to control the number of parallel nodes to run within the workflow. This is useful to achieve fairness.
+ #[prost(int32, tag = "1")]
+ pub max_parallelism: i32,
+ /// Indicates security context permissions for executions triggered with this matchable attribute.
+ #[prost(message, optional, tag = "2")]
+ pub security_context: ::core::option::Option<super::core::SecurityContext>,
+ /// Encapsulates user settings pertaining to offloaded data (i.e. Blobs, Schema, query data, etc.).
+ #[prost(message, optional, tag = "3")]
+ pub raw_output_data_config: ::core::option::Option<RawOutputDataConfig>,
+ /// Custom labels to be applied to a triggered execution resource.
+ #[prost(message, optional, tag = "4")]
+ pub labels: ::core::option::Option<Labels>,
+ /// Custom annotations to be applied to a triggered execution resource.
+ #[prost(message, optional, tag = "5")]
+ pub annotations: ::core::option::Option<Annotations>,
+ /// Allows for the interruptible flag of a workflow to be overwritten for a single execution.
+ /// Omitting this field uses the workflow's value as a default.
+ /// As we need to distinguish between the field not being provided and its default value false, we have to use a wrapper
+ /// around the bool field.
+ #[prost(message, optional, tag = "6")]
+ pub interruptible: ::core::option::Option<bool>,
+ /// Allows for all cached values of a workflow and its tasks to be overwritten for a single execution.
+ /// If enabled, all calculations are performed even if cached results would be available, overwriting the stored
+ /// data once execution finishes successfully.
+ #[prost(bool, tag = "7")]
+ pub overwrite_cache: bool,
+ /// Environment variables to be set for the execution.
+ #[prost(message, optional, tag = "8")]
+ pub envs: ::core::option::Option<Envs>,
+}
+/// Generic container for encapsulating all types of the above attributes messages.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct MatchingAttributes {
+ #[prost(oneof = "matching_attributes::Target", tags = "1, 2, 3, 4, 5, 6, 7, 8")]
+ pub target: ::core::option::Option<matching_attributes::Target>,
+}
+/// Nested message and enum types in `MatchingAttributes`.
+pub mod matching_attributes {
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum Target {
+ #[prost(message, tag = "1")]
+ TaskResourceAttributes(super::TaskResourceAttributes),
+ #[prost(message, tag = "2")]
+ ClusterResourceAttributes(super::ClusterResourceAttributes),
+ #[prost(message, tag = "3")]
+ ExecutionQueueAttributes(super::ExecutionQueueAttributes),
+ #[prost(message, tag = "4")]
+ ExecutionClusterLabel(super::ExecutionClusterLabel),
+ #[prost(message, tag = "5")]
+ QualityOfService(super::super::core::QualityOfService),
+ #[prost(message, tag = "6")]
+ PluginOverrides(super::PluginOverrides),
+ #[prost(message, tag = "7")]
+ WorkflowExecutionConfig(super::WorkflowExecutionConfig),
+ #[prost(message, tag = "8")]
+ ClusterAssignment(super::ClusterAssignment),
+ }
+}
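+// Sketch (not generated): any of the attribute messages above is carried in
+// the `MatchingAttributes` container via its oneof `Target`.
+#[allow(dead_code)]
+fn wrap_execution_config(config: WorkflowExecutionConfig) -> MatchingAttributes {
+ MatchingAttributes {
+ target: Some(matching_attributes::Target::WorkflowExecutionConfig(config)),
+ }
+}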
+/// Represents a custom set of attributes applied for either a domain (and optional org); a domain and project (and optional org);
+/// or domain, project and workflow name (and optional org).
+/// These are used to override system level defaults for kubernetes cluster resource management,
+/// default execution values, and more all across different levels of specificity.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct MatchableAttributesConfiguration {
+ #[prost(message, optional, tag = "1")]
+ pub attributes: ::core::option::Option<MatchingAttributes>,
+ #[prost(string, tag = "2")]
+ pub domain: ::prost::alloc::string::String,
+ #[prost(string, tag = "3")]
+ pub project: ::prost::alloc::string::String,
+ #[prost(string, tag = "4")]
+ pub workflow: ::prost::alloc::string::String,
+ #[prost(string, tag = "5")]
+ pub launch_plan: ::prost::alloc::string::String,
+ /// Optional, org key applied to the resource.
+ #[prost(string, tag = "6")]
+ pub org: ::prost::alloc::string::String,
+}
+/// Request all matching resource attributes for a resource type.
+/// See :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ListMatchableAttributesRequest {
+ /// +required
+ #[prost(enumeration = "MatchableResource", tag = "1")]
+ pub resource_type: i32,
+ /// Optional, org filter applied to list project requests.
+ #[prost(string, tag = "2")]
+ pub org: ::prost::alloc::string::String,
+}
+/// Response for a request for all matching resource attributes for a resource type.
+/// See :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ListMatchableAttributesResponse {
+ #[prost(message, repeated, tag = "1")]
+ pub configurations: ::prost::alloc::vec::Vec<MatchableAttributesConfiguration>,
+}
+/// Defines a resource that can be configured by customizable Project-, ProjectDomain- or WorkflowAttributes
+/// based on matching tags.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum MatchableResource {
+ /// Applies to customizable task resource requests and limits.
+ TaskResource = 0,
+ /// Applies to configuring templated kubernetes cluster resources.
+ ClusterResource = 1,
+ /// Configures task and dynamic task execution queue assignment.
+ ExecutionQueue = 2,
+ /// Configures the K8s cluster label to be used for execution to be run
+ ExecutionClusterLabel = 3,
+ /// Configures default quality of service when undefined in an execution spec.
+ QualityOfServiceSpecification = 4,
+ /// Selects configurable plugin implementation behavior for a given task type.
+ PluginOverride = 5,
+ /// Adds defaults for customizable workflow-execution specifications and overrides.
+ WorkflowExecutionConfig = 6,
+ /// Controls how to select an available cluster on which this execution should run.
+ ClusterAssignment = 7,
+}
+impl MatchableResource {
+ /// String value of the enum field names used in the ProtoBuf definition.
+ ///
+ /// The values are not transformed in any way and thus are considered stable
+ /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+ pub fn as_str_name(&self) -> &'static str {
+ match self {
+ MatchableResource::TaskResource => "TASK_RESOURCE",
+ MatchableResource::ClusterResource => "CLUSTER_RESOURCE",
+ MatchableResource::ExecutionQueue => "EXECUTION_QUEUE",
+ MatchableResource::ExecutionClusterLabel => "EXECUTION_CLUSTER_LABEL",
+ MatchableResource::QualityOfServiceSpecification => {
+ "QUALITY_OF_SERVICE_SPECIFICATION"
+ }
+ MatchableResource::PluginOverride => "PLUGIN_OVERRIDE",
+ MatchableResource::WorkflowExecutionConfig => "WORKFLOW_EXECUTION_CONFIG",
+ MatchableResource::ClusterAssignment => "CLUSTER_ASSIGNMENT",
+ }
+ }
+ /// Creates an enum from field names used in the ProtoBuf definition.
+ pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+ match value {
+ "TASK_RESOURCE" => Some(Self::TaskResource),
+ "CLUSTER_RESOURCE" => Some(Self::ClusterResource),
+ "EXECUTION_QUEUE" => Some(Self::ExecutionQueue),
+ "EXECUTION_CLUSTER_LABEL" => Some(Self::ExecutionClusterLabel),
+ "QUALITY_OF_SERVICE_SPECIFICATION" => {
+ Some(Self::QualityOfServiceSpecification)
+ }
+ "PLUGIN_OVERRIDE" => Some(Self::PluginOverride),
+ "WORKFLOW_EXECUTION_CONFIG" => Some(Self::WorkflowExecutionConfig),
+ "CLUSTER_ASSIGNMENT" => Some(Self::ClusterAssignment),
+ _ => None,
+ }
+ }
+}
+/// Defines a set of custom matching attributes which define resource defaults for a project and domain.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectDomainAttributes {
+ /// Unique project id for which this set of attributes will be applied.
+ #[prost(string, tag = "1")]
+ pub project: ::prost::alloc::string::String,
+ /// Unique domain id for which this set of attributes will be applied.
+ #[prost(string, tag = "2")]
+ pub domain: ::prost::alloc::string::String,
+ #[prost(message, optional, tag = "3")]
+ pub matching_attributes: ::core::option::Option<MatchingAttributes>,
+ /// Optional, org key applied to the attributes.
+ #[prost(string, tag = "4")]
+ pub org: ::prost::alloc::string::String,
+}
+/// Sets custom attributes for a project-domain combination.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectDomainAttributesUpdateRequest {
+ /// +required
+ #[prost(message, optional, tag = "1")]
+ pub attributes: ::core::option::Option<ProjectDomainAttributes>,
+}
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectDomainAttributesUpdateResponse {}
+/// Request to get an individual project domain attribute override.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectDomainAttributesGetRequest {
+ /// Unique project id which this set of attributes references.
+ /// +required
+ #[prost(string, tag = "1")]
+ pub project: ::prost::alloc::string::String,
+ /// Unique domain id which this set of attributes references.
+ /// +required
+ #[prost(string, tag = "2")]
+ pub domain: ::prost::alloc::string::String,
+ /// Which type of matchable attributes to return.
+ /// +required
+ #[prost(enumeration = "MatchableResource", tag = "3")]
+ pub resource_type: i32,
+ /// Optional, org key applied to the attributes.
+ #[prost(string, tag = "4")]
+ pub org: ::prost::alloc::string::String,
+}
+/// Response to get an individual project domain attribute override.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectDomainAttributesGetResponse {
+ #[prost(message, optional, tag = "1")]
+ pub attributes: ::core::option::Option<ProjectDomainAttributes>,
+}
+/// Request to delete a matchable project domain attribute override.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectDomainAttributesDeleteRequest {
+ /// Unique project id which this set of attributes references.
+ /// +required
+ #[prost(string, tag = "1")]
+ pub project: ::prost::alloc::string::String,
+ /// Unique domain id which this set of attributes references.
+ /// +required
+ #[prost(string, tag = "2")]
+ pub domain: ::prost::alloc::string::String,
+ /// Which type of matchable attributes to delete.
+ /// +required
+ #[prost(enumeration = "MatchableResource", tag = "3")]
+ pub resource_type: i32,
+ /// Optional, org key applied to the attributes.
+ #[prost(string, tag = "4")]
+ pub org: ::prost::alloc::string::String,
+}
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectDomainAttributesDeleteResponse {}
+/// Defines a set of custom matching attributes at the project level.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectAttributes {
+ /// Unique project id for which this set of attributes will be applied.
+ #[prost(string, tag = "1")]
+ pub project: ::prost::alloc::string::String,
+ #[prost(message, optional, tag = "2")]
+ pub matching_attributes: ::core::option::Option<MatchingAttributes>,
+ /// Optional, org key applied to the project.
+ #[prost(string, tag = "3")]
+ pub org: ::prost::alloc::string::String,
+}
+/// Sets custom attributes for a project.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectAttributesUpdateRequest {
+ /// +required
+ #[prost(message, optional, tag = "1")]
+ pub attributes: ::core::option::Option<ProjectAttributes>,
+}
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectAttributesUpdateResponse {}
+/// Request to get an individual project level attribute override.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectAttributesGetRequest {
+ /// Unique project id which this set of attributes references.
+ /// +required
+ #[prost(string, tag = "1")]
+ pub project: ::prost::alloc::string::String,
+ /// Which type of matchable attributes to return.
+ /// +required
+ #[prost(enumeration = "MatchableResource", tag = "2")]
+ pub resource_type: i32,
+ /// Optional, org key applied to the project.
+ #[prost(string, tag = "3")]
+ pub org: ::prost::alloc::string::String,
+}
+/// Response to get an individual project level attribute override.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectAttributesGetResponse {
+ #[prost(message, optional, tag = "1")]
+ pub attributes: ::core::option::Option<ProjectAttributes>,
+}
+/// Request to delete a matchable project level attribute override.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectAttributesDeleteRequest {
+ /// Unique project id which this set of attributes references.
+ /// +required
+ #[prost(string, tag = "1")]
+ pub project: ::prost::alloc::string::String,
+ /// Which type of matchable attributes to delete.
+ /// +required
+ #[prost(enumeration = "MatchableResource", tag = "2")]
+ pub resource_type: i32,
+ /// Optional, org key applied to the project.
+ #[prost(string, tag = "3")]
+ pub org: ::prost::alloc::string::String,
+}
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ProjectAttributesDeleteResponse {}
+/// DescriptionEntity contains detailed description for the task/workflow.
+/// Documentation could provide insight into the algorithms, business use case, etc.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DescriptionEntity {
+ /// id represents the unique identifier of the description entity.
+ #[prost(message, optional, tag = "1")]
+ pub id: ::core::option::Option<super::core::Identifier>,
+ /// One-liner overview of the entity.
+ #[prost(string, tag = "2")]
+ pub short_description: ::prost::alloc::string::String,
+ /// Full user description with formatting preserved.
+ #[prost(message, optional, tag = "3")]
+ pub long_description: ::core::option::Option<Description>,
+ /// Optional link to source code used to define this entity.
+ #[prost(message, optional, tag = "4")]
+ pub source_code: ::core::option::Option<SourceCode>,
+ /// User-specified tags. These are arbitrary and can be used for searching,
+ /// filtering and discovering tasks.
+ #[prost(string, repeated, tag = "5")]
+ pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+}
+/// Full user description with formatting preserved. This can be rendered
+/// by clients, such as the console or command line tools with intact
+/// formatting.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Description {
+ /// Format of the long description
+ #[prost(enumeration = "DescriptionFormat", tag = "3")]
+ pub format: i32,
+ /// Optional link to an icon for the entity
+ #[prost(string, tag = "4")]
+ pub icon_link: ::prost::alloc::string::String,
+ #[prost(oneof = "description::Content", tags = "1, 2")]
+ pub content: ::core::option::Option<description::Content>,
+}
+/// Nested message and enum types in `Description`.
+pub mod description {
+ #[allow(clippy::derive_partial_eq_without_eq)]
+ #[derive(Clone, PartialEq, ::prost::Oneof)]
+ pub enum Content {
+ /// long description - no more than 4KB
+ #[prost(string, tag = "1")]
+ Value(::prost::alloc::string::String),
+ /// if the description size exceeds some threshold, we can offload the entire
+ /// description proto to an external data store, like S3, rather than store it inline in the db
+ #[prost(string, tag = "2")]
+ Uri(::prost::alloc::string::String),
+ }
+}
+/// Link to source code used to define this entity
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SourceCode {
+ #[prost(string, tag = "1")]
+ pub link: ::prost::alloc::string::String,
+}
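+// Sketch (not generated): the long description is either inline text or a URI
+// to offloaded content, per the `Content` oneof above; this returns whichever
+// is present.
+#[allow(dead_code)]
+fn description_content(description: &Description) -> Option<&str> {
+ match &description.content {
+ Some(description::Content::Value(text)) => Some(text.as_str()),
+ Some(description::Content::Uri(uri)) => Some(uri.as_str()),
+ None => None,
+ }
+}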
+/// Represents a list of DescriptionEntities returned from the admin.
+/// See :ref:`ref_flyteidl.admin.DescriptionEntity` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DescriptionEntityList {
+ /// A list of DescriptionEntities returned based on the request.
+ #[prost(message, repeated, tag = "1")]
+ pub description_entities: ::prost::alloc::vec::Vec<DescriptionEntity>,
+ /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+ /// in a query. If there are no more results, this value will be empty.
+ #[prost(string, tag = "2")]
+ pub token: ::prost::alloc::string::String,
+}
+/// Represents a request structure to retrieve a list of DescriptionEntities.
+/// See :ref:`ref_flyteidl.admin.DescriptionEntity` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DescriptionEntityListRequest {
+ /// Identifies the specific type of resource that this identifier corresponds to.
+ #[prost(enumeration = "super::core::ResourceType", tag = "1")]
+ pub resource_type: i32,
+ /// The identifier for the description entity.
+ /// +required
+ #[prost(message, optional, tag = "2")]
+ pub id: ::core::option::Option<NamedEntityIdentifier>,
+ /// Indicates the number of resources to be returned.
+ /// +required
+ #[prost(uint32, tag = "3")]
+ pub limit: u32,
+ /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+ /// in a query.
+ /// +optional
+ #[prost(string, tag = "4")]
+ pub token: ::prost::alloc::string::String,
+ /// Indicates a list of filters passed as string.
+ /// More info on constructing filters :
+ /// +optional
+ #[prost(string, tag = "5")]
+ pub filters: ::prost::alloc::string::String,
+ /// Sort ordering for returned list.
+ /// +optional
+ #[prost(message, optional, tag = "6")]
+ pub sort_by: ::core::option::Option<Sort>,
+}
+/// The format of the long description
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum DescriptionFormat {
+ Unknown = 0,
+ Markdown = 1,
+ Html = 2,
+ /// python default documentation - comments are rst
+ Rst = 3,
+}
+impl DescriptionFormat {
+ /// String value of the enum field names used in the ProtoBuf definition.
+ ///
+ /// The values are not transformed in any way and thus are considered stable
+ /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+ pub fn as_str_name(&self) -> &'static str {
+ match self {
+ DescriptionFormat::Unknown => "DESCRIPTION_FORMAT_UNKNOWN",
+ DescriptionFormat::Markdown => "DESCRIPTION_FORMAT_MARKDOWN",
+ DescriptionFormat::Html => "DESCRIPTION_FORMAT_HTML",
+ DescriptionFormat::Rst => "DESCRIPTION_FORMAT_RST",
+ }
+ }
+ /// Creates an enum from field names used in the ProtoBuf definition.
+ pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+ match value {
+ "DESCRIPTION_FORMAT_UNKNOWN" => Some(Self::Unknown),
+ "DESCRIPTION_FORMAT_MARKDOWN" => Some(Self::Markdown),
+ "DESCRIPTION_FORMAT_HTML" => Some(Self::Html),
+ "DESCRIPTION_FORMAT_RST" => Some(Self::Rst),
+ _ => None,
+ }
+ }
+}
+/// Represents a request structure to create a revision of a task.
+/// See :ref:`ref_flyteidl.admin.Task` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskCreateRequest {
+ /// id represents the unique identifier of the task.
+ /// +required
+ #[prost(message, optional, tag = "1")]
+ pub id: ::core::option::Option<super::core::Identifier>,
+ /// Represents the specification for the task.
+ /// +required
+ #[prost(message, optional, tag = "2")]
+ pub spec: ::core::option::Option<TaskSpec>,
+}
+/// Represents a response structure if task creation succeeds.
+///
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskCreateResponse {}
+/// Flyte workflows are composed of many ordered tasks. That is, small, reusable, self-contained logical blocks
+/// arranged to process workflow inputs and produce a deterministic set of outputs.
+/// Tasks can come in many varieties tuned for specialized behavior.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Task {
+ /// id represents the unique identifier of the task.
+ #[prost(message, optional, tag = "1")]
+ pub id: ::core::option::Option<super::core::Identifier>,
+ /// closure encapsulates all the fields that map to a compiled version of the task.
+ #[prost(message, optional, tag = "2")]
+ pub closure: ::core::option::Option<TaskClosure>,
+ /// One-liner overview of the entity.
+ #[prost(string, tag = "3")]
+ pub short_description: ::prost::alloc::string::String,
+}
+/// Represents a list of tasks returned from the admin.
+/// See :ref:`ref_flyteidl.admin.Task` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskList {
+ /// A list of tasks returned based on the request.
+ #[prost(message, repeated, tag = "1")]
+ pub tasks: ::prost::alloc::vec::Vec<Task>,
+ /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+ /// in a query. If there are no more results, this value will be empty.
+ #[prost(string, tag = "2")]
+ pub token: ::prost::alloc::string::String,
+}
+/// Represents a structure that encapsulates the user-configured specification of the task.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskSpec {
+ /// Template of the task that encapsulates all the metadata of the task.
+ #[prost(message, optional, tag = "1")]
+ pub template: ::core::option::Option<super::core::TaskTemplate>,
+ /// Represents the specification for description entity.
+ #[prost(message, optional, tag = "2")]
+ pub description: ::core::option::Option<DescriptionEntity>,
+}
+/// Compute task attributes which include values derived from the TaskSpec, as well as plugin-specific data
+/// and task metadata.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskClosure {
+ /// Represents the compiled representation of the task from the specification provided.
+ #[prost(message, optional, tag = "1")]
+ pub compiled_task: ::core::option::Option<super::core::CompiledTask>,
+ /// Time at which the task was created.
+ #[prost(message, optional, tag = "2")]
+ pub created_at: ::core::option::Option<::prost_types::Timestamp>,
+}
+/// Represents a request structure to create a revision of a workflow.
+/// See :ref:`ref_flyteidl.admin.Workflow` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowCreateRequest {
+ /// id represents the unique identifier of the workflow.
+ /// +required
+ #[prost(message, optional, tag = "1")]
+ pub id: ::core::option::Option<super::core::Identifier>,
+ /// Represents the specification for the workflow.
+ /// +required
+ #[prost(message, optional, tag = "2")]
+ pub spec: ::core::option::Option<WorkflowSpec>,
+}
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowCreateResponse {}
+/// Represents the workflow structure stored in the Admin.
+/// A workflow is created by ordering tasks and associating outputs to inputs
+/// in order to produce a directed-acyclic execution graph.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Workflow {
+ /// id represents the unique identifier of the workflow.
+ #[prost(message, optional, tag = "1")]
+ pub id: ::core::option::Option<super::core::Identifier>,
+ /// closure encapsulates all the fields that map to a compiled version of the workflow.
+ #[prost(message, optional, tag = "2")]
+ pub closure: ::core::option::Option<WorkflowClosure>,
+ /// One-liner overview of the entity.
+ #[prost(string, tag = "3")]
+ pub short_description: ::prost::alloc::string::String,
+}
+/// Represents a list of workflows returned from the admin.
+/// See :ref:`ref_flyteidl.admin.Workflow` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowList {
+ /// A list of workflows returned based on the request.
+ #[prost(message, repeated, tag = "1")]
+ pub workflows: ::prost::alloc::vec::Vec<Workflow>,
+ /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+ /// in a query. If there are no more results, this value will be empty.
+ #[prost(string, tag = "2")]
+ pub token: ::prost::alloc::string::String,
+}
+/// Represents a structure that encapsulates the specification of the workflow.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowSpec {
+ /// Template of the workflow that encapsulates all the metadata of the workflow.
+ #[prost(message, optional, tag = "1")]
+ pub template: ::core::option::Option<super::core::WorkflowTemplate>,
+ /// Workflows that are embedded into other workflows need to be passed alongside the parent workflow to the
+ /// propeller compiler (since the compiler doesn't have any knowledge of other workflows - i.e., it doesn't reach out
+ /// to Admin to see other registered workflows). In fact, subworkflows do not even need to be registered.
+ #[prost(message, repeated, tag = "2")]
+ pub sub_workflows: ::prost::alloc::vec::Vec<super::core::WorkflowTemplate>,
+ /// Represents the specification for description entity.
+ #[prost(message, optional, tag = "3")]
+ pub description: ::core::option::Option<DescriptionEntity>,
+}
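+// Sketch (not generated): embedded sub-workflows must ride along with the
+// parent template, since the compiler does not look them up in Admin.
+#[allow(dead_code)]
+fn workflow_spec_with_subworkflows(
+ parent: super::core::WorkflowTemplate,
+ sub_workflows: Vec<super::core::WorkflowTemplate>,
+) -> WorkflowSpec {
+ WorkflowSpec {
+ template: Some(parent),
+ sub_workflows,
+ description: None,
+ }
+}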
+/// A container holding the compiled workflow produced from the WorkflowSpec and additional metadata.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowClosure {
+    /// Represents the compiled representation of the workflow from the specification provided.
+    #[prost(message, optional, tag = "1")]
+    pub compiled_workflow: ::core::option::Option<super::core::CompiledWorkflowClosure>,
+    /// Time at which the workflow was created.
+    #[prost(message, optional, tag = "2")]
+    pub created_at: ::core::option::Option<::prost_types::Timestamp>,
+}
+/// The workflow id is already used and the structure is different
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowErrorExistsDifferentStructure {
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::Identifier>,
+}
+/// The workflow id is already used with an identical structure
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowErrorExistsIdenticalStructure {
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::Identifier>,
+}
+/// When a CreateWorkflowRequest fails due to a matching id
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateWorkflowFailureReason {
+    #[prost(oneof = "create_workflow_failure_reason::Reason", tags = "1, 2")]
+    pub reason: ::core::option::Option<create_workflow_failure_reason::Reason>,
+}
+/// Nested message and enum types in `CreateWorkflowFailureReason`.
+pub mod create_workflow_failure_reason {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Reason {
+        #[prost(message, tag = "1")]
+        ExistsDifferentStructure(super::WorkflowErrorExistsDifferentStructure),
+        #[prost(message, tag = "2")]
+        ExistsIdenticalStructure(super::WorkflowErrorExistsIdenticalStructure),
+    }
+}
+/// Defines a set of custom matching attributes which define resource defaults for a project, domain and workflow.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowAttributes {
+    /// Unique project id for which this set of attributes will be applied.
+    #[prost(string, tag = "1")]
+    pub project: ::prost::alloc::string::String,
+    /// Unique domain id for which this set of attributes will be applied.
+    #[prost(string, tag = "2")]
+    pub domain: ::prost::alloc::string::String,
+    /// Workflow name for which this set of attributes will be applied.
+    #[prost(string, tag = "3")]
+    pub workflow: ::prost::alloc::string::String,
+    #[prost(message, optional, tag = "4")]
+    pub matching_attributes: ::core::option::Option<MatchingAttributes>,
+    /// Optional, org key applied to the attributes.
+    #[prost(string, tag = "5")]
+    pub org: ::prost::alloc::string::String,
+}
+/// Sets custom attributes for a project, domain and workflow combination.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowAttributesUpdateRequest {
+    #[prost(message, optional, tag = "1")]
+    pub attributes: ::core::option::Option<WorkflowAttributes>,
+}
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowAttributesUpdateResponse {}
+/// Request to get an individual workflow attribute override.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowAttributesGetRequest {
+    /// Unique project id which this set of attributes references.
+    /// +required
+    #[prost(string, tag = "1")]
+    pub project: ::prost::alloc::string::String,
+    /// Unique domain id which this set of attributes references.
+    /// +required
+    #[prost(string, tag = "2")]
+    pub domain: ::prost::alloc::string::String,
+    /// Workflow name which this set of attributes references.
+    /// +required
+    #[prost(string, tag = "3")]
+    pub workflow: ::prost::alloc::string::String,
+    /// Which type of matchable attributes to return.
+    /// +required
+    #[prost(enumeration = "MatchableResource", tag = "4")]
+    pub resource_type: i32,
+    /// Optional, org key applied to the attributes.
+    #[prost(string, tag = "5")]
+    pub org: ::prost::alloc::string::String,
+}
+/// Response to get an individual workflow attribute override.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowAttributesGetResponse {
+    #[prost(message, optional, tag = "1")]
+    pub attributes: ::core::option::Option<WorkflowAttributes>,
+}
+/// Request to delete a matchable workflow attribute override.
+/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowAttributesDeleteRequest {
+    /// Unique project id which this set of attributes references.
+    /// +required
+    #[prost(string, tag = "1")]
+    pub project: ::prost::alloc::string::String,
+    /// Unique domain id which this set of attributes references.
+    /// +required
+    #[prost(string, tag = "2")]
+    pub domain: ::prost::alloc::string::String,
+    /// Workflow name which this set of attributes references.
+    /// +required
+    #[prost(string, tag = "3")]
+    pub workflow: ::prost::alloc::string::String,
+    /// Which type of matchable attributes to delete.
+    /// +required
+    #[prost(enumeration = "MatchableResource", tag = "4")]
+    pub resource_type: i32,
+    /// Optional, org key applied to the attributes.
+    #[prost(string, tag = "5")]
+    pub org: ::prost::alloc::string::String,
+}
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowAttributesDeleteResponse {}
+/// Option for schedules that run at a certain frequency, e.g. every 2 minutes.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct FixedRate {
+    #[prost(uint32, tag = "1")]
+    pub value: u32,
+    #[prost(enumeration = "FixedRateUnit", tag = "2")]
+    pub unit: i32,
+}
+/// Options for schedules to run according to a cron expression.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CronSchedule {
+    /// Standard/default cron implementation as described by <https://en.wikipedia.org/wiki/Cron#CRON_expression>;
+    /// Also supports nonstandard predefined scheduling definitions
+    /// as described by <https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions>,
+    /// except @reboot
+    #[prost(string, tag = "1")]
+    pub schedule: ::prost::alloc::string::String,
+    /// ISO 8601 duration as described by <https://en.wikipedia.org/wiki/ISO_8601#Durations>
+    #[prost(string, tag = "2")]
+    pub offset: ::prost::alloc::string::String,
+}
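To illustrate the two string fields above (the values here are examples only, not defaults):

    use flyteidl::admin::CronSchedule;

    /// Fire at minute 0 of every hour, shifted 10 minutes via the ISO 8601 offset.
    fn hourly_with_offset() -> CronSchedule {
        CronSchedule {
            schedule: "0 * * * *".to_string(),
            offset: "PT10M".to_string(),
        }
    }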
+/// Defines complete set of information required to trigger an execution on a schedule.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Schedule {
+    /// Name of the input variable that the kickoff time will be supplied to when the workflow is kicked off.
+    #[prost(string, tag = "3")]
+    pub kickoff_time_input_arg: ::prost::alloc::string::String,
+    #[prost(oneof = "schedule::ScheduleExpression", tags = "1, 2, 4")]
+    pub schedule_expression: ::core::option::Option<schedule::ScheduleExpression>,
+}
+/// Nested message and enum types in `Schedule`.
+pub mod schedule {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum ScheduleExpression {
+        /// Uses AWS syntax: Minutes Hours Day-of-month Month Day-of-week Year
+        /// e.g. for a schedule that runs every 15 minutes: 0/15 * * * ? *
+        #[prost(string, tag = "1")]
+        CronExpression(::prost::alloc::string::String),
+        #[prost(message, tag = "2")]
+        Rate(super::FixedRate),
+        #[prost(message, tag = "4")]
+        CronSchedule(super::CronSchedule),
+    }
+}
+/// Represents a frequency at which to run a schedule.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum FixedRateUnit {
+    Minute = 0,
+    Hour = 1,
+    Day = 2,
+}
+impl FixedRateUnit {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            FixedRateUnit::Minute => "MINUTE",
+            FixedRateUnit::Hour => "HOUR",
+            FixedRateUnit::Day => "DAY",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "MINUTE" => Some(Self::Minute),
+            "HOUR" => Some(Self::Hour),
+            "DAY" => Some(Self::Day),
+            _ => None,
+        }
+    }
+}
+/// Request to register a launch plan. The included LaunchPlanSpec may have a complete or incomplete set of inputs required
+/// to launch a workflow execution. By default all launch plans are registered in state INACTIVE. If you wish to
+/// set the state to ACTIVE, you must submit a LaunchPlanUpdateRequest, after you have successfully created a launch plan.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LaunchPlanCreateRequest {
+    /// Uniquely identifies a launch plan entity.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::Identifier>,
+    /// User-provided launch plan details, including reference workflow, inputs and other metadata.
+    #[prost(message, optional, tag = "2")]
+    pub spec: ::core::option::Option<LaunchPlanSpec>,
+}
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LaunchPlanCreateResponse {}
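The schedule expression is a oneof, so exactly one variant is set; enum-typed fields such as `unit` travel as `i32`. A sketch of a fixed-rate schedule (module paths assumed as before):

    use flyteidl::admin::{schedule::ScheduleExpression, FixedRate, FixedRateUnit, Schedule};

    fn every_two_hours() -> Schedule {
        Schedule {
            kickoff_time_input_arg: String::new(),
            schedule_expression: Some(ScheduleExpression::Rate(FixedRate {
                value: 2,
                unit: FixedRateUnit::Hour as i32, // enums are carried as i32 on the wire
            })),
        }
    }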
+/// A LaunchPlan provides the capability to templatize workflow executions.
+/// Launch plans simplify associating one or more schedules, inputs and notifications with your workflows.
+/// Launch plans can be shared and used to trigger executions with predefined inputs even when a workflow
+/// definition doesn't necessarily have a default value for said input.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LaunchPlan {
+    /// Uniquely identifies a launch plan entity.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::Identifier>,
+    /// User-provided launch plan details, including reference workflow, inputs and other metadata.
+    #[prost(message, optional, tag = "2")]
+    pub spec: ::core::option::Option<LaunchPlanSpec>,
+    /// Values computed by the flyte platform after launch plan registration.
+    #[prost(message, optional, tag = "3")]
+    pub closure: ::core::option::Option<LaunchPlanClosure>,
+}
+/// Response object for list launch plan requests.
+/// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LaunchPlanList {
+    #[prost(message, repeated, tag = "1")]
+    pub launch_plans: ::prost::alloc::vec::Vec<LaunchPlan>,
+    /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+    /// in a query. If there are no more results, this value will be empty.
+    #[prost(string, tag = "2")]
+    pub token: ::prost::alloc::string::String,
+}
+/// Defines permissions associated with executions created by this launch plan spec.
+/// Use either of these roles when they have permissions required by your workflow execution.
+/// Deprecated.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Auth {
+    /// Defines an optional iam role which will be used for tasks run in executions created with this launch plan.
+    #[prost(string, tag = "1")]
+    pub assumable_iam_role: ::prost::alloc::string::String,
+    /// Defines an optional kubernetes service account which will be used for tasks run in executions created with this launch plan.
+    #[prost(string, tag = "2")]
+    pub kubernetes_service_account: ::prost::alloc::string::String,
+}
+/// User-provided launch plan definition and configuration values.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LaunchPlanSpec {
+    /// Reference to the Workflow template that the launch plan references
+    #[prost(message, optional, tag = "1")]
+    pub workflow_id: ::core::option::Option<super::core::Identifier>,
+    /// Metadata for the Launch Plan
+    #[prost(message, optional, tag = "2")]
+    pub entity_metadata: ::core::option::Option<LaunchPlanMetadata>,
+    /// Input values to be passed for the execution.
+    /// These can be overridden when an execution is created with this launch plan.
+    #[prost(message, optional, tag = "3")]
+    pub default_inputs: ::core::option::Option<super::core::ParameterMap>,
+    /// Fixed, non-overridable inputs for the Launch Plan.
+    /// These can not be overridden when an execution is created with this launch plan.
+    #[prost(message, optional, tag = "4")]
+    pub fixed_inputs: ::core::option::Option<super::core::LiteralMap>,
+    /// String to indicate the role to use to execute the workflow underneath
+    #[deprecated]
+    #[prost(string, tag = "5")]
+    pub role: ::prost::alloc::string::String,
+    /// Custom labels to be applied to the execution resource.
+    #[prost(message, optional, tag = "6")]
+    pub labels: ::core::option::Option<Labels>,
+    /// Custom annotations to be applied to the execution resource.
+    #[prost(message, optional, tag = "7")]
+    pub annotations: ::core::option::Option<Annotations>,
+    /// Indicates the permission associated with workflow executions triggered with this launch plan.
+    #[deprecated]
+    #[prost(message, optional, tag = "8")]
+    pub auth: ::core::option::Option<Auth>,
+    #[deprecated]
+    #[prost(message, optional, tag = "9")]
+    pub auth_role: ::core::option::Option<AuthRole>,
+    /// Indicates security context for permissions triggered with this launch plan
+    #[prost(message, optional, tag = "10")]
+    pub security_context: ::core::option::Option<super::core::SecurityContext>,
+    /// Indicates the runtime priority of the execution.
+    #[prost(message, optional, tag = "16")]
+    pub quality_of_service: ::core::option::Option<super::core::QualityOfService>,
+    /// Encapsulates user settings pertaining to offloaded data (i.e. Blobs, Schema, query data, etc.).
+    #[prost(message, optional, tag = "17")]
+    pub raw_output_data_config: ::core::option::Option<RawOutputDataConfig>,
+    /// Controls the maximum number of task nodes that can be run in parallel for the entire workflow.
+    /// This is useful to achieve fairness. Note: MapTasks are regarded as one unit,
+    /// and parallelism/concurrency of MapTasks is independent from this.
+    #[prost(int32, tag = "18")]
+    pub max_parallelism: i32,
+    /// Allows for the interruptible flag of a workflow to be overwritten for a single execution.
+    /// Omitting this field uses the workflow's value as a default.
+    /// As we need to distinguish between the field not being provided and its default value false, we have to use a wrapper
+    /// around the bool field.
+    #[prost(message, optional, tag = "19")]
+    pub interruptible: ::core::option::Option<bool>,
+    /// Allows for all cached values of a workflow and its tasks to be overwritten for a single execution.
+    /// If enabled, all calculations are performed even if cached results would be available, overwriting the stored
+    /// data once execution finishes successfully.
+    #[prost(bool, tag = "20")]
+    pub overwrite_cache: bool,
+    /// Environment variables to be set for the execution.
+    #[prost(message, optional, tag = "21")]
+    pub envs: ::core::option::Option<Envs>,
+}
+/// Values computed by the flyte platform after launch plan registration.
+/// These include expected_inputs required to be present in a CreateExecutionRequest
+/// to launch the reference workflow as well as timestamp values associated with the launch plan.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LaunchPlanClosure {
+    /// Indicates the Launch plan state.
+    #[prost(enumeration = "LaunchPlanState", tag = "1")]
+    pub state: i32,
+    /// Indicates the set of inputs expected when creating an execution with the Launch plan
+    #[prost(message, optional, tag = "2")]
+    pub expected_inputs: ::core::option::Option<super::core::ParameterMap>,
+    /// Indicates the set of outputs expected to be produced by creating an execution with the Launch plan
+    #[prost(message, optional, tag = "3")]
+    pub expected_outputs: ::core::option::Option<super::core::VariableMap>,
+    /// Time at which the launch plan was created.
+    #[prost(message, optional, tag = "4")]
+    pub created_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// Time at which the launch plan was last updated.
+    #[prost(message, optional, tag = "5")]
+    pub updated_at: ::core::option::Option<::prost_types::Timestamp>,
+}
+/// Additional launch plan attributes included in the LaunchPlanSpec not strictly required to launch
+/// the reference workflow.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LaunchPlanMetadata {
+    /// Schedule to execute the Launch Plan
+    #[prost(message, optional, tag = "1")]
+    pub schedule: ::core::option::Option<Schedule>,
+    /// List of notifications based on Execution status transitions
+    #[prost(message, repeated, tag = "2")]
+    pub notifications: ::prost::alloc::vec::Vec<Notification>,
+    /// Additional metadata for how to launch the launch plan
+    #[prost(message, optional, tag = "3")]
+    pub launch_conditions: ::core::option::Option<::prost_types::Any>,
+}
+/// Request to set the referenced launch plan state to the configured value.
+/// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LaunchPlanUpdateRequest {
+    /// Identifier of launch plan for which to change state.
+    /// +required.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::Identifier>,
+    /// Desired state to apply to the launch plan.
+    /// +required.
+    #[prost(enumeration = "LaunchPlanState", tag = "2")]
+    pub state: i32,
+}
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LaunchPlanUpdateResponse {}
+/// Represents a request struct for finding an active launch plan for a given NamedEntityIdentifier
+/// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ActiveLaunchPlanRequest {
+    /// +required.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<NamedEntityIdentifier>,
+}
+/// Represents a request structure to list active launch plans within a project/domain and optional org.
+/// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ActiveLaunchPlanListRequest {
+    /// Name of the project that contains the identifiers.
+    /// +required.
+    #[prost(string, tag = "1")]
+    pub project: ::prost::alloc::string::String,
+    /// Name of the domain the identifiers belong to within the project.
+    /// +required.
+    #[prost(string, tag = "2")]
+    pub domain: ::prost::alloc::string::String,
+    /// Indicates the number of resources to be returned.
+    /// +required.
+    #[prost(uint32, tag = "3")]
+    pub limit: u32,
+    /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+    /// in a query.
+    /// +optional
+    #[prost(string, tag = "4")]
+    pub token: ::prost::alloc::string::String,
+    /// Sort ordering.
+    /// +optional
+    #[prost(message, optional, tag = "5")]
+    pub sort_by: ::core::option::Option<Sort>,
+    /// Optional, org key applied to the resource.
+    #[prost(string, tag = "6")]
+    pub org: ::prost::alloc::string::String,
+}
+/// By default any launch plan regardless of state can be used to launch a workflow execution.
+/// However, at most one version of a launch plan
+/// (e.g. a NamedEntityIdentifier set of shared project, domain and name values) can be
+/// active at a time with regard to *schedules*. That is, at most one schedule in a NamedEntityIdentifier
+/// group will be observed and trigger executions at a defined cadence.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum LaunchPlanState {
+    Inactive = 0,
+    Active = 1,
+}
+impl LaunchPlanState {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            LaunchPlanState::Inactive => "INACTIVE",
+            LaunchPlanState::Active => "ACTIVE",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "INACTIVE" => Some(Self::Inactive),
+            "ACTIVE" => Some(Self::Active),
+            _ => None,
+        }
+    }
+}
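The generated `as_str_name`/`from_str_name` helpers round-trip the proto enum names, returning `None` for anything unknown:

    use flyteidl::admin::LaunchPlanState;

    fn enum_name_round_trip() {
        assert_eq!(LaunchPlanState::Active.as_str_name(), "ACTIVE");
        assert_eq!(LaunchPlanState::from_str_name("INACTIVE"), Some(LaunchPlanState::Inactive));
        assert_eq!(LaunchPlanState::from_str_name("PAUSED"), None); // not a defined state
    }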
+/// Indicates that a sent event was not used to update execution state due to
+/// the referenced execution already being terminated (and therefore ineligible
+/// for further state transitions).
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct EventErrorAlreadyInTerminalState {
+    /// +required
+    #[prost(string, tag = "1")]
+    pub current_phase: ::prost::alloc::string::String,
+}
+/// Indicates an event was rejected because it came from a different cluster than
+/// is on record as running the execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct EventErrorIncompatibleCluster {
+    /// The cluster which has been recorded as processing the execution.
+    /// +required
+    #[prost(string, tag = "1")]
+    pub cluster: ::prost::alloc::string::String,
+}
+/// Indicates why a sent event was not used to update execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct EventFailureReason {
+    /// +required
+    #[prost(oneof = "event_failure_reason::Reason", tags = "1, 2")]
+    pub reason: ::core::option::Option<event_failure_reason::Reason>,
+}
+/// Nested message and enum types in `EventFailureReason`.
+pub mod event_failure_reason {
+    /// +required
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Reason {
+        #[prost(message, tag = "1")]
+        AlreadyInTerminalState(super::EventErrorAlreadyInTerminalState),
+        #[prost(message, tag = "2")]
+        IncompatibleCluster(super::EventErrorIncompatibleCluster),
+    }
+}
+/// Request to send a notification that a workflow execution event has occurred.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowExecutionEventRequest {
+    /// Unique ID for this request that can be traced between services
+    #[prost(string, tag = "1")]
+    pub request_id: ::prost::alloc::string::String,
+    /// Details about the event that occurred.
+    #[prost(message, optional, tag = "2")]
+    pub event: ::core::option::Option<super::event::WorkflowExecutionEvent>,
+}
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowExecutionEventResponse {}
+/// Request to send a notification that a node execution event has occurred.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NodeExecutionEventRequest {
+    /// Unique ID for this request that can be traced between services
+    #[prost(string, tag = "1")]
+    pub request_id: ::prost::alloc::string::String,
+    /// Details about the event that occurred.
+    #[prost(message, optional, tag = "2")]
+    pub event: ::core::option::Option<super::event::NodeExecutionEvent>,
+}
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NodeExecutionEventResponse {}
+/// Request to send a notification that a task execution event has occurred.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskExecutionEventRequest {
+    /// Unique ID for this request that can be traced between services
+    #[prost(string, tag = "1")]
+    pub request_id: ::prost::alloc::string::String,
+    /// Details about the event that occurred.
+    #[prost(message, optional, tag = "2")]
+    pub event: ::core::option::Option<super::event::TaskExecutionEvent>,
+}
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskExecutionEventResponse {}
+/// Request to launch an execution with the given project, domain and optionally-assigned name.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionCreateRequest {
+    /// Name of the project the execution belongs to.
+    /// +required
+    #[prost(string, tag = "1")]
+    pub project: ::prost::alloc::string::String,
+    /// Name of the domain the execution belongs to.
+    /// A domain can be considered as a subset within a specific project.
+    /// +required
+    #[prost(string, tag = "2")]
+    pub domain: ::prost::alloc::string::String,
+    /// User provided value for the resource.
+    /// If none is provided the system will generate a unique string.
+    /// +optional
+    #[prost(string, tag = "3")]
+    pub name: ::prost::alloc::string::String,
+    /// Additional fields necessary to launch the execution.
+    /// +optional
+    #[prost(message, optional, tag = "4")]
+    pub spec: ::core::option::Option<ExecutionSpec>,
+    /// The inputs required to start the execution. All required inputs must be
+    /// included in this map. If not required and not provided, defaults apply.
+    /// +optional
+    #[prost(message, optional, tag = "5")]
+    pub inputs: ::core::option::Option<super::core::LiteralMap>,
+    /// Optional, org key applied to the resource.
+    #[prost(string, tag = "6")]
+    pub org: ::prost::alloc::string::String,
+}
+/// Request to relaunch the referenced execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionRelaunchRequest {
+    /// Identifier of the workflow execution to relaunch.
+    /// +required
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
+    /// User provided value for the relaunched execution.
+    /// If none is provided the system will generate a unique string.
+    /// +optional
+    #[prost(string, tag = "3")]
+    pub name: ::prost::alloc::string::String,
+    /// Allows for all cached values of a workflow and its tasks to be overwritten for a single execution.
+    /// If enabled, all calculations are performed even if cached results would be available, overwriting the stored
+    /// data once execution finishes successfully.
+    #[prost(bool, tag = "4")]
+    pub overwrite_cache: bool,
+}
+/// Request to recover the referenced execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionRecoverRequest {
+    /// Identifier of the workflow execution to recover.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
+    /// User provided value for the recovered execution.
+    /// If none is provided the system will generate a unique string.
+    /// +optional
+    #[prost(string, tag = "2")]
+    pub name: ::prost::alloc::string::String,
+    /// Additional metadata which will be used to overwrite any metadata in the reference execution when triggering a recovery execution.
+    #[prost(message, optional, tag = "3")]
+    pub metadata: ::core::option::Option<ExecutionMetadata>,
+}
+/// The unique identifier for a successfully created execution.
+/// If the name was *not* specified in the create request, this identifier will include a generated name.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionCreateResponse {
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
+}
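Because prost messages implement `Default`, sparse requests can rely on struct-update syntax. A hedged sketch of a create request (the project/domain values are placeholders):

    use flyteidl::admin::ExecutionCreateRequest;

    fn create_request() -> ExecutionCreateRequest {
        ExecutionCreateRequest {
            project: "my-project".to_string(),
            domain: "development".to_string(),
            // name is +optional: an empty string asks the backend to generate one.
            name: String::new(),
            ..Default::default() // spec, inputs and org left unset for brevity
        }
    }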
+/// A message used to fetch a single workflow execution entity.
+/// See :ref:`ref_flyteidl.admin.Execution` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowExecutionGetRequest {
+    /// Uniquely identifies an individual workflow execution.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
+}
+/// A workflow execution represents an instantiated workflow, including all inputs and additional
+/// metadata as well as computed results, including state, outputs, and duration-based attributes.
+/// Used as a response object in Get and List execution requests.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Execution {
+    /// Unique identifier of the workflow execution.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
+    /// User-provided configuration and inputs for launching the execution.
+    #[prost(message, optional, tag = "2")]
+    pub spec: ::core::option::Option<ExecutionSpec>,
+    /// Execution results.
+    #[prost(message, optional, tag = "3")]
+    pub closure: ::core::option::Option<ExecutionClosure>,
+}
+/// Used as a response for a request to list executions.
+/// See :ref:`ref_flyteidl.admin.Execution` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionList {
+    #[prost(message, repeated, tag = "1")]
+    pub executions: ::prost::alloc::vec::Vec<Execution>,
+    /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+    /// in a query. If there are no more results, this value will be empty.
+    #[prost(string, tag = "2")]
+    pub token: ::prost::alloc::string::String,
+}
+/// Input/output data can be represented by actual values or a link to where values are stored
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LiteralMapBlob {
+    #[prost(oneof = "literal_map_blob::Data", tags = "1, 2")]
+    pub data: ::core::option::Option<literal_map_blob::Data>,
+}
+/// Nested message and enum types in `LiteralMapBlob`.
+pub mod literal_map_blob {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Data {
+        /// Data in LiteralMap format
+        #[prost(message, tag = "1")]
+        Values(super::super::core::LiteralMap),
+        /// In the event that the map is too large, we return a uri to the data
+        #[prost(string, tag = "2")]
+        Uri(::prost::alloc::string::String),
+    }
+}
+/// Specifies metadata around an aborted workflow execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AbortMetadata {
+    /// In the case of a user-specified abort, this will pass along the user-supplied cause.
+    #[prost(string, tag = "1")]
+    pub cause: ::prost::alloc::string::String,
+    /// Identifies the entity (if any) responsible for terminating the execution
+    #[prost(string, tag = "2")]
+    pub principal: ::prost::alloc::string::String,
+}
+/// Encapsulates the results of the Execution
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionClosure {
+    /// Inputs computed and passed for execution.
+    /// computed_inputs depends on inputs in ExecutionSpec, fixed and default inputs in launch plan
+    #[deprecated]
+    #[prost(message, optional, tag = "3")]
+    pub computed_inputs: ::core::option::Option<super::core::LiteralMap>,
+    /// Most recent recorded phase for the execution.
+    #[prost(enumeration = "super::core::workflow_execution::Phase", tag = "4")]
+    pub phase: i32,
+    /// Reported time at which the execution began running.
+    #[prost(message, optional, tag = "5")]
+    pub started_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// The amount of time the execution spent running.
+    #[prost(message, optional, tag = "6")]
+    pub duration: ::core::option::Option<::prost_types::Duration>,
+    /// Reported time at which the execution was created.
+    #[prost(message, optional, tag = "7")]
+    pub created_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// Reported time at which the execution was last updated.
+    #[prost(message, optional, tag = "8")]
+    pub updated_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// The notification settings to use after merging the CreateExecutionRequest and the launch plan
+    /// notification settings. An execution launched with notifications will always prefer that definition
+    /// to notifications defined statically in a launch plan.
+    #[prost(message, repeated, tag = "9")]
+    pub notifications: ::prost::alloc::vec::Vec<Notification>,
+    /// Identifies the workflow definition for this execution.
+    #[prost(message, optional, tag = "11")]
+    pub workflow_id: ::core::option::Option<super::core::Identifier>,
+    /// Provides the details of the last state change.
+    #[prost(message, optional, tag = "14")]
+    pub state_change_details: ::core::option::Option<ExecutionStateChangeDetails>,
+    /// A result produced by a terminated execution.
+    /// A pending (non-terminal) execution will not have any output result.
+    #[prost(oneof = "execution_closure::OutputResult", tags = "1, 2, 10, 12, 13")]
+    pub output_result: ::core::option::Option<execution_closure::OutputResult>,
+}
+/// Nested message and enum types in `ExecutionClosure`.
+pub mod execution_closure {
+    /// A result produced by a terminated execution.
+    /// A pending (non-terminal) execution will not have any output result.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum OutputResult {
+        /// Output URI in the case of a successful execution.
+        /// DEPRECATED. Use GetExecutionData to fetch output data instead.
+        #[prost(message, tag = "1")]
+        Outputs(super::LiteralMapBlob),
+        /// Error information in the case of a failed execution.
+        #[prost(message, tag = "2")]
+        Error(super::super::core::ExecutionError),
+        /// In the case of a user-specified abort, this will pass along the user-supplied cause.
+        #[prost(string, tag = "10")]
+        AbortCause(::prost::alloc::string::String),
+        /// In the case of a user-specified abort, this will pass along the user and their supplied cause.
+        #[prost(message, tag = "12")]
+        AbortMetadata(super::AbortMetadata),
+        /// Raw output data produced by this execution.
+        /// DEPRECATED. Use GetExecutionData to fetch output data instead.
+        #[prost(message, tag = "13")]
+        OutputData(super::super::core::LiteralMap),
+    }
+}
+/// Represents system, rather than user-facing, metadata about an execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SystemMetadata {
+    /// Which execution cluster this execution ran on.
+    #[prost(string, tag = "1")]
+    pub execution_cluster: ::prost::alloc::string::String,
+    /// Which kubernetes namespace the execution ran under.
+    #[prost(string, tag = "2")]
+    pub namespace: ::prost::alloc::string::String,
+}
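Terminal-state handling reduces to a match on the oneof; a pending execution carries `None`. A sketch, using the module paths assumed earlier:

    use flyteidl::admin::{execution_closure::OutputResult, ExecutionClosure};

    fn summarize(closure: &ExecutionClosure) -> String {
        match &closure.output_result {
            Some(OutputResult::Error(err)) => format!("failed: {}", err.message),
            Some(OutputResult::AbortMetadata(m)) => format!("aborted by {}: {}", m.principal, m.cause),
            Some(OutputResult::AbortCause(cause)) => format!("aborted: {}", cause),
            Some(_) => "succeeded (outputs available)".to_string(),
            None => "not yet terminal".to_string(),
        }
    }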
+/// Represents attributes about an execution which are not required to launch the execution but are useful to record.
+/// These attributes are assigned at launch time and do not change.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionMetadata {
+    #[prost(enumeration = "execution_metadata::ExecutionMode", tag = "1")]
+    pub mode: i32,
+    /// Identifier of the entity that triggered this execution.
+    /// For systems using back-end authentication any value set here will be discarded in favor of the
+    /// authenticated user context.
+    #[prost(string, tag = "2")]
+    pub principal: ::prost::alloc::string::String,
+    /// Indicates the nestedness of this execution.
+    /// If a user launches a workflow execution, the default nesting is 0.
+    /// If this execution further launches a workflow (child workflow), the nesting level is incremented: 0 => 1.
+    /// Generally, if a workflow at nesting level k launches a workflow then the child workflow will have
+    /// nesting = k + 1.
+    #[prost(uint32, tag = "3")]
+    pub nesting: u32,
+    /// For scheduled executions, the requested time for execution for this specific schedule invocation.
+    #[prost(message, optional, tag = "4")]
+    pub scheduled_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// Which subworkflow node (if any) launched this execution
+    #[prost(message, optional, tag = "5")]
+    pub parent_node_execution: ::core::option::Option<
+        super::core::NodeExecutionIdentifier,
+    >,
+    /// Optional, a reference workflow execution related to this execution.
+    /// In the case of a relaunch, this references the original workflow execution.
+    #[prost(message, optional, tag = "16")]
+    pub reference_execution: ::core::option::Option<
+        super::core::WorkflowExecutionIdentifier,
+    >,
+    /// Optional, platform-specific metadata about the execution.
+    /// In the future this may be gated behind an ACL or some sort of authorization.
+    #[prost(message, optional, tag = "17")]
+    pub system_metadata: ::core::option::Option<SystemMetadata>,
+    /// Save a list of the artifacts used in this execution for now. This is a list only rather than a mapping
+    /// since we don't have a structure to handle nested ones anyways.
+    #[prost(message, repeated, tag = "18")]
+    pub artifact_ids: ::prost::alloc::vec::Vec<super::core::ArtifactId>,
+}
+/// Nested message and enum types in `ExecutionMetadata`.
+pub mod execution_metadata {
+    /// The method by which this execution was launched.
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum ExecutionMode {
+        /// The default execution mode, MANUAL implies that an execution was launched by an individual.
+        Manual = 0,
+        /// A schedule triggered this execution launch.
+        Scheduled = 1,
+        /// A system process was responsible for launching this execution rather than an individual.
+        System = 2,
+        /// This execution was launched with identical inputs as a previous execution.
+        Relaunch = 3,
+        /// This execution was triggered by another execution.
+        ChildWorkflow = 4,
+        /// This execution was recovered from another execution.
+        Recovered = 5,
+        /// Execution was kicked off by the artifact trigger system
+        Trigger = 6,
+    }
+    impl ExecutionMode {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                ExecutionMode::Manual => "MANUAL",
+                ExecutionMode::Scheduled => "SCHEDULED",
+                ExecutionMode::System => "SYSTEM",
+                ExecutionMode::Relaunch => "RELAUNCH",
+                ExecutionMode::ChildWorkflow => "CHILD_WORKFLOW",
+                ExecutionMode::Recovered => "RECOVERED",
+                ExecutionMode::Trigger => "TRIGGER",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "MANUAL" => Some(Self::Manual),
+                "SCHEDULED" => Some(Self::Scheduled),
+                "SYSTEM" => Some(Self::System),
+                "RELAUNCH" => Some(Self::Relaunch),
+                "CHILD_WORKFLOW" => Some(Self::ChildWorkflow),
+                "RECOVERED" => Some(Self::Recovered),
+                "TRIGGER" => Some(Self::Trigger),
+                _ => None,
+            }
+        }
+    }
+}
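Note that `mode` is stored as `i32` on the struct; comparing against `as i32` avoids depending on which conversion helper (`from_i32` vs. `TryFrom`) a given prost version generates. A small sketch:

    use flyteidl::admin::{execution_metadata::ExecutionMode, ExecutionMetadata};

    fn is_scheduled(meta: &ExecutionMetadata) -> bool {
        meta.mode == ExecutionMode::Scheduled as i32
    }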
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NotificationList {
+    #[prost(message, repeated, tag = "1")]
+    pub notifications: ::prost::alloc::vec::Vec<Notification>,
+}
+/// An ExecutionSpec encompasses all data used to launch this execution. The Spec does not change over the lifetime
+/// of an execution as it progresses across phase changes.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionSpec {
+    /// Launch plan to be executed
+    #[prost(message, optional, tag = "1")]
+    pub launch_plan: ::core::option::Option<super::core::Identifier>,
+    /// Input values to be passed for the execution
+    #[deprecated]
+    #[prost(message, optional, tag = "2")]
+    pub inputs: ::core::option::Option<super::core::LiteralMap>,
+    /// Metadata for the execution
+    #[prost(message, optional, tag = "3")]
+    pub metadata: ::core::option::Option<ExecutionMetadata>,
+    /// Labels to apply to the execution resource.
+    #[prost(message, optional, tag = "7")]
+    pub labels: ::core::option::Option<Labels>,
+    /// Annotations to apply to the execution resource.
+    #[prost(message, optional, tag = "8")]
+    pub annotations: ::core::option::Option<Annotations>,
+    /// Optional: security context override to apply to this execution.
+    #[prost(message, optional, tag = "10")]
+    pub security_context: ::core::option::Option<super::core::SecurityContext>,
+    /// Optional: auth override to apply to this execution.
+    #[deprecated]
+    #[prost(message, optional, tag = "16")]
+    pub auth_role: ::core::option::Option<AuthRole>,
+    /// Indicates the runtime priority of the execution.
+    #[prost(message, optional, tag = "17")]
+    pub quality_of_service: ::core::option::Option<super::core::QualityOfService>,
+    /// Controls the maximum number of task nodes that can be run in parallel for the entire workflow.
+    /// This is useful to achieve fairness. Note: MapTasks are regarded as one unit,
+    /// and parallelism/concurrency of MapTasks is independent from this.
+    #[prost(int32, tag = "18")]
+    pub max_parallelism: i32,
+    /// User setting to configure where to store offloaded data (i.e. Blobs, structured datasets, query data, etc.).
+    /// This should be a prefix like s3://my-bucket/my-data
+    #[prost(message, optional, tag = "19")]
+    pub raw_output_data_config: ::core::option::Option<RawOutputDataConfig>,
+    /// Controls how to select an available cluster on which this execution should run.
+    #[prost(message, optional, tag = "20")]
+    pub cluster_assignment: ::core::option::Option<ClusterAssignment>,
+    /// Allows for the interruptible flag of a workflow to be overwritten for a single execution.
+    /// Omitting this field uses the workflow's value as a default.
+    /// As we need to distinguish between the field not being provided and its default value false, we have to use a wrapper
+    /// around the bool field.
+    #[prost(message, optional, tag = "21")]
+    pub interruptible: ::core::option::Option<bool>,
+    /// Allows for all cached values of a workflow and its tasks to be overwritten for a single execution.
+    /// If enabled, all calculations are performed even if cached results would be available, overwriting the stored
+    /// data once execution finishes successfully.
+    #[prost(bool, tag = "22")]
+    pub overwrite_cache: bool,
+    /// Environment variables to be set for the execution.
+    #[prost(message, optional, tag = "23")]
+    pub envs: ::core::option::Option<Envs>,
+    /// Tags to be set for the execution.
+    #[prost(string, repeated, tag = "24")]
+    pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+    /// Execution cluster label to be set for the execution.
+    #[prost(message, optional, tag = "25")]
+    pub execution_cluster_label: ::core::option::Option<ExecutionClusterLabel>,
+    #[prost(oneof = "execution_spec::NotificationOverrides", tags = "5, 6")]
+    pub notification_overrides: ::core::option::Option<
+        execution_spec::NotificationOverrides,
+    >,
+}
+/// Nested message and enum types in `ExecutionSpec`.
+pub mod execution_spec {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum NotificationOverrides {
+        /// List of notifications based on Execution status transitions
+        /// When this list is not empty it is used rather than any notifications defined in the referenced launch plan.
+        /// When this list is empty, the notifications defined for the launch plan will be applied.
+        #[prost(message, tag = "5")]
+        Notifications(super::NotificationList),
+        /// This should be set to true if all notifications are intended to be disabled for this execution.
+        #[prost(bool, tag = "6")]
+        DisableAll(bool),
+    }
+}
+/// Request to terminate an in-progress execution. This action is irreversible.
+/// If an execution is already terminated, this request will simply be a no-op.
+/// This request will fail if it references a non-existent execution.
+/// If the request succeeds the phase "ABORTED" will be recorded for the termination
+/// with the optional cause added to the output_result.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionTerminateRequest {
+    /// Uniquely identifies the individual workflow execution to be terminated.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
+    /// Optional reason for aborting.
+    #[prost(string, tag = "2")]
+    pub cause: ::prost::alloc::string::String,
+}
+/// Purposefully empty, may be populated in the future.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionTerminateResponse {}
+/// Request structure to fetch inputs, output and other data produced by an execution.
+/// By default this data is not returned inline in :ref:`ref_flyteidl.admin.WorkflowExecutionGetRequest`
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowExecutionGetDataRequest {
+    /// The identifier of the execution for which to fetch inputs and outputs.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
+}
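Silencing every notification for a single run is one variant of the oneof above; a minimal sketch:

    use flyteidl::admin::{execution_spec::NotificationOverrides, ExecutionSpec};

    fn spec_without_notifications() -> ExecutionSpec {
        ExecutionSpec {
            notification_overrides: Some(NotificationOverrides::DisableAll(true)),
            ..Default::default() // remaining fields omitted for brevity
        }
    }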
+/// Response structure for WorkflowExecutionGetDataRequest which contains inputs and outputs for an execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowExecutionGetDataResponse {
+    /// Signed url to fetch a core.LiteralMap of execution outputs.
+    /// Deprecated: Please use full_outputs instead.
+    #[deprecated]
+    #[prost(message, optional, tag = "1")]
+    pub outputs: ::core::option::Option<UrlBlob>,
+    /// Signed url to fetch a core.LiteralMap of execution inputs.
+    /// Deprecated: Please use full_inputs instead.
+    #[deprecated]
+    #[prost(message, optional, tag = "2")]
+    pub inputs: ::core::option::Option<UrlBlob>,
+    /// Full_inputs will only be populated if they are under a configured size threshold.
+    #[prost(message, optional, tag = "3")]
+    pub full_inputs: ::core::option::Option<super::core::LiteralMap>,
+    /// Full_outputs will only be populated if they are under a configured size threshold.
+    #[prost(message, optional, tag = "4")]
+    pub full_outputs: ::core::option::Option<super::core::LiteralMap>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionUpdateRequest {
+    /// Identifier of the execution to update
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
+    /// State to set as the new value active/archive
+    #[prost(enumeration = "ExecutionState", tag = "2")]
+    pub state: i32,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionStateChangeDetails {
+    /// The state of the execution is used to control its visibility in the UI/CLI.
+    #[prost(enumeration = "ExecutionState", tag = "1")]
+    pub state: i32,
+    /// This timestamp represents when the state changed.
+    #[prost(message, optional, tag = "2")]
+    pub occurred_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// Identifies the entity (if any) responsible for causing the state change of the execution
+    #[prost(string, tag = "3")]
+    pub principal: ::prost::alloc::string::String,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionUpdateResponse {}
+/// WorkflowExecutionGetMetricsRequest represents a request to retrieve metrics for the specified workflow execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowExecutionGetMetricsRequest {
+    /// id defines the workflow execution to query for.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
+    /// depth defines the number of Flyte entity levels to traverse when breaking down execution details.
+    #[prost(int32, tag = "2")]
+    pub depth: i32,
+}
+/// WorkflowExecutionGetMetricsResponse represents the response containing metrics for the specified workflow execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowExecutionGetMetricsResponse {
+    /// Span defines the top-level breakdown of the workflow's execution. More precise information is nested in a
+    /// hierarchical structure using Flyte entity references.
+    #[prost(message, optional, tag = "1")]
+    pub span: ::core::option::Option<super::core::Span>,
+}
+/// The state of the execution is used to control its visibility in the UI/CLI.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum ExecutionState {
+    /// By default, all executions are considered active.
+    ExecutionActive = 0,
+    /// Archived executions are no longer visible in the UI.
+    ExecutionArchived = 1,
+}
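Callers should prefer the inline literal maps over the deprecated signed-url fields; for example (a hypothetical helper, not part of this patch):

    use flyteidl::admin::WorkflowExecutionGetDataResponse;
    use flyteidl::core::LiteralMap;

    fn inline_inputs(resp: &WorkflowExecutionGetDataResponse) -> Option<&LiteralMap> {
        // None means either no inputs or that they exceeded the size threshold.
        resp.full_inputs.as_ref()
    }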
+impl ExecutionState {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            ExecutionState::ExecutionActive => "EXECUTION_ACTIVE",
+            ExecutionState::ExecutionArchived => "EXECUTION_ARCHIVED",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "EXECUTION_ACTIVE" => Some(Self::ExecutionActive),
+            "EXECUTION_ARCHIVED" => Some(Self::ExecutionArchived),
+            _ => None,
+        }
+    }
+}
+/// A message used to fetch a single node execution entity.
+/// See :ref:`ref_flyteidl.admin.NodeExecution` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NodeExecutionGetRequest {
+    /// Uniquely identifies an individual node execution.
+    /// +required
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::NodeExecutionIdentifier>,
+}
+/// Represents a request structure to retrieve a list of node execution entities.
+/// See :ref:`ref_flyteidl.admin.NodeExecution` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NodeExecutionListRequest {
+    /// Indicates the workflow execution to filter by.
+    /// +required
+    #[prost(message, optional, tag = "1")]
+    pub workflow_execution_id: ::core::option::Option<
+        super::core::WorkflowExecutionIdentifier,
+    >,
+    /// Indicates the number of resources to be returned.
+    /// +required
+    #[prost(uint32, tag = "2")]
+    pub limit: u32,
+    /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+    /// in a query.
+    /// +optional
+    #[prost(string, tag = "3")]
+    pub token: ::prost::alloc::string::String,
+    /// Indicates a list of filters passed as string.
+    /// More info on constructing filters :
+    /// +optional
+    #[prost(string, tag = "4")]
+    pub filters: ::prost::alloc::string::String,
+    /// Sort ordering.
+    /// +optional
+    #[prost(message, optional, tag = "5")]
+    pub sort_by: ::core::option::Option<Sort>,
+    /// Unique identifier of the parent node in the execution
+    /// +optional
+    #[prost(string, tag = "6")]
+    pub unique_parent_id: ::prost::alloc::string::String,
+}
+/// Represents a request structure to retrieve a list of node execution entities launched by a specific task.
+/// This can arise when a task yields a subworkflow.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NodeExecutionForTaskListRequest {
+    /// Indicates the node execution to filter by.
+    /// +required
+    #[prost(message, optional, tag = "1")]
+    pub task_execution_id: ::core::option::Option<super::core::TaskExecutionIdentifier>,
+    /// Indicates the number of resources to be returned.
+    /// +required
+    #[prost(uint32, tag = "2")]
+    pub limit: u32,
+    /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+    /// in a query.
+    /// +optional
+    #[prost(string, tag = "3")]
+    pub token: ::prost::alloc::string::String,
+    /// Indicates a list of filters passed as string.
+    /// More info on constructing filters :
+    /// +optional
+    #[prost(string, tag = "4")]
+    pub filters: ::prost::alloc::string::String,
+    /// Sort ordering.
+    /// +optional
+    #[prost(message, optional, tag = "5")]
+    pub sort_by: ::core::option::Option<Sort>,
+}
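All of the list requests share the same token contract: pass an empty token first, then echo back each page's token until the server returns an empty one. A sketch where `fetch` stands in for the ListNodeExecutions RPC wired up elsewhere in this patch:

    use flyteidl::admin::{NodeExecution, NodeExecutionList, NodeExecutionListRequest};

    fn drain(mut fetch: impl FnMut(NodeExecutionListRequest) -> NodeExecutionList) -> Vec<NodeExecution> {
        let mut all = Vec::new();
        let mut token = String::new();
        loop {
            let page = fetch(NodeExecutionListRequest {
                limit: 100,
                token: token.clone(),
                ..Default::default()
            });
            all.extend(page.node_executions);
            if page.token.is_empty() {
                break; // an empty token marks the final page
            }
            token = page.token;
        }
        all
    }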
+/// Encapsulates all details for a single node execution entity.
+/// A node represents a component in the overall workflow graph. A node can launch a task, multiple tasks, an entire nested
+/// sub-workflow, or even a separate child-workflow execution.
+/// The same task can be called repeatedly in a single workflow but each node is unique.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NodeExecution {
+    /// Uniquely identifies an individual node execution.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::NodeExecutionIdentifier>,
+    /// Path to remote data store where input blob is stored.
+    #[prost(string, tag = "2")]
+    pub input_uri: ::prost::alloc::string::String,
+    /// Computed results associated with this node execution.
+    #[prost(message, optional, tag = "3")]
+    pub closure: ::core::option::Option<NodeExecutionClosure>,
+    /// Metadata for Node Execution
+    #[prost(message, optional, tag = "4")]
+    pub metadata: ::core::option::Option<NodeExecutionMetaData>,
+}
+/// Represents additional attributes related to a Node Execution
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NodeExecutionMetaData {
+    /// Node executions are grouped depending on retries of the parent.
+    /// Retry group is unique within the context of a parent node.
+    #[prost(string, tag = "1")]
+    pub retry_group: ::prost::alloc::string::String,
+    /// Boolean flag indicating if the node has child nodes under it.
+    /// This can be true when a node contains a dynamic workflow which then produces
+    /// child nodes.
+    #[prost(bool, tag = "2")]
+    pub is_parent_node: bool,
+    /// Node id of the node in the original workflow
+    /// This maps to value of WorkflowTemplate.nodes\[X\].id
+    #[prost(string, tag = "3")]
+    pub spec_node_id: ::prost::alloc::string::String,
+    /// Boolean flag indicating if the node contains a dynamic workflow which then produces child nodes.
+    /// This is to distinguish between subworkflows and dynamic workflows which can both have is_parent_node as true.
+    #[prost(bool, tag = "4")]
+    pub is_dynamic: bool,
+    /// Boolean flag indicating if the node is an array node. This is intended to uniquely identify
+    /// array nodes from other nodes which can have is_parent_node as true.
+    #[prost(bool, tag = "5")]
+    pub is_array: bool,
+}
+/// Response structure for a query to list node execution entities.
+/// See :ref:`ref_flyteidl.admin.NodeExecution` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NodeExecutionList {
+    #[prost(message, repeated, tag = "1")]
+    pub node_executions: ::prost::alloc::vec::Vec<NodeExecution>,
+    /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+    /// in a query. If there are no more results, this value will be empty.
+    #[prost(string, tag = "2")]
+    pub token: ::prost::alloc::string::String,
+}
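Every generated struct implements `prost::Message`, so wire round-trips are uniform across all of these types; for instance:

    use prost::Message;
    use flyteidl::admin::NodeExecutionList;

    fn wire_round_trip() {
        let page = NodeExecutionList::default();
        let bytes = page.encode_to_vec();
        assert_eq!(NodeExecutionList::decode(bytes.as_slice()).unwrap(), page);
    }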
+/// Container for node execution details and results.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NodeExecutionClosure {
+    /// The last recorded phase for this node execution.
+    #[prost(enumeration = "super::core::node_execution::Phase", tag = "3")]
+    pub phase: i32,
+    /// Time at which the node execution began running.
+    #[prost(message, optional, tag = "4")]
+    pub started_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// The amount of time the node execution spent running.
+    #[prost(message, optional, tag = "5")]
+    pub duration: ::core::option::Option<::prost_types::Duration>,
+    /// Time at which the node execution was created.
+    #[prost(message, optional, tag = "6")]
+    pub created_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// Time at which the node execution was last updated.
+    #[prost(message, optional, tag = "7")]
+    pub updated_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// String location uniquely identifying where the deck HTML file is.
+    /// NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar)
+    #[prost(string, tag = "11")]
+    pub deck_uri: ::prost::alloc::string::String,
+    /// dynamic_job_spec_uri is the location of the DynamicJobSpec proto message for a DynamicWorkflow. This is required
+    /// to correctly recover partially completed executions where the subworkflow has already been compiled.
+    #[prost(string, tag = "12")]
+    pub dynamic_job_spec_uri: ::prost::alloc::string::String,
+    /// Only a node in a terminal state will have a non-empty output_result.
+    #[prost(oneof = "node_execution_closure::OutputResult", tags = "1, 2, 10")]
+    pub output_result: ::core::option::Option<node_execution_closure::OutputResult>,
+    /// Store metadata for what the node launched.
+    /// for ex: if this is a workflow node, we store information for the launched workflow.
+    #[prost(oneof = "node_execution_closure::TargetMetadata", tags = "8, 9")]
+    pub target_metadata: ::core::option::Option<node_execution_closure::TargetMetadata>,
+}
+/// Nested message and enum types in `NodeExecutionClosure`.
+pub mod node_execution_closure {
+    /// Only a node in a terminal state will have a non-empty output_result.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum OutputResult {
+        /// Links to a remotely stored, serialized core.LiteralMap of node execution outputs.
+        /// DEPRECATED. Use GetNodeExecutionData to fetch output data instead.
+        #[prost(string, tag = "1")]
+        OutputUri(::prost::alloc::string::String),
+        /// Error information for the Node
+        #[prost(message, tag = "2")]
+        Error(super::super::core::ExecutionError),
+        /// Raw output data produced by this node execution.
+        /// DEPRECATED. Use GetNodeExecutionData to fetch output data instead.
+        #[prost(message, tag = "10")]
+        OutputData(super::super::core::LiteralMap),
+    }
+    /// Store metadata for what the node launched.
+    /// for ex: if this is a workflow node, we store information for the launched workflow.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum TargetMetadata {
+        #[prost(message, tag = "8")]
+        WorkflowNodeMetadata(super::WorkflowNodeMetadata),
+        #[prost(message, tag = "9")]
+        TaskNodeMetadata(super::TaskNodeMetadata),
+    }
+}
+/// Metadata for a WorkflowNode
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowNodeMetadata {
+    /// The identifier for a workflow execution launched by a node.
+    #[prost(message, optional, tag = "1")]
+    pub execution_id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
+}
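Distinguishing what a node launched is again a oneof match:

    use flyteidl::admin::node_execution_closure::TargetMetadata;

    fn target_kind(meta: &TargetMetadata) -> &'static str {
        match meta {
            TargetMetadata::WorkflowNodeMetadata(_) => "launched a (sub)workflow",
            TargetMetadata::TaskNodeMetadata(_) => "ran a task",
        }
    }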
+ #[prost(enumeration = "super::core::CatalogCacheStatus", tag = "1")] + pub cache_status: i32, + /// This structure carries the catalog artifact information + #[prost(message, optional, tag = "2")] + pub catalog_key: ::core::option::Option, + /// The latest checkpoint location + #[prost(string, tag = "4")] + pub checkpoint_uri: ::prost::alloc::string::String, +} +/// For dynamic workflow nodes we capture information about the dynamic workflow definition that gets generated. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DynamicWorkflowNodeMetadata { + /// id represents the unique identifier of the workflow. + #[prost(message, optional, tag = "1")] + pub id: ::core::option::Option, + /// Represents the compiled representation of the embedded dynamic workflow. + #[prost(message, optional, tag = "2")] + pub compiled_workflow: ::core::option::Option, + /// dynamic_job_spec_uri is the location of the DynamicJobSpec proto message for this DynamicWorkflow. This is + /// required to correctly recover partially completed executions where the subworkflow has already been compiled. + #[prost(string, tag = "3")] + pub dynamic_job_spec_uri: ::prost::alloc::string::String, +} +/// Request structure to fetch inputs and output for a node execution. +/// By default, these are not returned in :ref:`ref_flyteidl.admin.NodeExecutionGetRequest` +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NodeExecutionGetDataRequest { + /// The identifier of the node execution for which to fetch inputs and outputs. + #[prost(message, optional, tag = "1")] + pub id: ::core::option::Option, +} +/// Response structure for NodeExecutionGetDataRequest which contains inputs and outputs for a node execution. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NodeExecutionGetDataResponse { + /// Signed url to fetch a core.LiteralMap of node execution inputs. + /// Deprecated: Please use full_inputs instead. + #[deprecated] + #[prost(message, optional, tag = "1")] + pub inputs: ::core::option::Option, + /// Signed url to fetch a core.LiteralMap of node execution outputs. + /// Deprecated: Please use full_outputs instead. + #[deprecated] + #[prost(message, optional, tag = "2")] + pub outputs: ::core::option::Option, + /// Full_inputs will only be populated if they are under a configured size threshold. + #[prost(message, optional, tag = "3")] + pub full_inputs: ::core::option::Option, + /// Full_outputs will only be populated if they are under a configured size threshold. + #[prost(message, optional, tag = "4")] + pub full_outputs: ::core::option::Option, + /// Optional Workflow closure for a dynamically generated workflow, in the case this node yields a dynamic workflow we return its structure here. 
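+    // Illustrative sketch (not generated code): building the request that yields
+    // this response. All prost message types implement `Default`, so unspecified
+    // fields can be filled with `..Default::default()`; values are placeholders:
+    //
+    //     let req = NodeExecutionGetDataRequest {
+    //         id: Some(super::core::NodeExecutionIdentifier {
+    //             node_id: "n0".to_string(),
+    //             execution_id: Some(super::core::WorkflowExecutionIdentifier {
+    //                 project: "flytesnacks".to_string(),
+    //                 domain: "development".to_string(),
+    //                 name: "f8c1a2".to_string(),  // placeholder execution name
+    //                 ..Default::default()
+    //             }),
+    //         }),
+    //     };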
+    #[prost(message, optional, tag = "16")]
+    pub dynamic_workflow: ::core::option::Option<DynamicWorkflowNodeMetadata>,
+    #[prost(message, optional, tag = "17")]
+    pub flyte_urls: ::core::option::Option<FlyteUrls>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GetDynamicNodeWorkflowRequest {
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::NodeExecutionIdentifier>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DynamicNodeWorkflowResponse {
+    #[prost(message, optional, tag = "1")]
+    pub compiled_workflow: ::core::option::Option<super::core::CompiledWorkflowClosure>,
+}
+/// A message used to fetch a single task execution entity.
+/// See :ref:`ref_flyteidl.admin.TaskExecution` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskExecutionGetRequest {
+    /// Unique identifier for the task execution.
+    /// +required
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::TaskExecutionIdentifier>,
+}
+/// Represents a request structure to retrieve a list of task execution entities yielded by a specific node execution.
+/// See :ref:`ref_flyteidl.admin.TaskExecution` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskExecutionListRequest {
+    /// Indicates the node execution to filter by.
+    /// +required
+    #[prost(message, optional, tag = "1")]
+    pub node_execution_id: ::core::option::Option<super::core::NodeExecutionIdentifier>,
+    /// Indicates the number of resources to be returned.
+    /// +required
+    #[prost(uint32, tag = "2")]
+    pub limit: u32,
+    /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+    /// in a query.
+    /// +optional
+    #[prost(string, tag = "3")]
+    pub token: ::prost::alloc::string::String,
+    /// Indicates a list of filters passed as string.
+    /// More info on constructing filters :
+    /// +optional
+    #[prost(string, tag = "4")]
+    pub filters: ::prost::alloc::string::String,
+    /// Sort ordering for returned list.
+    /// +optional
+    #[prost(message, optional, tag = "5")]
+    pub sort_by: ::core::option::Option<Sort>,
+}
+/// Encapsulates all details for a single task execution entity.
+/// A task execution represents an instantiated task, including all inputs and additional
+/// metadata as well as computed results including state, outputs, and duration-based attributes.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskExecution {
+    /// Unique identifier for the task execution.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::TaskExecutionIdentifier>,
+    /// Path to remote data store where input blob is stored.
+    #[prost(string, tag = "2")]
+    pub input_uri: ::prost::alloc::string::String,
+    /// Task execution details and results.
+    #[prost(message, optional, tag = "3")]
+    pub closure: ::core::option::Option<TaskExecutionClosure>,
+    /// Whether this task spawned nodes.
+    #[prost(bool, tag = "4")]
+    pub is_parent: bool,
+}
+/// Response structure for a query returning a list of task execution entities.
+/// See :ref:`ref_flyteidl.admin.TaskExecution` for more details
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskExecutionList {
+    #[prost(message, repeated, tag = "1")]
+    pub task_executions: ::prost::alloc::vec::Vec<TaskExecution>,
+    /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
+    /// in a query. If there are no more results, this value will be empty.
+    #[prost(string, tag = "2")]
+    pub token: ::prost::alloc::string::String,
+}
+/// Container for task execution details and results.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskExecutionClosure {
+    /// The last recorded phase for this task execution.
+    #[prost(enumeration = "super::core::task_execution::Phase", tag = "3")]
+    pub phase: i32,
+    /// Detailed log information output by the task execution.
+    #[prost(message, repeated, tag = "4")]
+    pub logs: ::prost::alloc::vec::Vec<super::core::TaskLog>,
+    /// Time at which the task execution began running.
+    #[prost(message, optional, tag = "5")]
+    pub started_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// The amount of time the task execution spent running.
+    #[prost(message, optional, tag = "6")]
+    pub duration: ::core::option::Option<::prost_types::Duration>,
+    /// Time at which the task execution was created.
+    #[prost(message, optional, tag = "7")]
+    pub created_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// Time at which the task execution was last updated.
+    #[prost(message, optional, tag = "8")]
+    pub updated_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// Custom data specific to the task plugin.
+    #[prost(message, optional, tag = "9")]
+    pub custom_info: ::core::option::Option<::prost_types::Struct>,
+    /// If there is an explanation for the most recent phase transition, the reason will capture it.
+    #[prost(string, tag = "10")]
+    pub reason: ::prost::alloc::string::String,
+    /// A predefined yet extensible Task type identifier.
+    #[prost(string, tag = "11")]
+    pub task_type: ::prost::alloc::string::String,
+    /// Metadata around how a task was executed.
+    #[prost(message, optional, tag = "16")]
+    pub metadata: ::core::option::Option<super::event::TaskExecutionMetadata>,
+    /// The event version is used to indicate versioned changes in how data is maintained using this
+    /// proto message. For example, event_version > 0 means that map tasks' logs use the
+    /// TaskExecutionMetadata ExternalResourceInfo fields for each subtask rather than the TaskLog
+    /// in this message.
+    #[prost(int32, tag = "17")]
+    pub event_version: i32,
+    /// A time-series of the phase transition or update explanations. Compared to storing a single reason,
+    /// as was done previously, this is far more valuable for visualizing and understanding historical evaluations.
+    #[prost(message, repeated, tag = "18")]
+    pub reasons: ::prost::alloc::vec::Vec<Reason>,
+    #[prost(oneof = "task_execution_closure::OutputResult", tags = "1, 2, 12")]
+    pub output_result: ::core::option::Option<task_execution_closure::OutputResult>,
+}
+/// Nested message and enum types in `TaskExecutionClosure`.
+pub mod task_execution_closure {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum OutputResult {
+        /// Path to remote data store where the output blob is stored if the execution succeeded (and produced outputs).
+        /// DEPRECATED. Use GetTaskExecutionData to fetch output data instead.
+        #[prost(string, tag = "1")]
+        OutputUri(::prost::alloc::string::String),
+        /// Error information for the task execution. Populated if the execution failed.
+        #[prost(message, tag = "2")]
+        Error(super::super::core::ExecutionError),
+        /// Raw output data produced by this task execution.
+        /// DEPRECATED. Use GetTaskExecutionData to fetch output data instead.
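+        // Illustrative sketch (not generated code): consuming this oneof. prost
+        // maps a protobuf oneof to an Option<enum>, so callers typically match:
+        //
+        //     match closure.output_result {
+        //         Some(OutputResult::OutputUri(uri)) => println!("outputs at {uri}"),
+        //         Some(OutputResult::Error(err)) => eprintln!("failed: {}", err.message),
+        //         Some(OutputResult::OutputData(map)) => { /* inline LiteralMap */ }
+        //         None => { /* not in a terminal state yet */ }
+        //     }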
+ #[prost(message, tag = "12")] + OutputData(super::super::core::LiteralMap), + } +} +/// Reason is a single message annotated with a timestamp to indicate the instant the reason occurred. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Reason { + /// occurred_at is the timestamp indicating the instant that this reason happened. + #[prost(message, optional, tag = "1")] + pub occurred_at: ::core::option::Option<::prost_types::Timestamp>, + /// message is the explanation for the most recent phase transition or status update. + #[prost(string, tag = "2")] + pub message: ::prost::alloc::string::String, +} +/// Request structure to fetch inputs and output for a task execution. +/// By default this data is not returned inline in :ref:`ref_flyteidl.admin.TaskExecutionGetRequest` +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TaskExecutionGetDataRequest { + /// The identifier of the task execution for which to fetch inputs and outputs. + /// +required + #[prost(message, optional, tag = "1")] + pub id: ::core::option::Option, +} +/// Response structure for TaskExecutionGetDataRequest which contains inputs and outputs for a task execution. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TaskExecutionGetDataResponse { + /// Signed url to fetch a core.LiteralMap of task execution inputs. + /// Deprecated: Please use full_inputs instead. + #[deprecated] + #[prost(message, optional, tag = "1")] + pub inputs: ::core::option::Option, + /// Signed url to fetch a core.LiteralMap of task execution outputs. + /// Deprecated: Please use full_outputs instead. + #[deprecated] + #[prost(message, optional, tag = "2")] + pub outputs: ::core::option::Option, + /// Full_inputs will only be populated if they are under a configured size threshold. + #[prost(message, optional, tag = "3")] + pub full_inputs: ::core::option::Option, + /// Full_outputs will only be populated if they are under a configured size threshold. + #[prost(message, optional, tag = "4")] + pub full_outputs: ::core::option::Option, + /// flyte tiny url to fetch a core.LiteralMap of task execution's IO + /// Deck will be empty for task + #[prost(message, optional, tag = "5")] + pub flyte_urls: ::core::option::Option, +} +/// Response for the GetVersion API +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetVersionResponse { + /// The control plane version information. 
FlyteAdmin and related components + /// form the control plane of Flyte + #[prost(message, optional, tag = "1")] + pub control_plane_version: ::core::option::Option, +} +/// Provides Version information for a component +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Version { + /// Specifies the GIT sha of the build + #[prost(string, tag = "1")] + pub build: ::prost::alloc::string::String, + /// Version for the build, should follow a semver + #[prost(string, tag = "2")] + pub version: ::prost::alloc::string::String, + /// Build timestamp + #[prost(string, tag = "3")] + pub build_time: ::prost::alloc::string::String, +} +/// Empty request for GetVersion +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetVersionRequest {} diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.cacheservice.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.cacheservice.rs new file mode 100644 index 0000000000..54ee6985b9 --- /dev/null +++ b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.cacheservice.rs @@ -0,0 +1,399 @@ +/// +/// Additional metadata as key-value pairs +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KeyMapMetadata { + /// Additional metadata as key-value pairs + #[prost(map = "string, string", tag = "1")] + pub values: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, +} +/// +/// Metadata for cached outputs, including the source identifier and timestamps. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Metadata { + /// Source task or workflow identifier + #[prost(message, optional, tag = "1")] + pub source_identifier: ::core::option::Option, + /// Additional metadata as key-value pairs + #[prost(message, optional, tag = "2")] + pub key_map: ::core::option::Option, + /// Creation timestamp + #[prost(message, optional, tag = "3")] + pub created_at: ::core::option::Option<::prost_types::Timestamp>, + /// Last update timestamp + #[prost(message, optional, tag = "4")] + pub last_updated_at: ::core::option::Option<::prost_types::Timestamp>, +} +/// +/// Represents cached output, either as literals or an URI, with associated metadata. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CachedOutput { + /// Associated metadata + #[prost(message, optional, tag = "3")] + pub metadata: ::core::option::Option, + #[prost(oneof = "cached_output::Output", tags = "1, 2")] + pub output: ::core::option::Option, +} +/// Nested message and enum types in `CachedOutput`. +pub mod cached_output { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Output { + /// Output literals + #[prost(message, tag = "1")] + OutputLiterals(super::super::core::LiteralMap), + /// URI to output data + #[prost(string, tag = "2")] + OutputUri(::prost::alloc::string::String), + } +} +/// +/// Request to retrieve cached data by key. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetCacheRequest { + /// Cache key + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, +} +/// +/// Response with cached data for a given key. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetCacheResponse { + /// Cached output + #[prost(message, optional, tag = "1")] + pub output: ::core::option::Option, +} +/// +/// Request to store/update cached data by key. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PutCacheRequest { + /// Cache key + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + /// Output to cache + #[prost(message, optional, tag = "2")] + pub output: ::core::option::Option, + /// Overwrite flag + #[prost(bool, tag = "3")] + pub overwrite: bool, +} +/// +/// Response message of cache store/update operation. +/// +/// Empty, success indicated by no errors +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PutCacheResponse {} +/// +/// Request to delete cached data by key. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteCacheRequest { + /// Cache key + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, +} +/// +/// Response message of cache deletion operation. +/// +/// Empty, success indicated by no errors +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteCacheResponse {} +/// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Reservation { + /// The unique ID for the reservation - same as the cache key + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + /// The unique ID of the owner for the reservation + #[prost(string, tag = "2")] + pub owner_id: ::prost::alloc::string::String, + /// Requested reservation extension heartbeat interval + #[prost(message, optional, tag = "3")] + pub heartbeat_interval: ::core::option::Option<::prost_types::Duration>, + /// Expiration timestamp of this reservation + #[prost(message, optional, tag = "4")] + pub expires_at: ::core::option::Option<::prost_types::Timestamp>, +} +/// +/// Request to get or extend a reservation for a cache key +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetOrExtendReservationRequest { + /// The unique ID for the reservation - same as the cache key + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + /// The unique ID of the owner for the reservation + #[prost(string, tag = "2")] + pub owner_id: ::prost::alloc::string::String, + /// Requested reservation extension heartbeat interval + #[prost(message, optional, tag = "3")] + pub heartbeat_interval: ::core::option::Option<::prost_types::Duration>, +} +/// +/// Request to get or extend a reservation for a cache key +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetOrExtendReservationResponse { + /// The reservation that was created or extended + #[prost(message, optional, tag = "1")] + pub reservation: ::core::option::Option, +} +/// +/// Request to release the reservation for a cache key +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReleaseReservationRequest { + /// The unique ID for the reservation - same as the cache key + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, 
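+    // Illustrative sketch (not generated code): the reservation protocol, under
+    // the assumption that a worker first acquires, extends by re-sending the same
+    // request as a heartbeat, and releases when done (key/owner are placeholders):
+    //
+    //     let acquire = GetOrExtendReservationRequest {
+    //         key: "cache-key".to_string(),
+    //         owner_id: "worker-1".to_string(),
+    //         heartbeat_interval: Some(::prost_types::Duration { seconds: 10, nanos: 0 }),
+    //     };
+    //     // ... do work, periodically re-sending `acquire` to extend ...
+    //     let release = ReleaseReservationRequest {
+    //         key: "cache-key".to_string(),
+    //         owner_id: "worker-1".to_string(),
+    //     };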
+ /// The unique ID of the owner for the reservation + #[prost(string, tag = "2")] + pub owner_id: ::prost::alloc::string::String, +} +/// +/// Response message of release reservation operation. +/// +/// Empty, success indicated by no errors +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReleaseReservationResponse {} +/// Generated client implementations. +pub mod cache_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// + /// CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs. + #[derive(Debug, Clone)] + pub struct CacheServiceClient { + inner: tonic::client::Grpc, + } + impl CacheServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl CacheServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> CacheServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + CacheServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Retrieves cached data by key. 
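+        // Illustrative sketch (not generated code): connecting and issuing a call.
+        // The endpoint URL is a placeholder; `connect` requires tonic's `transport`
+        // feature and `CompressionEncoding` comes from `tonic::codec`:
+        //
+        //     let mut client = CacheServiceClient::connect("http://localhost:8089")
+        //         .await?
+        //         .send_compressed(CompressionEncoding::Gzip)
+        //         .accept_compressed(CompressionEncoding::Gzip);
+        //     let resp = client.get(GetCacheRequest { key: "cache-key".into() }).await?;
+        //     let cached = resp.into_inner().output;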
+ pub async fn get( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.cacheservice.CacheService/Get", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.cacheservice.CacheService", "Get")); + self.inner.unary(req, path, codec).await + } + /// Stores or updates cached data by key. + pub async fn put( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.cacheservice.CacheService/Put", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.cacheservice.CacheService", "Put")); + self.inner.unary(req, path, codec).await + } + /// Deletes cached data by key. + pub async fn delete( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.cacheservice.CacheService/Delete", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.cacheservice.CacheService", "Delete")); + self.inner.unary(req, path, codec).await + } + /// Get or extend a reservation for a cache key + pub async fn get_or_extend_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.cacheservice.CacheService/GetOrExtendReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.cacheservice.CacheService", + "GetOrExtendReservation", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Release the reservation for a cache key + pub async fn release_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.cacheservice.CacheService/ReleaseReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.cacheservice.CacheService", + "ReleaseReservation", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.core.rs 
b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.core.rs new file mode 100644 index 0000000000..8aa1741309 --- /dev/null +++ b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.core.rs @@ -0,0 +1,3162 @@ +/// Indicates various phases of Workflow Execution +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WorkflowExecution {} +/// Nested message and enum types in `WorkflowExecution`. +pub mod workflow_execution { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Phase { + Undefined = 0, + Queued = 1, + Running = 2, + Succeeding = 3, + Succeeded = 4, + Failing = 5, + Failed = 6, + Aborted = 7, + TimedOut = 8, + Aborting = 9, + } + impl Phase { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Phase::Undefined => "UNDEFINED", + Phase::Queued => "QUEUED", + Phase::Running => "RUNNING", + Phase::Succeeding => "SUCCEEDING", + Phase::Succeeded => "SUCCEEDED", + Phase::Failing => "FAILING", + Phase::Failed => "FAILED", + Phase::Aborted => "ABORTED", + Phase::TimedOut => "TIMED_OUT", + Phase::Aborting => "ABORTING", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNDEFINED" => Some(Self::Undefined), + "QUEUED" => Some(Self::Queued), + "RUNNING" => Some(Self::Running), + "SUCCEEDING" => Some(Self::Succeeding), + "SUCCEEDED" => Some(Self::Succeeded), + "FAILING" => Some(Self::Failing), + "FAILED" => Some(Self::Failed), + "ABORTED" => Some(Self::Aborted), + "TIMED_OUT" => Some(Self::TimedOut), + "ABORTING" => Some(Self::Aborting), + _ => None, + } + } + } +} +/// Indicates various phases of Node Execution that only include the time spent to run the nodes/workflows +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NodeExecution {} +/// Nested message and enum types in `NodeExecution`. +pub mod node_execution { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Phase { + Undefined = 0, + Queued = 1, + Running = 2, + Succeeded = 3, + Failing = 4, + Failed = 5, + Aborted = 6, + Skipped = 7, + TimedOut = 8, + DynamicRunning = 9, + Recovered = 10, + } + impl Phase { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Phase::Undefined => "UNDEFINED", + Phase::Queued => "QUEUED", + Phase::Running => "RUNNING", + Phase::Succeeded => "SUCCEEDED", + Phase::Failing => "FAILING", + Phase::Failed => "FAILED", + Phase::Aborted => "ABORTED", + Phase::Skipped => "SKIPPED", + Phase::TimedOut => "TIMED_OUT", + Phase::DynamicRunning => "DYNAMIC_RUNNING", + Phase::Recovered => "RECOVERED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
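+        // Illustrative sketch (not generated code): `as_str_name`/`from_str_name`
+        // round-trip the proto enum names, while message fields carry the numeric
+        // value as an i32:
+        //
+        //     assert_eq!(Phase::Succeeded.as_str_name(), "SUCCEEDED");
+        //     assert_eq!(Phase::from_str_name("SUCCEEDED"), Some(Phase::Succeeded));
+        //     let raw: i32 = Phase::Succeeded as i32;  // value stored in message fields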
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNDEFINED" => Some(Self::Undefined), + "QUEUED" => Some(Self::Queued), + "RUNNING" => Some(Self::Running), + "SUCCEEDED" => Some(Self::Succeeded), + "FAILING" => Some(Self::Failing), + "FAILED" => Some(Self::Failed), + "ABORTED" => Some(Self::Aborted), + "SKIPPED" => Some(Self::Skipped), + "TIMED_OUT" => Some(Self::TimedOut), + "DYNAMIC_RUNNING" => Some(Self::DynamicRunning), + "RECOVERED" => Some(Self::Recovered), + _ => None, + } + } + } +} +/// Phases that task plugins can go through. Not all phases may be applicable to a specific plugin task, +/// but this is the cumulative list that customers may want to know about for their task. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TaskExecution {} +/// Nested message and enum types in `TaskExecution`. +pub mod task_execution { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Phase { + Undefined = 0, + Queued = 1, + Running = 2, + Succeeded = 3, + Aborted = 4, + Failed = 5, + /// To indicate cases where task is initializing, like: ErrImagePull, ContainerCreating, PodInitializing + Initializing = 6, + /// To address cases, where underlying resource is not available: Backoff error, Resource quota exceeded + WaitingForResources = 7, + } + impl Phase { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Phase::Undefined => "UNDEFINED", + Phase::Queued => "QUEUED", + Phase::Running => "RUNNING", + Phase::Succeeded => "SUCCEEDED", + Phase::Aborted => "ABORTED", + Phase::Failed => "FAILED", + Phase::Initializing => "INITIALIZING", + Phase::WaitingForResources => "WAITING_FOR_RESOURCES", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNDEFINED" => Some(Self::Undefined), + "QUEUED" => Some(Self::Queued), + "RUNNING" => Some(Self::Running), + "SUCCEEDED" => Some(Self::Succeeded), + "ABORTED" => Some(Self::Aborted), + "FAILED" => Some(Self::Failed), + "INITIALIZING" => Some(Self::Initializing), + "WAITING_FOR_RESOURCES" => Some(Self::WaitingForResources), + _ => None, + } + } + } +} +/// Represents the error message from the execution. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExecutionError { + /// Error code indicates a grouping of a type of error. + /// More Info: + #[prost(string, tag = "1")] + pub code: ::prost::alloc::string::String, + /// Detailed description of the error - including stack trace. + #[prost(string, tag = "2")] + pub message: ::prost::alloc::string::String, + /// Full error contents accessible via a URI + #[prost(string, tag = "3")] + pub error_uri: ::prost::alloc::string::String, + #[prost(enumeration = "execution_error::ErrorKind", tag = "4")] + pub kind: i32, +} +/// Nested message and enum types in `ExecutionError`. 
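+// Illustrative sketch (not generated code): enum-typed fields are carried as i32,
+// so a caller sets `kind` by casting and compares it the same way (values below
+// are placeholders):
+//
+//     let err = ExecutionError {
+//         code: "UserTaskError".to_string(),
+//         message: "boom".to_string(),
+//         error_uri: String::new(),
+//         kind: execution_error::ErrorKind::User as i32,
+//     };
+//     let is_user_error = err.kind == execution_error::ErrorKind::User as i32;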
+pub mod execution_error { + /// Error type: System or User + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum ErrorKind { + Unknown = 0, + User = 1, + System = 2, + } + impl ErrorKind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ErrorKind::Unknown => "UNKNOWN", + ErrorKind::User => "USER", + ErrorKind::System => "SYSTEM", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNKNOWN" => Some(Self::Unknown), + "USER" => Some(Self::User), + "SYSTEM" => Some(Self::System), + _ => None, + } + } + } +} +/// Log information for the task that is specific to a log sink +/// When our log story is flushed out, we may have more metadata here like log link expiry +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TaskLog { + #[prost(string, tag = "1")] + pub uri: ::prost::alloc::string::String, + #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + #[prost(enumeration = "task_log::MessageFormat", tag = "3")] + pub message_format: i32, + #[prost(message, optional, tag = "4")] + pub ttl: ::core::option::Option<::prost_types::Duration>, +} +/// Nested message and enum types in `TaskLog`. +pub mod task_log { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum MessageFormat { + Unknown = 0, + Csv = 1, + Json = 2, + } + impl MessageFormat { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + MessageFormat::Unknown => "UNKNOWN", + MessageFormat::Csv => "CSV", + MessageFormat::Json => "JSON", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNKNOWN" => Some(Self::Unknown), + "CSV" => Some(Self::Csv), + "JSON" => Some(Self::Json), + _ => None, + } + } + } +} +/// Represents customized execution run-time attributes. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QualityOfServiceSpec { + /// Indicates how much queueing delay an execution can tolerate. + #[prost(message, optional, tag = "1")] + pub queueing_budget: ::core::option::Option<::prost_types::Duration>, +} +/// Indicates the priority of an execution. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QualityOfService { + #[prost(oneof = "quality_of_service::Designation", tags = "1, 2")] + pub designation: ::core::option::Option, +} +/// Nested message and enum types in `QualityOfService`. +pub mod quality_of_service { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Tier { + /// Default: no quality of service specified. 
+ Undefined = 0, + High = 1, + Medium = 2, + Low = 3, + } + impl Tier { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Tier::Undefined => "UNDEFINED", + Tier::High => "HIGH", + Tier::Medium => "MEDIUM", + Tier::Low => "LOW", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNDEFINED" => Some(Self::Undefined), + "HIGH" => Some(Self::High), + "MEDIUM" => Some(Self::Medium), + "LOW" => Some(Self::Low), + _ => None, + } + } + } + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Designation { + #[prost(enumeration = "Tier", tag = "1")] + Tier(i32), + #[prost(message, tag = "2")] + Spec(super::QualityOfServiceSpec), + } +} +/// Encapsulation of fields that uniquely identifies a Flyte resource. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Identifier { + /// Identifies the specific type of resource that this identifier corresponds to. + #[prost(enumeration = "ResourceType", tag = "1")] + pub resource_type: i32, + /// Name of the project the resource belongs to. + #[prost(string, tag = "2")] + pub project: ::prost::alloc::string::String, + /// Name of the domain the resource belongs to. + /// A domain can be considered as a subset within a specific project. + #[prost(string, tag = "3")] + pub domain: ::prost::alloc::string::String, + /// User provided value for the resource. + #[prost(string, tag = "4")] + pub name: ::prost::alloc::string::String, + /// Specific version of the resource. + #[prost(string, tag = "5")] + pub version: ::prost::alloc::string::String, + /// Optional, org key applied to the resource. + #[prost(string, tag = "6")] + pub org: ::prost::alloc::string::String, +} +/// Encapsulation of fields that uniquely identifies a Flyte workflow execution +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WorkflowExecutionIdentifier { + /// Name of the project the resource belongs to. + #[prost(string, tag = "1")] + pub project: ::prost::alloc::string::String, + /// Name of the domain the resource belongs to. + /// A domain can be considered as a subset within a specific project. + #[prost(string, tag = "2")] + pub domain: ::prost::alloc::string::String, + /// User or system provided value for the resource. + #[prost(string, tag = "4")] + pub name: ::prost::alloc::string::String, + /// Optional, org key applied to the resource. + #[prost(string, tag = "5")] + pub org: ::prost::alloc::string::String, +} +/// Encapsulation of fields that identify a Flyte node execution entity. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NodeExecutionIdentifier { + #[prost(string, tag = "1")] + pub node_id: ::prost::alloc::string::String, + #[prost(message, optional, tag = "2")] + pub execution_id: ::core::option::Option, +} +/// Encapsulation of fields that identify a Flyte task execution entity. 
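+// Illustrative sketch (not generated code): the identifier types nest, so a task
+// execution is addressed by task identifier + node execution + retry attempt
+// (all values below are placeholders):
+//
+//     let te_id = TaskExecutionIdentifier {
+//         task_id: Some(Identifier {
+//             resource_type: ResourceType::Task as i32,
+//             project: "flytesnacks".to_string(),
+//             domain: "development".to_string(),
+//             name: "my_task".to_string(),
+//             version: "v1".to_string(),
+//             org: String::new(),
+//         }),
+//         node_execution_id: Some(NodeExecutionIdentifier::default()),
+//         retry_attempt: 0,
+//     };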
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskExecutionIdentifier {
+    #[prost(message, optional, tag = "1")]
+    pub task_id: ::core::option::Option<Identifier>,
+    #[prost(message, optional, tag = "2")]
+    pub node_execution_id: ::core::option::Option<NodeExecutionIdentifier>,
+    #[prost(uint32, tag = "3")]
+    pub retry_attempt: u32,
+}
+/// Encapsulation of fields that uniquely identify a signal.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SignalIdentifier {
+    /// Unique identifier for a signal.
+    #[prost(string, tag = "1")]
+    pub signal_id: ::prost::alloc::string::String,
+    /// Identifies the Flyte workflow execution this signal belongs to.
+    #[prost(message, optional, tag = "2")]
+    pub execution_id: ::core::option::Option<WorkflowExecutionIdentifier>,
+}
+/// Indicates a resource type within Flyte.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum ResourceType {
+    Unspecified = 0,
+    Task = 1,
+    Workflow = 2,
+    LaunchPlan = 3,
+    /// A dataset represents an entity modeled in Flyte DataCatalog. A Dataset is also a versioned entity and can be a compilation of multiple individual objects.
+    /// Eventually all Catalog objects should be modeled similar to Flyte Objects. Dataset entities make it possible for the UI and CLI to act on the objects
+    /// in a similar manner to other Flyte objects.
+    Dataset = 4,
+}
+impl ResourceType {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            ResourceType::Unspecified => "UNSPECIFIED",
+            ResourceType::Task => "TASK",
+            ResourceType::Workflow => "WORKFLOW",
+            ResourceType::LaunchPlan => "LAUNCH_PLAN",
+            ResourceType::Dataset => "DATASET",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "UNSPECIFIED" => Some(Self::Unspecified),
+            "TASK" => Some(Self::Task),
+            "WORKFLOW" => Some(Self::Workflow),
+            "LAUNCH_PLAN" => Some(Self::LaunchPlan),
+            "DATASET" => Some(Self::Dataset),
+            _ => None,
+        }
+    }
+}
+/// Defines schema columns and types to strongly type-validate schema interoperability.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SchemaType {
+    /// A list of ordered columns this schema comprises.
+    #[prost(message, repeated, tag = "3")]
+    pub columns: ::prost::alloc::vec::Vec<schema_type::SchemaColumn>,
+}
+/// Nested message and enum types in `SchemaType`.
+pub mod schema_type {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct SchemaColumn {
+        /// A unique name, within the schema type, for the column.
+        #[prost(string, tag = "1")]
+        pub name: ::prost::alloc::string::String,
+        /// The column type. This currently allows only a limited set of types.
+        #[prost(enumeration = "schema_column::SchemaColumnType", tag = "2")]
+        pub r#type: i32,
+    }
+    /// Nested message and enum types in `SchemaColumn`.
+ pub mod schema_column { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum SchemaColumnType { + Integer = 0, + Float = 1, + String = 2, + Boolean = 3, + Datetime = 4, + Duration = 5, + } + impl SchemaColumnType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SchemaColumnType::Integer => "INTEGER", + SchemaColumnType::Float => "FLOAT", + SchemaColumnType::String => "STRING", + SchemaColumnType::Boolean => "BOOLEAN", + SchemaColumnType::Datetime => "DATETIME", + SchemaColumnType::Duration => "DURATION", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "INTEGER" => Some(Self::Integer), + "FLOAT" => Some(Self::Float), + "STRING" => Some(Self::String), + "BOOLEAN" => Some(Self::Boolean), + "DATETIME" => Some(Self::Datetime), + "DURATION" => Some(Self::Duration), + _ => None, + } + } + } + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StructuredDatasetType { + /// A list of ordered columns this schema comprises of. + #[prost(message, repeated, tag = "1")] + pub columns: ::prost::alloc::vec::Vec, + /// This is the storage format, the format of the bits at rest + /// parquet, feather, csv, etc. + /// For two types to be compatible, the format will need to be an exact match. + #[prost(string, tag = "2")] + pub format: ::prost::alloc::string::String, + /// This is a string representing the type that the bytes in external_schema_bytes are formatted in. + /// This is an optional field that will not be used for type checking. + #[prost(string, tag = "3")] + pub external_schema_type: ::prost::alloc::string::String, + /// The serialized bytes of a third-party schema library like Arrow. + /// This is an optional field that will not be used for type checking. + #[prost(bytes = "vec", tag = "4")] + pub external_schema_bytes: ::prost::alloc::vec::Vec, +} +/// Nested message and enum types in `StructuredDatasetType`. +pub mod structured_dataset_type { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct DatasetColumn { + /// A unique name within the schema type for the column. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// The column type. + #[prost(message, optional, tag = "2")] + pub literal_type: ::core::option::Option, + } +} +/// Defines type behavior for blob objects +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlobType { + /// Format can be a free form string understood by SDK/UI etc like + /// csv, parquet etc + #[prost(string, tag = "1")] + pub format: ::prost::alloc::string::String, + #[prost(enumeration = "blob_type::BlobDimensionality", tag = "2")] + pub dimensionality: i32, +} +/// Nested message and enum types in `BlobType`. 
+pub mod blob_type {
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum BlobDimensionality {
+        Single = 0,
+        Multipart = 1,
+    }
+    impl BlobDimensionality {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                BlobDimensionality::Single => "SINGLE",
+                BlobDimensionality::Multipart => "MULTIPART",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "SINGLE" => Some(Self::Single),
+                "MULTIPART" => Some(Self::Multipart),
+                _ => None,
+            }
+        }
+    }
+}
+/// Enables declaring enum types with predefined string values.
+/// For len(values) > 0, the first value in the ordered list is regarded as the default value. If you wish
+/// to provide no defaults, make the first value undefined.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct EnumType {
+    /// Predefined set of enum values.
+    #[prost(string, repeated, tag = "1")]
+    pub values: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+}
+/// Defines a tagged union type, also known as a variant (and formally as the sum type).
+///
+/// A sum type S is defined by a sequence of types (A, B, C, ...), each tagged by a string tag.
+/// A value of type S is constructed from a value of any of the variant types. The specific choice of type is recorded by
+/// storing the variant's tag with the literal value and can be examined at runtime.
+///
+/// Type S is typically written as
+/// S := Apple A | Banana B | Cantaloupe C | ...
+///
+/// Notably, a nullable (optional) type is a sum type between some type X and the singleton type representing a null-value:
+/// Optional X := X | Null
+///
+/// See also:
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct UnionType {
+    /// Predefined set of variants in the union.
+    #[prost(message, repeated, tag = "1")]
+    pub variants: ::prost::alloc::vec::Vec<LiteralType>,
+}
+/// Hints to improve type matching,
+/// e.g. allows distinguishing output from custom type transformers
+/// even if the underlying IDL serialization matches.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TypeStructure {
+    /// Must exactly match for types to be castable.
+    #[prost(string, tag = "1")]
+    pub tag: ::prost::alloc::string::String,
+    /// dataclass_type only exists for dataclasses.
+    /// This is used to resolve the type of the fields of a dataclass.
+    /// The key is the field name, and the value is the literal type of the field,
+    /// e.g. for dataclass Foo with field a, where a is a string,
+    /// Foo.a will be resolved as a literal type of string from dataclass_type.
+    #[prost(map = "string, message", tag = "2")]
+    pub dataclass_type: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        LiteralType,
+    >,
+}
+/// TypeAnnotation encapsulates registration time information about a type. This can be used for various control-plane operations. TypeAnnotation will not be available at runtime when a task runs.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TypeAnnotation {
+    /// An arbitrary JSON payload to describe a type.
+    #[prost(message, optional, tag = "1")]
+    pub annotations: ::core::option::Option<::prost_types::Struct>,
+}
+/// Defines a strong type to allow type checking between interfaces.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LiteralType {
+    /// This field contains type metadata that is descriptive of the type, but is NOT considered in type-checking. This might be used by
+    /// consumers to identify special behavior or display extended information for the type.
+    #[prost(message, optional, tag = "6")]
+    pub metadata: ::core::option::Option<::prost_types::Struct>,
+    /// This field contains arbitrary data that might have special semantic
+    /// meaning for the client but does not affect internal Flyte behavior.
+    #[prost(message, optional, tag = "9")]
+    pub annotation: ::core::option::Option<TypeAnnotation>,
+    /// Hints to improve type matching.
+    #[prost(message, optional, tag = "11")]
+    pub structure: ::core::option::Option<TypeStructure>,
+    #[prost(oneof = "literal_type::Type", tags = "1, 2, 3, 4, 5, 7, 8, 10")]
+    pub r#type: ::core::option::Option<literal_type::Type>,
+}
+/// Nested message and enum types in `LiteralType`.
+pub mod literal_type {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Type {
+        /// A simple type that can be compared one-to-one with another.
+        #[prost(enumeration = "super::SimpleType", tag = "1")]
+        Simple(i32),
+        /// A complex type that requires matching of inner fields.
+        #[prost(message, tag = "2")]
+        Schema(super::SchemaType),
+        /// Defines the type of the value of a collection. Only homogeneous collections are allowed.
+        #[prost(message, tag = "3")]
+        CollectionType(::prost::alloc::boxed::Box<super::LiteralType>),
+        /// Defines the type of the value of a map type. The type of the key is always a string.
+        #[prost(message, tag = "4")]
+        MapValueType(::prost::alloc::boxed::Box<super::LiteralType>),
+        /// A blob might have specialized implementation details depending on associated metadata.
+        #[prost(message, tag = "5")]
+        Blob(super::BlobType),
+        /// Defines an enum with pre-defined string values.
+        #[prost(message, tag = "7")]
+        EnumType(super::EnumType),
+        /// Generalized schema support.
+        #[prost(message, tag = "8")]
+        StructuredDatasetType(super::StructuredDatasetType),
+        /// Defines a union type with pre-defined LiteralTypes.
+        #[prost(message, tag = "10")]
+        UnionType(super::UnionType),
+    }
+}
+/// A reference to an output produced by a node. The type can be retrieved (and validated) from
+/// the underlying interface of the node.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct OutputReference {
+    /// Node id must exist at the graph layer.
+    #[prost(string, tag = "1")]
+    pub node_id: ::prost::alloc::string::String,
+    /// Variable name must refer to an output variable for the node.
+    #[prost(string, tag = "2")]
+    pub var: ::prost::alloc::string::String,
+    #[prost(message, repeated, tag = "3")]
+    pub attr_path: ::prost::alloc::vec::Vec<PromiseAttribute>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PromiseAttribute {
+    #[prost(oneof = "promise_attribute::Value", tags = "1, 2")]
+    pub value: ::core::option::Option<promise_attribute::Value>,
+}
+/// Nested message and enum types in `PromiseAttribute`.
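+// Illustrative sketch (not generated code): composing a LiteralType for a
+// collection of strings. Recursive oneof variants are boxed by prost, hence the
+// `Box::new`; remaining fields take their defaults:
+//
+//     let string_list = LiteralType {
+//         r#type: Some(literal_type::Type::CollectionType(Box::new(LiteralType {
+//             r#type: Some(literal_type::Type::Simple(SimpleType::String as i32)),
+//             ..Default::default()
+//         }))),
+//         ..Default::default()
+//     };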
+pub mod promise_attribute { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Value { + #[prost(string, tag = "1")] + StringValue(::prost::alloc::string::String), + #[prost(int32, tag = "2")] + IntValue(i32), + } +} +/// Represents an error thrown from a node. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Error { + /// The node id that threw the error. + #[prost(string, tag = "1")] + pub failed_node_id: ::prost::alloc::string::String, + /// Error message thrown. + #[prost(string, tag = "2")] + pub message: ::prost::alloc::string::String, +} +/// Define a set of simple types. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum SimpleType { + None = 0, + Integer = 1, + Float = 2, + String = 3, + Boolean = 4, + Datetime = 5, + Duration = 6, + Binary = 7, + Error = 8, + Struct = 9, +} +impl SimpleType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SimpleType::None => "NONE", + SimpleType::Integer => "INTEGER", + SimpleType::Float => "FLOAT", + SimpleType::String => "STRING", + SimpleType::Boolean => "BOOLEAN", + SimpleType::Datetime => "DATETIME", + SimpleType::Duration => "DURATION", + SimpleType::Binary => "BINARY", + SimpleType::Error => "ERROR", + SimpleType::Struct => "STRUCT", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "NONE" => Some(Self::None), + "INTEGER" => Some(Self::Integer), + "FLOAT" => Some(Self::Float), + "STRING" => Some(Self::String), + "BOOLEAN" => Some(Self::Boolean), + "DATETIME" => Some(Self::Datetime), + "DURATION" => Some(Self::Duration), + "BINARY" => Some(Self::Binary), + "ERROR" => Some(Self::Error), + "STRUCT" => Some(Self::Struct), + _ => None, + } + } +} +/// Primitive Types +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Primitive { + /// Defines one of simple primitive types. These types will get translated into different programming languages as + /// described in + #[prost(oneof = "primitive::Value", tags = "1, 2, 3, 4, 5, 6")] + pub value: ::core::option::Option, +} +/// Nested message and enum types in `Primitive`. +pub mod primitive { + /// Defines one of simple primitive types. These types will get translated into different programming languages as + /// described in + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Value { + #[prost(int64, tag = "1")] + Integer(i64), + #[prost(double, tag = "2")] + FloatValue(f64), + #[prost(string, tag = "3")] + StringValue(::prost::alloc::string::String), + #[prost(bool, tag = "4")] + Boolean(bool), + #[prost(message, tag = "5")] + Datetime(::prost_types::Timestamp), + #[prost(message, tag = "6")] + Duration(::prost_types::Duration), + } +} +/// Used to denote a nil/null/None assignment to a scalar value. The underlying LiteralType for Void is intentionally +/// undefined since it can be assigned to a scalar of any LiteralType. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Void {} +/// Refers to an offloaded set of files. It encapsulates the type of the store and a unique uri for where the data is. +/// There are no restrictions on how the uri is formatted since it will depend on how to interact with the store. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Blob { + #[prost(message, optional, tag = "1")] + pub metadata: ::core::option::Option, + #[prost(string, tag = "3")] + pub uri: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct BlobMetadata { + #[prost(message, optional, tag = "1")] + pub r#type: ::core::option::Option, +} +/// A simple byte array with a tag to help different parts of the system communicate about what is in the byte array. +/// It's strongly advisable that consumers of this type define a unique tag and validate the tag before parsing the data. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Binary { + #[prost(bytes = "vec", tag = "1")] + pub value: ::prost::alloc::vec::Vec, + #[prost(string, tag = "2")] + pub tag: ::prost::alloc::string::String, +} +/// A strongly typed schema that defines the interface of data retrieved from the underlying storage medium. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Schema { + #[prost(string, tag = "1")] + pub uri: ::prost::alloc::string::String, + #[prost(message, optional, tag = "3")] + pub r#type: ::core::option::Option, +} +/// The runtime representation of a tagged union value. See `UnionType` for more details. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Union { + #[prost(message, optional, boxed, tag = "1")] + pub value: ::core::option::Option<::prost::alloc::boxed::Box>, + #[prost(message, optional, tag = "2")] + pub r#type: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StructuredDatasetMetadata { + /// Bundle the type information along with the literal. + /// This is here because StructuredDatasets can often be more defined at run time than at compile time. + /// That is, at compile time you might only declare a task to return a pandas dataframe or a StructuredDataset, + /// without any column information, but at run time, you might have that column information. + /// flytekit python will copy this type information into the literal, from the type information, if not provided by + /// the various plugins (encoders). + /// Since this field is run time generated, it's not used for any type checking. + #[prost(message, optional, tag = "1")] + pub structured_dataset_type: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StructuredDataset { + /// String location uniquely identifying where the data is. + /// Should start with the storage location (e.g. s3://, gs://, bq://, etc.) 
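+    // Illustrative sketch (not generated code): pointing a StructuredDataset at a
+    // parquet dataset (URI and format are placeholders):
+    //
+    //     let sd = StructuredDataset {
+    //         uri: "s3://my-bucket/datasets/run-42/".to_string(),
+    //         metadata: Some(StructuredDatasetMetadata {
+    //             structured_dataset_type: Some(StructuredDatasetType {
+    //                 format: "parquet".to_string(),
+    //                 ..Default::default()
+    //             }),
+    //         }),
+    //     };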
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct StructuredDataset {
+    /// String location uniquely identifying where the data is.
+    /// Should start with the storage location (e.g. s3://, gs://, bq://, etc.)
+    #[prost(string, tag = "1")]
+    pub uri: ::prost::alloc::string::String,
+    #[prost(message, optional, tag = "2")]
+    pub metadata: ::core::option::Option<StructuredDatasetMetadata>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Scalar {
+    #[prost(oneof = "scalar::Value", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9")]
+    pub value: ::core::option::Option<scalar::Value>,
+}
+/// Nested message and enum types in `Scalar`.
+pub mod scalar {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Value {
+        #[prost(message, tag = "1")]
+        Primitive(super::Primitive),
+        #[prost(message, tag = "2")]
+        Blob(super::Blob),
+        #[prost(message, tag = "3")]
+        Binary(super::Binary),
+        #[prost(message, tag = "4")]
+        Schema(super::Schema),
+        #[prost(message, tag = "5")]
+        NoneType(super::Void),
+        #[prost(message, tag = "6")]
+        Error(super::Error),
+        #[prost(message, tag = "7")]
+        Generic(::prost_types::Struct),
+        #[prost(message, tag = "8")]
+        StructuredDataset(super::StructuredDataset),
+        #[prost(message, tag = "9")]
+        Union(::prost::alloc::boxed::Box<super::Union>),
+    }
+}
+/// A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Literal {
+    /// A hash representing this literal.
+    /// This is used for caching purposes. For more details refer to RFC 1893
+    /// ()
+    #[prost(string, tag = "4")]
+    pub hash: ::prost::alloc::string::String,
+    /// Additional metadata for literals.
+    #[prost(map = "string, string", tag = "5")]
+    pub metadata: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        ::prost::alloc::string::String,
+    >,
+    #[prost(oneof = "literal::Value", tags = "1, 2, 3")]
+    pub value: ::core::option::Option<literal::Value>,
+}
+/// Nested message and enum types in `Literal`.
+pub mod literal {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Value {
+        /// A simple value.
+        #[prost(message, tag = "1")]
+        Scalar(::prost::alloc::boxed::Box<super::Scalar>),
+        /// A collection of literals to allow nesting.
+        #[prost(message, tag = "2")]
+        Collection(super::LiteralCollection),
+        /// A map of strings to literals.
+        #[prost(message, tag = "3")]
+        Map(super::LiteralMap),
+    }
+}
+/// A collection of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LiteralCollection {
+    #[prost(message, repeated, tag = "1")]
+    pub literals: ::prost::alloc::vec::Vec<Literal>,
+}
+/// A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LiteralMap {
+    #[prost(map = "string, message", tag = "1")]
+    pub literals: ::std::collections::HashMap<::prost::alloc::string::String, Literal>,
+}
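+// NOTE (editor's sketch, not prost output): the Literal/Scalar/Primitive nesting above
+// in practice; `literal::Value::Scalar` boxes its payload, and `LiteralMap` is a plain
+// HashMap keyed by variable name.
+//
+//     let lit = Literal {
+//         hash: String::new(),
+//         metadata: Default::default(),
+//         value: Some(literal::Value::Scalar(Box::new(Scalar {
+//             value: Some(scalar::Value::Primitive(Primitive {
+//                 value: Some(primitive::Value::Integer(42)),
+//             })),
+//         }))),
+//     };
+//     let inputs = LiteralMap {
+//         literals: [("x".to_string(), lit)].into_iter().collect(),
+//     };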
+/// A collection of BindingData items.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BindingDataCollection {
+    #[prost(message, repeated, tag = "1")]
+    pub bindings: ::prost::alloc::vec::Vec<BindingData>,
+}
+/// A map of BindingData items.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BindingDataMap {
+    #[prost(map = "string, message", tag = "1")]
+    pub bindings: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        BindingData,
+    >,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct UnionInfo {
+    #[prost(message, optional, tag = "1")]
+    pub target_type: ::core::option::Option<LiteralType>,
+}
+/// Specifies either a simple value or a reference to another output.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BindingData {
+    #[prost(message, optional, tag = "5")]
+    pub union: ::core::option::Option<UnionInfo>,
+    #[prost(oneof = "binding_data::Value", tags = "1, 2, 3, 4")]
+    pub value: ::core::option::Option<binding_data::Value>,
+}
+/// Nested message and enum types in `BindingData`.
+pub mod binding_data {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Value {
+        /// A simple scalar value.
+        #[prost(message, tag = "1")]
+        Scalar(super::Scalar),
+        /// A collection of binding data. This allows nesting of binding data to any number
+        /// of levels.
+        #[prost(message, tag = "2")]
+        Collection(super::BindingDataCollection),
+        /// References an output promised by another node.
+        #[prost(message, tag = "3")]
+        Promise(super::OutputReference),
+        /// A map of bindings. The key is always a string.
+        #[prost(message, tag = "4")]
+        Map(super::BindingDataMap),
+    }
+}
+/// An input/output binding of a variable to either static value or a node output.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Binding {
+    /// Variable name must match an input/output variable of the node.
+    #[prost(string, tag = "1")]
+    pub var: ::prost::alloc::string::String,
+    /// Data to use to bind this variable.
+    #[prost(message, optional, tag = "2")]
+    pub binding: ::core::option::Option<BindingData>,
+}
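+// NOTE (editor's sketch, not prost output): binding the static scalar `1` to a node
+// input named "n" with the types directly above; `Promise` would be used instead of
+// `Scalar` to reference another node's output.
+//
+//     let b = Binding {
+//         var: "n".to_string(),
+//         binding: Some(BindingData {
+//             union: None,
+//             value: Some(binding_data::Value::Scalar(Scalar {
+//                 value: Some(scalar::Value::Primitive(Primitive {
+//                     value: Some(primitive::Value::Integer(1)),
+//                 })),
+//             })),
+//         }),
+//     };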
+/// A generic key value pair.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct KeyValuePair {
+    /// required.
+    #[prost(string, tag = "1")]
+    pub key: ::prost::alloc::string::String,
+    /// +optional.
+    #[prost(string, tag = "2")]
+    pub value: ::prost::alloc::string::String,
+}
+/// Retry strategy associated with an executable unit.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RetryStrategy {
+    /// Number of retries. Retries will be consumed when the job fails with a recoverable error.
+    /// The number of retries must be less than or equal to 10.
+    #[prost(uint32, tag = "5")]
+    pub retries: u32,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ArtifactKey {
+    /// Project, domain, and suffix need to be unique across a given artifact store.
+    #[prost(string, tag = "1")]
+    pub project: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub domain: ::prost::alloc::string::String,
+    #[prost(string, tag = "3")]
+    pub name: ::prost::alloc::string::String,
+    #[prost(string, tag = "4")]
+    pub org: ::prost::alloc::string::String,
+}
+/// Only valid for triggers
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ArtifactBindingData {
+    /// This is only relevant in the time partition case
+    #[prost(message, optional, tag = "7")]
+    pub time_transform: ::core::option::Option<TimeTransform>,
+    /// These two fields are only relevant in the partition value case
+    #[prost(oneof = "artifact_binding_data::PartitionData", tags = "5, 6")]
+    pub partition_data: ::core::option::Option<artifact_binding_data::PartitionData>,
+}
+/// Nested message and enum types in `ArtifactBindingData`.
+pub mod artifact_binding_data {
+    /// These two fields are only relevant in the partition value case
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum PartitionData {
+        #[prost(string, tag = "5")]
+        PartitionKey(::prost::alloc::string::String),
+        #[prost(bool, tag = "6")]
+        BindToTimePartition(bool),
+    }
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TimeTransform {
+    #[prost(string, tag = "1")]
+    pub transform: ::prost::alloc::string::String,
+    #[prost(enumeration = "Operator", tag = "2")]
+    pub op: i32,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct InputBindingData {
+    #[prost(string, tag = "1")]
+    pub var: ::prost::alloc::string::String,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RuntimeBinding {}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LabelValue {
+    #[prost(oneof = "label_value::Value", tags = "1, 2, 3, 4, 5")]
+    pub value: ::core::option::Option<label_value::Value>,
+}
+/// Nested message and enum types in `LabelValue`.
+pub mod label_value {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Value {
+        /// The string static value is for use in the Partitions object
+        #[prost(string, tag = "1")]
+        StaticValue(::prost::alloc::string::String),
+        /// The time value is for use in the TimePartition case
+        #[prost(message, tag = "2")]
+        TimeValue(::prost_types::Timestamp),
+        #[prost(message, tag = "3")]
+        TriggeredBinding(super::ArtifactBindingData),
+        #[prost(message, tag = "4")]
+        InputBinding(super::InputBindingData),
+        #[prost(message, tag = "5")]
+        RuntimeBinding(super::RuntimeBinding),
+    }
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Partitions {
+    #[prost(map = "string, message", tag = "1")]
+    pub value: ::std::collections::HashMap<::prost::alloc::string::String, LabelValue>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TimePartition {
+    #[prost(message, optional, tag = "1")]
+    pub value: ::core::option::Option<LabelValue>,
+    #[prost(enumeration = "Granularity", tag = "2")]
+    pub granularity: i32,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ArtifactId {
+    #[prost(message, optional, tag = "1")]
+    pub artifact_key: ::core::option::Option<ArtifactKey>,
+    #[prost(string, tag = "2")]
+    pub version: ::prost::alloc::string::String,
+    /// Think of a partition as a tag on an Artifact, except it's a key-value pair.
+    /// Different partitions naturally have different versions (execution ids).
+    #[prost(message, optional, tag = "3")]
+    pub partitions: ::core::option::Option<Partitions>,
+    /// There is no such thing as an empty time partition - if it's not set, then there is no time partition.
+    #[prost(message, optional, tag = "4")]
+    pub time_partition: ::core::option::Option<TimePartition>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ArtifactTag {
+    #[prost(message, optional, tag = "1")]
+    pub artifact_key: ::core::option::Option<ArtifactKey>,
+    #[prost(message, optional, tag = "2")]
+    pub value: ::core::option::Option<LabelValue>,
+}
+/// Uniqueness constraints for Artifacts
+/// - project, domain, name, version, partitions
+/// Option 2 (tags are standalone, point to an individual artifact id):
+/// - project, domain, name, alias (points to one partition if partitioned)
+/// - project, domain, name, partition key, partition value
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ArtifactQuery {
+    #[prost(oneof = "artifact_query::Identifier", tags = "1, 2, 3, 4")]
+    pub identifier: ::core::option::Option<artifact_query::Identifier>,
+}
+/// Nested message and enum types in `ArtifactQuery`.
+pub mod artifact_query {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Identifier {
+        #[prost(message, tag = "1")]
+        ArtifactId(super::ArtifactId),
+        #[prost(message, tag = "2")]
+        ArtifactTag(super::ArtifactTag),
+        #[prost(string, tag = "3")]
+        Uri(::prost::alloc::string::String),
+        /// This is used in the trigger case, where a user specifies a value for an input that is one of the triggering
+        /// artifacts, or a partition value derived from a triggering artifact.
+        #[prost(message, tag = "4")]
+        Binding(super::ArtifactBindingData),
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum Granularity {
+    Unset = 0,
+    Minute = 1,
+    Hour = 2,
+    /// default
+    Day = 3,
+    Month = 4,
+}
+impl Granularity {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Granularity::Unset => "UNSET",
+            Granularity::Minute => "MINUTE",
+            Granularity::Hour => "HOUR",
+            Granularity::Day => "DAY",
+            Granularity::Month => "MONTH",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "UNSET" => Some(Self::Unset),
+            "MINUTE" => Some(Self::Minute),
+            "HOUR" => Some(Self::Hour),
+            "DAY" => Some(Self::Day),
+            "MONTH" => Some(Self::Month),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum Operator {
+    Minus = 0,
+    Plus = 1,
+}
+impl Operator {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            Operator::Minus => "MINUS",
+            Operator::Plus => "PLUS",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "MINUS" => Some(Self::Minus),
+            "PLUS" => Some(Self::Plus),
+            _ => None,
+        }
+    }
+}
+/// Defines a strongly typed variable.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Variable {
+    /// Variable literal type.
+    #[prost(message, optional, tag = "1")]
+    pub r#type: ::core::option::Option<LiteralType>,
+    /// +optional string describing input variable
+    #[prost(string, tag = "2")]
+    pub description: ::prost::alloc::string::String,
+    /// +optional This object allows the user to specify how Artifacts are created.
+    /// name, tag, partitions can be specified. The other fields (version and project/domain) are ignored.
+    #[prost(message, optional, tag = "3")]
+    pub artifact_partial_id: ::core::option::Option<ArtifactId>,
+    #[prost(message, optional, tag = "4")]
+    pub artifact_tag: ::core::option::Option<ArtifactTag>,
+}
+/// A map of Variables
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct VariableMap {
+    /// Defines a map of variable names to variables.
+    #[prost(map = "string, message", tag = "1")]
+    pub variables: ::std::collections::HashMap<::prost::alloc::string::String, Variable>,
+}
+/// Defines strongly typed inputs and outputs.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TypedInterface {
+    #[prost(message, optional, tag = "1")]
+    pub inputs: ::core::option::Option<VariableMap>,
+    #[prost(message, optional, tag = "2")]
+    pub outputs: ::core::option::Option<VariableMap>,
+}
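+// NOTE (editor's sketch, not prost output): a single-input interface. This assumes the
+// `LiteralType`/`literal_type::Type` shapes generated from types.proto elsewhere in
+// this module; enum-typed fields are stored as i32 per prost conventions.
+//
+//     let interface = TypedInterface {
+//         inputs: Some(VariableMap {
+//             variables: [("n".to_string(), Variable {
+//                 r#type: Some(LiteralType {
+//                     r#type: Some(literal_type::Type::Simple(SimpleType::Integer as i32)),
+//                     ..Default::default()
+//                 }),
+//                 description: "input count".to_string(),
+//                 artifact_partial_id: None,
+//                 artifact_tag: None,
+//             })].into_iter().collect(),
+//         }),
+//         outputs: None,
+//     };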
+/// A parameter is used as input to a launch plan and has
+/// the special ability to have a default value or mark itself as required.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Parameter {
+    /// +required Variable. Defines the type of the variable backing this parameter.
+    #[prost(message, optional, tag = "1")]
+    pub var: ::core::option::Option<Variable>,
+    /// +optional
+    #[prost(oneof = "parameter::Behavior", tags = "2, 3, 4, 5")]
+    pub behavior: ::core::option::Option<parameter::Behavior>,
+}
+/// Nested message and enum types in `Parameter`.
+pub mod parameter {
+    /// +optional
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Behavior {
+        /// Defines a default value that has to match the variable type defined.
+        #[prost(message, tag = "2")]
+        Default(super::Literal),
+        /// +optional, is this value required to be filled.
+        #[prost(bool, tag = "3")]
+        Required(bool),
+        /// This is an execution time search basically that should result in exactly one Artifact with a Type that
+        /// matches the type of the variable.
+        #[prost(message, tag = "4")]
+        ArtifactQuery(super::ArtifactQuery),
+        #[prost(message, tag = "5")]
+        ArtifactId(super::ArtifactId),
+    }
+}
+/// A map of Parameters.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ParameterMap {
+    /// Defines a map of parameter names to parameters.
+    #[prost(map = "string, message", tag = "1")]
+    pub parameters: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        Parameter,
+    >,
+}
+/// Secret encapsulates information about the secret a task needs to proceed. An environment variable
+/// FLYTE_SECRETS_ENV_PREFIX will be passed to indicate the prefix of the environment variables that will be present if
+/// secrets are passed through environment variables.
+/// FLYTE_SECRETS_DEFAULT_DIR will be passed to indicate the prefix of the path where secrets will be mounted if secrets
+/// are passed through file mounts.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Secret {
+    /// The name of the secret group where to find the key referenced below. For K8s secrets, this should be the name of
+    /// the v1/secret object. For Confidant, this should be the Credential name. For Vault, this should be the secret name.
+    /// For AWS Secret Manager, this should be the name of the secret.
+    /// +required
+    #[prost(string, tag = "1")]
+    pub group: ::prost::alloc::string::String,
+    /// The group version to fetch. This is not supported in all secret management systems. It'll be ignored for the ones
+    /// that do not support it.
+    /// +optional
+    #[prost(string, tag = "2")]
+    pub group_version: ::prost::alloc::string::String,
+    /// The name of the secret to mount. This has to match an existing secret in the system. It's up to the implementation
+    /// of the secret management system to require case sensitivity. For K8s secrets, Confidant and Vault, this should
+    /// match one of the keys inside the secret. For AWS Secret Manager, it's ignored.
+    /// +optional
+    #[prost(string, tag = "3")]
+    pub key: ::prost::alloc::string::String,
+    /// mount_requirement is optional. Indicates where the secret has to be mounted. If provided, the execution will fail
+    /// if the underlying key management system cannot satisfy that requirement. If not provided, the default location
+    /// will depend on the key management system.
+    /// +optional
+    #[prost(enumeration = "secret::MountType", tag = "4")]
+    pub mount_requirement: i32,
+}
+/// Nested message and enum types in `Secret`.
+pub mod secret {
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum MountType {
+        /// Default case, indicates the client can tolerate either mounting option.
+        Any = 0,
+        /// ENV_VAR indicates the secret needs to be mounted as an environment variable.
+        EnvVar = 1,
+        /// FILE indicates the secret needs to be mounted as a file.
+        File = 2,
+    }
+    impl MountType {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                MountType::Any => "ANY",
+                MountType::EnvVar => "ENV_VAR",
+                MountType::File => "FILE",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "ANY" => Some(Self::Any),
+                "ENV_VAR" => Some(Self::EnvVar),
+                "FILE" => Some(Self::File),
+                _ => None,
+            }
+        }
+    }
+}
+/// OAuth2Client encapsulates OAuth2 Client Credentials to be used when making calls on behalf of that task.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct OAuth2Client {
+    /// client_id is the public id for the client to use. The system will not perform any pre-auth validation that the
+    /// secret requested matches the client_id indicated here.
+    /// +required
+    #[prost(string, tag = "1")]
+    pub client_id: ::prost::alloc::string::String,
+    /// client_secret is a reference to the secret used to authenticate the OAuth2 client.
+    /// +required
+    #[prost(message, optional, tag = "2")]
+    pub client_secret: ::core::option::Option<Secret>,
+}
+/// Identity encapsulates the various security identities a task can run as. It's up to the underlying plugin to pick the
+/// right identity for the execution environment.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Identity {
+    /// iam_role references the fully qualified name of Identity & Access Management role to impersonate.
+    #[prost(string, tag = "1")]
+    pub iam_role: ::prost::alloc::string::String,
+    /// k8s_service_account references a kubernetes service account to impersonate.
+    #[prost(string, tag = "2")]
+    pub k8s_service_account: ::prost::alloc::string::String,
+    /// oauth2_client references an oauth2 client. Backend plugins can use this information to impersonate the client when
+    /// making external calls.
+    #[prost(message, optional, tag = "3")]
+    pub oauth2_client: ::core::option::Option<OAuth2Client>,
+    /// execution_identity references the subject who makes the execution
+    #[prost(string, tag = "4")]
+    pub execution_identity: ::prost::alloc::string::String,
+}
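+// NOTE (editor's sketch, not prost output): requesting a secret mounted as a file; the
+// group/key names are hypothetical, and `mount_requirement` stores the
+// `secret::MountType` enum as an i32.
+//
+//     let s = Secret {
+//         group: "user-secrets".to_string(),
+//         group_version: String::new(),
+//         key: "api-key".to_string(),
+//         mount_requirement: secret::MountType::File as i32,
+//     };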
+/// OAuth2TokenRequest encapsulates information needed to request an OAuth2 token.
+/// FLYTE_TOKENS_ENV_PREFIX will be passed to indicate the prefix of the environment variables that will be present if
+/// tokens are passed through environment variables.
+/// FLYTE_TOKENS_PATH_PREFIX will be passed to indicate the prefix of the path where secrets will be mounted if tokens
+/// are passed through file mounts.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct OAuth2TokenRequest {
+    /// name indicates a unique id for the token request within this task's token requests. It'll be used as a suffix for
+    /// environment variables and as a filename for mounting tokens as files.
+    /// +required
+    #[prost(string, tag = "1")]
+    pub name: ::prost::alloc::string::String,
+    /// type indicates the type of the request to make. Defaults to CLIENT_CREDENTIALS.
+    /// +required
+    #[prost(enumeration = "o_auth2_token_request::Type", tag = "2")]
+    pub r#type: i32,
+    /// client references the client_id/secret to use to request the OAuth2 token.
+    /// +required
+    #[prost(message, optional, tag = "3")]
+    pub client: ::core::option::Option<OAuth2Client>,
+    /// idp_discovery_endpoint references the discovery endpoint used to retrieve token endpoint and other related
+    /// information.
+    /// +optional
+    #[prost(string, tag = "4")]
+    pub idp_discovery_endpoint: ::prost::alloc::string::String,
+    /// token_endpoint references the token issuance endpoint. If idp_discovery_endpoint is not provided, this parameter is
+    /// mandatory.
+    /// +optional
+    #[prost(string, tag = "5")]
+    pub token_endpoint: ::prost::alloc::string::String,
+}
+/// Nested message and enum types in `OAuth2TokenRequest`.
+pub mod o_auth2_token_request {
+    /// Type of the token requested.
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum Type {
+        /// CLIENT_CREDENTIALS indicates a 2-legged OAuth token requested using client credentials.
+        ClientCredentials = 0,
+    }
+    impl Type {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                Type::ClientCredentials => "CLIENT_CREDENTIALS",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "CLIENT_CREDENTIALS" => Some(Self::ClientCredentials),
+                _ => None,
+            }
+        }
+    }
+}
+/// SecurityContext holds security attributes that apply to tasks.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SecurityContext {
+    /// run_as encapsulates the identity a pod should run as. If the task fills in multiple fields here, it'll be up to the
+    /// backend plugin to choose the appropriate identity for the execution engine the task will run on.
+    #[prost(message, optional, tag = "1")]
+    pub run_as: ::core::option::Option<Identity>,
+    /// secrets indicate the list of secrets the task needs in order to proceed. Secrets will be mounted/passed to the
+    /// pod as it starts. If the plugin responsible for kicking off the task will not run it on a flyte cluster (e.g. AWS
+    /// Batch), it's the responsibility of the plugin to fetch the secret (which means propeller identity will need access
+    /// to the secret) and to pass it to the remote execution engine.
+    #[prost(message, repeated, tag = "2")]
+    pub secrets: ::prost::alloc::vec::Vec<Secret>,
+    /// tokens indicate the list of token requests the task needs in order to proceed. Tokens will be mounted/passed to the
+    /// pod as it starts. If the plugin responsible for kicking off the task will not run it on a flyte cluster (e.g. AWS
+    /// Batch), it's the responsibility of the plugin to fetch the secret (which means propeller identity will need access
+    /// to the secret) and to pass it to the remote execution engine.
+    #[prost(message, repeated, tag = "3")]
+    pub tokens: ::prost::alloc::vec::Vec<OAuth2TokenRequest>,
+}
+/// A customizable interface to convey resources requested for a container. This can be interpreted differently for different
+/// container engines.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Resources {
+    /// The desired set of resources requested. ResourceNames must be unique within the list.
+    #[prost(message, repeated, tag = "1")]
+    pub requests: ::prost::alloc::vec::Vec<resources::ResourceEntry>,
+    /// Defines a set of bounds (e.g. min/max) within which the task can reliably run. ResourceNames must be unique
+    /// within the list.
+    #[prost(message, repeated, tag = "2")]
+    pub limits: ::prost::alloc::vec::Vec<resources::ResourceEntry>,
+}
+/// Nested message and enum types in `Resources`.
+pub mod resources {
+    /// Encapsulates a resource name and value.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct ResourceEntry {
+        /// Resource name.
+        #[prost(enumeration = "ResourceName", tag = "1")]
+        pub name: i32,
+        /// Value must be a valid k8s quantity. See
+        ///
+        #[prost(string, tag = "2")]
+        pub value: ::prost::alloc::string::String,
+    }
+    /// Known resource names.
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum ResourceName {
+        Unknown = 0,
+        Cpu = 1,
+        Gpu = 2,
+        Memory = 3,
+        Storage = 4,
+        /// For Kubernetes-based deployments, pods use ephemeral local storage for scratch space, caching, and for logs.
+        EphemeralStorage = 5,
+    }
+    impl ResourceName {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                ResourceName::Unknown => "UNKNOWN",
+                ResourceName::Cpu => "CPU",
+                ResourceName::Gpu => "GPU",
+                ResourceName::Memory => "MEMORY",
+                ResourceName::Storage => "STORAGE",
+                ResourceName::EphemeralStorage => "EPHEMERAL_STORAGE",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "UNKNOWN" => Some(Self::Unknown),
+                "CPU" => Some(Self::Cpu),
+                "GPU" => Some(Self::Gpu),
+                "MEMORY" => Some(Self::Memory),
+                "STORAGE" => Some(Self::Storage),
+                "EPHEMERAL_STORAGE" => Some(Self::EphemeralStorage),
+                _ => None,
+            }
+        }
+    }
+}
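+// NOTE (editor's sketch, not prost output): a 1-CPU request with a 2-CPU limit; `name`
+// holds the `ResourceName` enum as an i32 and `value` must be a valid k8s quantity.
+//
+//     let res = Resources {
+//         requests: vec![resources::ResourceEntry {
+//             name: resources::ResourceName::Cpu as i32,
+//             value: "1".to_string(),
+//         }],
+//         limits: vec![resources::ResourceEntry {
+//             name: resources::ResourceName::Cpu as i32,
+//             value: "2".to_string(),
+//         }],
+//     };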
+/// Metadata associated with the GPU accelerator to allocate to a task. Contains
+/// information about device type, and for multi-instance GPUs, the partition size to
+/// use.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GpuAccelerator {
+    /// This can be any arbitrary string, and should be informed by the labels or taints
+    /// associated with the nodes in question. Default cloud provider labels typically
+    /// use the following values: `nvidia-tesla-t4`, `nvidia-tesla-a100`, etc.
+    #[prost(string, tag = "1")]
+    pub device: ::prost::alloc::string::String,
+    #[prost(oneof = "gpu_accelerator::PartitionSizeValue", tags = "2, 3")]
+    pub partition_size_value: ::core::option::Option<
+        gpu_accelerator::PartitionSizeValue,
+    >,
+}
+/// Nested message and enum types in `GPUAccelerator`.
+pub mod gpu_accelerator {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum PartitionSizeValue {
+        #[prost(bool, tag = "2")]
+        Unpartitioned(bool),
+        /// Like `device`, this can be any arbitrary string, and should be informed by
+        /// the labels or taints associated with the nodes in question. Default cloud
+        /// provider labels typically use the following values: `1g.5gb`, `2g.10gb`, etc.
+        #[prost(string, tag = "3")]
+        PartitionSize(::prost::alloc::string::String),
+    }
+}
+/// Encapsulates all non-standard resources, not captured by v1.ResourceRequirements, to
+/// allocate to a task.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExtendedResources {
+    /// GPU accelerator to select for task. Contains information about device type, and
+    /// for multi-instance GPUs, the partition size to use.
+    #[prost(message, optional, tag = "1")]
+    pub gpu_accelerator: ::core::option::Option<GpuAccelerator>,
+}
+/// Runtime information. This is loosely defined to allow for extensibility.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RuntimeMetadata {
+    /// Type of runtime.
+    #[prost(enumeration = "runtime_metadata::RuntimeType", tag = "1")]
+    pub r#type: i32,
+    /// Version of the runtime. All versions should be backward compatible. However, certain cases call for version
+    /// checks to ensure tighter validation or setting expectations.
+    #[prost(string, tag = "2")]
+    pub version: ::prost::alloc::string::String,
+    /// +optional It can be used to provide extra information about the runtime (e.g. python, golang... etc.).
+    #[prost(string, tag = "3")]
+    pub flavor: ::prost::alloc::string::String,
+}
+/// Nested message and enum types in `RuntimeMetadata`.
+pub mod runtime_metadata {
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum RuntimeType {
+        Other = 0,
+        FlyteSdk = 1,
+    }
+    impl RuntimeType {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                RuntimeType::Other => "OTHER",
+                RuntimeType::FlyteSdk => "FLYTE_SDK",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "OTHER" => Some(Self::Other),
+                "FLYTE_SDK" => Some(Self::FlyteSdk),
+                _ => None,
+            }
+        }
+    }
+}
+/// Task Metadata
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskMetadata {
+    /// Indicates whether the system should attempt to lookup this task's output to avoid duplication of work.
+    #[prost(bool, tag = "1")]
+    pub discoverable: bool,
+    /// Runtime information about the task.
+    #[prost(message, optional, tag = "2")]
+    pub runtime: ::core::option::Option<RuntimeMetadata>,
+    /// The overall timeout of a task including user-triggered retries.
+    #[prost(message, optional, tag = "4")]
+    pub timeout: ::core::option::Option<::prost_types::Duration>,
+    /// Number of retries per task.
+    #[prost(message, optional, tag = "5")]
+    pub retries: ::core::option::Option<RetryStrategy>,
+    /// Indicates a logical version to apply to this task for the purpose of discovery.
+    #[prost(string, tag = "6")]
+    pub discovery_version: ::prost::alloc::string::String,
+    /// If set, this indicates that this task is deprecated. This will enable owners of tasks to notify consumers
+    /// of the ending of support for a given task.
+    #[prost(string, tag = "7")]
+    pub deprecated_error_message: ::prost::alloc::string::String,
+    /// Indicates whether the system should attempt to execute discoverable instances in serial to avoid duplicate work
+    #[prost(bool, tag = "9")]
+    pub cache_serializable: bool,
+    /// Indicates whether the task will generate a Deck URI when it finishes executing.
+    #[prost(bool, tag = "10")]
+    pub generates_deck: bool,
+    /// Arbitrary tags that allow users and the platform to store small but arbitrary labels
+    #[prost(map = "string, string", tag = "11")]
+    pub tags: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        ::prost::alloc::string::String,
+    >,
+    /// pod_template_name is the unique name of a PodTemplate k8s resource to be used as the base configuration if this
+    /// task creates a k8s Pod. If this value is set, the specified PodTemplate will be used instead of, but applied
+    /// identically as, the default PodTemplate configured in FlytePropeller.
+    #[prost(string, tag = "12")]
+    pub pod_template_name: ::prost::alloc::string::String,
+    /// cache_ignore_input_vars is the input variables that should not be included when calculating hash for cache.
+    #[prost(string, repeated, tag = "13")]
+    pub cache_ignore_input_vars: ::prost::alloc::vec::Vec<
+        ::prost::alloc::string::String,
+    >,
+    /// Identify whether task is interruptible
+    #[prost(oneof = "task_metadata::InterruptibleValue", tags = "8")]
+    pub interruptible_value: ::core::option::Option<task_metadata::InterruptibleValue>,
+}
+/// Nested message and enum types in `TaskMetadata`.
+pub mod task_metadata {
+    /// Identify whether task is interruptible
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum InterruptibleValue {
+        #[prost(bool, tag = "8")]
+        Interruptible(bool),
+    }
+}
+/// A Task structure that uniquely identifies a task in the system
+/// Tasks are registered as a first step in the system.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskTemplate {
+    /// Auto generated taskId by the system. Task Id uniquely identifies this task globally.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<Identifier>,
+    /// A predefined yet extensible Task type identifier. This can be used to customize any of the components. If no
+    /// extensions are provided in the system, Flyte will resolve this task to its TaskCategory and default to the
+    /// implementation registered for the TaskCategory.
+    #[prost(string, tag = "2")]
+    pub r#type: ::prost::alloc::string::String,
+    /// Extra metadata about the task.
+    #[prost(message, optional, tag = "3")]
+    pub metadata: ::core::option::Option<TaskMetadata>,
+    /// A strongly typed interface for the task. This enables others to use this task within a workflow and guarantees
+    /// compile-time validation of the workflow to avoid costly runtime failures.
+    #[prost(message, optional, tag = "4")]
+    pub interface: ::core::option::Option<TypedInterface>,
+    /// Custom data about the task. This is extensible to allow various plugins in the system.
+    #[prost(message, optional, tag = "5")]
+    pub custom: ::core::option::Option<::prost_types::Struct>,
+    /// This can be used to customize task handling at execution time for the same task type.
+    #[prost(int32, tag = "7")]
+    pub task_type_version: i32,
+    /// security_context encapsulates security attributes requested to run this task.
+    #[prost(message, optional, tag = "8")]
+    pub security_context: ::core::option::Option<SecurityContext>,
+    /// Encapsulates all non-standard resources, not captured by
+    /// v1.ResourceRequirements, to allocate to a task.
+    #[prost(message, optional, tag = "9")]
+    pub extended_resources: ::core::option::Option<ExtendedResources>,
+    /// Metadata about the custom defined for this task. This is extensible to allow various plugins in the system
+    /// to use as required.
+    /// reserve the field numbers 1 through 15 for very frequently occurring message elements
+    #[prost(map = "string, string", tag = "16")]
+    pub config: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        ::prost::alloc::string::String,
+    >,
+    /// Known target types that the system will guarantee plugins for. Custom SDK plugins are allowed to set these if needed.
+    /// If no corresponding execution-layer plugins are found, the system will default to handling these using built-in
+    /// handlers.
+    #[prost(oneof = "task_template::Target", tags = "6, 17, 18")]
+    pub target: ::core::option::Option<task_template::Target>,
+}
+/// Nested message and enum types in `TaskTemplate`.
+pub mod task_template {
+    /// Known target types that the system will guarantee plugins for. Custom SDK plugins are allowed to set these if needed.
+    /// If no corresponding execution-layer plugins are found, the system will default to handling these using built-in
+    /// handlers.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Target {
+        #[prost(message, tag = "6")]
+        Container(super::Container),
+        #[prost(message, tag = "17")]
+        K8sPod(super::K8sPod),
+        #[prost(message, tag = "18")]
+        Sql(super::Sql),
+    }
+}
+/// Defines port properties for a container.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ContainerPort {
+    /// Number of port to expose on the pod's IP address.
+    /// This must be a valid port number, 0 < x < 65536.
+    #[prost(uint32, tag = "1")]
+    pub container_port: u32,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Container {
+    /// Container image url. Eg: docker/redis:latest
+    #[prost(string, tag = "1")]
+    pub image: ::prost::alloc::string::String,
+    /// Command to be executed; if not provided, the default entrypoint in the container image will be used.
+    #[prost(string, repeated, tag = "2")]
+    pub command: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+    /// These will default to Flyte given paths. If provided, the system will not append known paths. If the task still
+    /// needs flyte's inputs and outputs path, add $(FLYTE_INPUT_FILE), $(FLYTE_OUTPUT_FILE) wherever makes sense and the
+    /// system will populate these before executing the container.
+    #[prost(string, repeated, tag = "3")]
+    pub args: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+    /// Container resources requirement as specified by the container engine.
+    #[prost(message, optional, tag = "4")]
+    pub resources: ::core::option::Option<Resources>,
+    /// Environment variables will be set as the container is starting up.
+    #[prost(message, repeated, tag = "5")]
+    pub env: ::prost::alloc::vec::Vec<KeyValuePair>,
+    /// Allows extra configs to be available for the container.
+    /// TODO: elaborate on how configs will become available.
+    /// Deprecated, please use TaskTemplate.config instead.
+    #[deprecated]
+    #[prost(message, repeated, tag = "6")]
+    pub config: ::prost::alloc::vec::Vec<KeyValuePair>,
+    /// Ports to open in the container. This feature is not supported by all execution engines. (e.g. supported on K8s but
+    /// not supported on AWS Batch)
+    /// Only K8s
+    #[prost(message, repeated, tag = "7")]
+    pub ports: ::prost::alloc::vec::Vec<ContainerPort>,
+    /// BETA: Optional configuration for DataLoading. If not specified, then default values are used.
+    /// This makes it possible to run a completely portable container, that uses inputs and outputs
+    /// only from the local file-system and without having any reference to flyteidl. This is supported only on K8s at the moment.
+    /// If data loading is enabled, then data will be mounted in accompanying directories specified in the DataLoadingConfig. If the directories
+    /// are not specified, inputs will be mounted onto and outputs will be uploaded from a pre-determined file-system path. Refer to the documentation
+    /// to understand the default paths.
+    /// Only K8s
+    #[prost(message, optional, tag = "9")]
+    pub data_config: ::core::option::Option<DataLoadingConfig>,
+    #[prost(enumeration = "container::Architecture", tag = "10")]
+    pub architecture: i32,
+}
+/// Nested message and enum types in `Container`.
+pub mod container {
+    /// Architecture-type the container image supports.
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum Architecture {
+        Unknown = 0,
+        Amd64 = 1,
+        Arm64 = 2,
+        ArmV6 = 3,
+        ArmV7 = 4,
+    }
+    impl Architecture {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                Architecture::Unknown => "UNKNOWN",
+                Architecture::Amd64 => "AMD64",
+                Architecture::Arm64 => "ARM64",
+                Architecture::ArmV6 => "ARM_V6",
+                Architecture::ArmV7 => "ARM_V7",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "UNKNOWN" => Some(Self::Unknown),
+                "AMD64" => Some(Self::Amd64),
+                "ARM64" => Some(Self::Arm64),
+                "ARM_V6" => Some(Self::ArmV6),
+                "ARM_V7" => Some(Self::ArmV7),
+                _ => None,
+            }
+        }
+    }
+}
+/// Strategy to use when dealing with Blob, Schema, or multipart blob data (large datasets)
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct IoStrategy {
+    /// Mode to use to manage downloads
+    #[prost(enumeration = "io_strategy::DownloadMode", tag = "1")]
+    pub download_mode: i32,
+    /// Mode to use to manage uploads
+    #[prost(enumeration = "io_strategy::UploadMode", tag = "2")]
+    pub upload_mode: i32,
+}
+/// Nested message and enum types in `IOStrategy`.
+pub mod io_strategy {
+    /// Mode to use for downloading
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum DownloadMode {
+        /// All data will be downloaded before the main container is executed
+        DownloadEager = 0,
+        /// Data will be downloaded as a stream and an End-Of-Stream marker will be written to indicate all data has been downloaded. Refer to protocol for details
+        DownloadStream = 1,
+        /// Large objects (offloaded) will not be downloaded
+        DoNotDownload = 2,
+    }
+    impl DownloadMode {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                DownloadMode::DownloadEager => "DOWNLOAD_EAGER",
+                DownloadMode::DownloadStream => "DOWNLOAD_STREAM",
+                DownloadMode::DoNotDownload => "DO_NOT_DOWNLOAD",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "DOWNLOAD_EAGER" => Some(Self::DownloadEager),
+                "DOWNLOAD_STREAM" => Some(Self::DownloadStream),
+                "DO_NOT_DOWNLOAD" => Some(Self::DoNotDownload),
+                _ => None,
+            }
+        }
+    }
+    /// Mode to use for uploading
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum UploadMode {
+        /// All data will be uploaded after the main container exits
+        UploadOnExit = 0,
+        /// Data will be uploaded as it appears. Refer to protocol specification for details
+        UploadEager = 1,
+        /// Data will not be uploaded, only references will be written
+        DoNotUpload = 2,
+    }
+    impl UploadMode {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                UploadMode::UploadOnExit => "UPLOAD_ON_EXIT",
+                UploadMode::UploadEager => "UPLOAD_EAGER",
+                UploadMode::DoNotUpload => "DO_NOT_UPLOAD",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "UPLOAD_ON_EXIT" => Some(Self::UploadOnExit),
+                "UPLOAD_EAGER" => Some(Self::UploadEager),
+                "DO_NOT_UPLOAD" => Some(Self::DoNotUpload),
+                _ => None,
+            }
+        }
+    }
+}
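+// NOTE (editor's sketch, not prost output): an IoStrategy that streams downloads and
+// uploads eagerly; both fields store their enum as an i32.
+//
+//     let io = IoStrategy {
+//         download_mode: io_strategy::DownloadMode::DownloadStream as i32,
+//         upload_mode: io_strategy::UploadMode::UploadEager as i32,
+//     };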
+/// This configuration allows executing raw containers in Flyte using the Flyte CoPilot system.
+/// Flyte CoPilot eliminates the need for flytekit or an SDK inside the container. Any inputs required by the user's container are side-loaded in the input_path.
+/// Any outputs generated by the user container - within output_path are automatically uploaded.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DataLoadingConfig {
+    /// Flag enables DataLoading Config. If this is not set, data loading will not be used!
+    #[prost(bool, tag = "1")]
+    pub enabled: bool,
+    /// File system path (start at root). This folder will contain all the inputs exploded to a separate file.
+    /// Example, if the input interface needs (x: int, y: blob, z: multipart_blob) and the input path is '/var/flyte/inputs', then the file system will look like
+    /// /var/flyte/inputs/inputs.<metadata format dependent -> .pb .json .yaml> -> Format as defined previously. The Blob and Multipart blob will reference local filesystem instead of remote locations
+    /// /var/flyte/inputs/x -> X is a file that contains the value of x (integer) in string format
+    /// /var/flyte/inputs/y -> Y is a file in Binary format
+    /// /var/flyte/inputs/z/... -> Note Z itself is a directory
+    /// More information about the protocol - refer to docs #TODO reference docs here
+    #[prost(string, tag = "2")]
+    pub input_path: ::prost::alloc::string::String,
+    /// File system path (start at root). This folder should contain all the outputs for the task as individual files and/or an error text file
+    #[prost(string, tag = "3")]
+    pub output_path: ::prost::alloc::string::String,
+    /// In the inputs folder, there will be an additional summary/metadata file that contains references to all files or inlined primitive values.
+    /// This format decides the actual encoding for the data. Refer to the encoding to understand the specifics of the contents and the encoding
+    #[prost(enumeration = "data_loading_config::LiteralMapFormat", tag = "4")]
+    pub format: i32,
+    #[prost(message, optional, tag = "5")]
+    pub io_strategy: ::core::option::Option<IoStrategy>,
+}
+/// Nested message and enum types in `DataLoadingConfig`.
+pub mod data_loading_config {
+    /// LiteralMapFormat decides the encoding format in which the input metadata should be made available to the containers.
+    /// If the user has access to the protocol buffer definitions, it is recommended to use the PROTO format.
+    /// JSON and YAML do not need any protobuf definitions to read it
+    /// All remote references in core.LiteralMap are replaced with local filesystem references (the data is downloaded to local filesystem)
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum LiteralMapFormat {
+        /// JSON / YAML for the metadata (which contains inlined primitive values). The representation is inline with the standard json specification as specified -
+        Json = 0,
+        Yaml = 1,
+        /// Proto is a serialized binary of `core.LiteralMap` defined in flyteidl/core
+        Proto = 2,
+    }
+    impl LiteralMapFormat {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                LiteralMapFormat::Json => "JSON",
+                LiteralMapFormat::Yaml => "YAML",
+                LiteralMapFormat::Proto => "PROTO",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "JSON" => Some(Self::Json),
+                "YAML" => Some(Self::Yaml),
+                "PROTO" => Some(Self::Proto),
+                _ => None,
+            }
+        }
+    }
+}
+/// Defines a pod spec and additional pod metadata that is created when a task is executed.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct K8sPod {
+    /// Contains additional metadata for building a kubernetes pod.
+    #[prost(message, optional, tag = "1")]
+    pub metadata: ::core::option::Option<K8sObjectMetadata>,
+    /// Defines the primary pod spec created when a task is executed.
+    /// This should be a JSON-marshalled pod spec, which can be defined in
+    /// - go, using:
+    /// - python: using
+    #[prost(message, optional, tag = "2")]
+    pub pod_spec: ::core::option::Option<::prost_types::Struct>,
+    /// BETA: Optional configuration for DataLoading. If not specified, then default values are used.
+    /// This makes it possible to run a completely portable container, that uses inputs and outputs
+    /// only from the local file-system and without having any reference to flytekit. This is supported only on K8s at the moment.
+    /// If data loading is enabled, then data will be mounted in accompanying directories specified in the DataLoadingConfig. If the directories
+    /// are not specified, inputs will be mounted onto and outputs will be uploaded from a pre-determined file-system path. Refer to the documentation
+    /// to understand the default paths.
+    /// Only K8s
+    #[prost(message, optional, tag = "3")]
+    pub data_config: ::core::option::Option<DataLoadingConfig>,
+}
+/// Metadata for building a kubernetes object when a task is executed.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct K8sObjectMetadata {
+    /// Optional labels to add to the pod definition.
+    #[prost(map = "string, string", tag = "1")]
+    pub labels: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        ::prost::alloc::string::String,
+    >,
+    /// Optional annotations to add to the pod definition.
+    #[prost(map = "string, string", tag = "2")]
+    pub annotations: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        ::prost::alloc::string::String,
+    >,
+}
+/// Sql represents a generic sql workload with a statement and dialect.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Sql {
+    /// The actual query to run, the query can have templated parameters.
+    /// We use Flyte's Golang templating format for Query templating.
+    /// For example,
+    /// insert overwrite directory '{{ .rawOutputDataPrefix }}' stored as parquet
+    /// select *
+    /// from my_table
+    /// where ds = '{{ .Inputs.ds }}'
+    #[prost(string, tag = "1")]
+    pub statement: ::prost::alloc::string::String,
+    #[prost(enumeration = "sql::Dialect", tag = "2")]
+    pub dialect: i32,
+}
+/// Nested message and enum types in `Sql`.
+pub mod sql {
+    /// The dialect of the SQL statement. This is used to validate and parse SQL statements at compilation time to avoid
+    /// expensive runtime operations. If set to an unsupported dialect, no validation will be done on the statement.
+    /// We support the following dialects: ansi, hive.
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum Dialect {
+        Undefined = 0,
+        Ansi = 1,
+        Hive = 2,
+        Other = 3,
+    }
+    impl Dialect {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                Dialect::Undefined => "UNDEFINED",
+                Dialect::Ansi => "ANSI",
+                Dialect::Hive => "HIVE",
+                Dialect::Other => "OTHER",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "UNDEFINED" => Some(Self::Undefined),
+                "ANSI" => Some(Self::Ansi),
+                "HIVE" => Some(Self::Hive),
+                "OTHER" => Some(Self::Other),
+                _ => None,
+            }
+        }
+    }
+}
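+// NOTE (editor's sketch, not prost output): a templated HIVE query using the Golang
+// templating convention described above.
+//
+//     let q = Sql {
+//         statement: "SELECT * FROM my_table WHERE ds = '{{ .Inputs.ds }}'".to_string(),
+//         dialect: sql::Dialect::Hive as i32,
+//     };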
+/// Defines a 2-level tree where the root is a comparison operator and Operands are primitives or known variables.
+/// Each expression results in a boolean result.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ComparisonExpression {
+    #[prost(enumeration = "comparison_expression::Operator", tag = "1")]
+    pub operator: i32,
+    #[prost(message, optional, tag = "2")]
+    pub left_value: ::core::option::Option<Operand>,
+    #[prost(message, optional, tag = "3")]
+    pub right_value: ::core::option::Option<Operand>,
+}
+/// Nested message and enum types in `ComparisonExpression`.
+pub mod comparison_expression {
+    /// Binary Operator for each expression
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum Operator {
+        Eq = 0,
+        Neq = 1,
+        /// Greater Than
+        Gt = 2,
+        Gte = 3,
+        /// Less Than
+        Lt = 4,
+        Lte = 5,
+    }
+    impl Operator {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                Operator::Eq => "EQ",
+                Operator::Neq => "NEQ",
+                Operator::Gt => "GT",
+                Operator::Gte => "GTE",
+                Operator::Lt => "LT",
+                Operator::Lte => "LTE",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "EQ" => Some(Self::Eq),
+                "NEQ" => Some(Self::Neq),
+                "GT" => Some(Self::Gt),
+                "GTE" => Some(Self::Gte),
+                "LT" => Some(Self::Lt),
+                "LTE" => Some(Self::Lte),
+                _ => None,
+            }
+        }
+    }
+}
+/// Defines an operand to a comparison expression.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Operand {
+    #[prost(oneof = "operand::Val", tags = "1, 2, 3")]
+    pub val: ::core::option::Option<operand::Val>,
+}
+/// Nested message and enum types in `Operand`.
+pub mod operand {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Val {
+        /// Can be a constant
+        #[prost(message, tag = "1")]
+        Primitive(super::Primitive),
+        /// Or one of this node's input variables
+        #[prost(string, tag = "2")]
+        Var(::prost::alloc::string::String),
+        /// Replace the primitive field
+        #[prost(message, tag = "3")]
+        Scalar(super::Scalar),
+    }
+}
+/// Defines a boolean expression tree. It can be a simple or a conjunction expression.
+/// Multiple expressions can be combined using a conjunction or a disjunction to result in a final boolean result.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BooleanExpression {
+    #[prost(oneof = "boolean_expression::Expr", tags = "1, 2")]
+    pub expr: ::core::option::Option<boolean_expression::Expr>,
+}
+/// Nested message and enum types in `BooleanExpression`.
+pub mod boolean_expression {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Expr {
+        #[prost(message, tag = "1")]
+        Conjunction(::prost::alloc::boxed::Box<super::ConjunctionExpression>),
+        #[prost(message, tag = "2")]
+        Comparison(super::ComparisonExpression),
+    }
+}
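+// NOTE (editor's sketch, not prost output): the expression `n > 0` as a
+// BooleanExpression; `Comparison` is the non-boxed variant of the oneof, while
+// `Conjunction` is boxed because it recurses.
+//
+//     let expr = BooleanExpression {
+//         expr: Some(boolean_expression::Expr::Comparison(ComparisonExpression {
+//             operator: comparison_expression::Operator::Gt as i32,
+//             left_value: Some(Operand {
+//                 val: Some(operand::Val::Var("n".to_string())),
+//             }),
+//             right_value: Some(Operand {
+//                 val: Some(operand::Val::Primitive(Primitive {
+//                     value: Some(primitive::Value::Integer(0)),
+//                 })),
+//             }),
+//         })),
+//     };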
+/// Defines a conjunction expression of two boolean expressions.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ConjunctionExpression {
+    #[prost(enumeration = "conjunction_expression::LogicalOperator", tag = "1")]
+    pub operator: i32,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub left_expression: ::core::option::Option<
+        ::prost::alloc::boxed::Box<BooleanExpression>,
+    >,
+    #[prost(message, optional, boxed, tag = "3")]
+    pub right_expression: ::core::option::Option<
+        ::prost::alloc::boxed::Box<BooleanExpression>,
+    >,
+}
+/// Nested message and enum types in `ConjunctionExpression`.
+pub mod conjunction_expression {
+    /// Nested conditions. They can be conjoined using AND / OR
+    /// Order of evaluation is not important as the operators are Commutative
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum LogicalOperator {
+        /// Conjunction
+        And = 0,
+        Or = 1,
+    }
+    impl LogicalOperator {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                LogicalOperator::And => "AND",
+                LogicalOperator::Or => "OR",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "AND" => Some(Self::And),
+                "OR" => Some(Self::Or),
+                _ => None,
+            }
+        }
+    }
+}
+/// Defines a condition and the execution unit that should be executed if the condition is satisfied.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct IfBlock {
+    #[prost(message, optional, tag = "1")]
+    pub condition: ::core::option::Option<BooleanExpression>,
+    #[prost(message, optional, boxed, tag = "2")]
+    pub then_node: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+}
+/// Defines a series of if/else blocks. The first branch whose condition evaluates to true is the one to execute.
+/// If no conditions were satisfied, the else_node or the error will execute.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct IfElseBlock {
+    /// +required. First condition to evaluate.
+    #[prost(message, optional, boxed, tag = "1")]
+    pub case: ::core::option::Option<::prost::alloc::boxed::Box<IfBlock>>,
+    /// +optional. Additional branches to evaluate.
+    #[prost(message, repeated, tag = "2")]
+    pub other: ::prost::alloc::vec::Vec<IfBlock>,
+    /// +required.
+    #[prost(oneof = "if_else_block::Default", tags = "3, 4")]
+    pub default: ::core::option::Option<if_else_block::Default>,
+}
+/// Nested message and enum types in `IfElseBlock`.
+pub mod if_else_block {
+    /// +required.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Default {
+        /// The node to execute in case none of the branches were taken.
+        #[prost(message, tag = "3")]
+        ElseNode(::prost::alloc::boxed::Box<super::Node>),
+        /// An error to throw in case none of the branches were taken.
+        #[prost(message, tag = "4")]
+        Error(super::Error),
+    }
+}
+/// BranchNode is a special node that alters the flow of the workflow graph. It allows the control flow to branch at
+/// runtime based on a series of conditions that get evaluated on various parameters (e.g. inputs, primitives).
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BranchNode {
+    /// +required
+    #[prost(message, optional, boxed, tag = "1")]
+    pub if_else: ::core::option::Option<::prost::alloc::boxed::Box<IfElseBlock>>,
+}
+/// Refers to the task that the Node is to execute.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskNode {
+    /// Optional overrides applied at task execution time.
+    #[prost(message, optional, tag = "2")]
+    pub overrides: ::core::option::Option<TaskNodeOverrides>,
+    #[prost(oneof = "task_node::Reference", tags = "1")]
+    pub reference: ::core::option::Option<task_node::Reference>,
+}
+/// Nested message and enum types in `TaskNode`.
+pub mod task_node {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Reference {
+        /// A globally unique identifier for the task.
+        #[prost(message, tag = "1")]
+        ReferenceId(super::Identifier),
+    }
+}
+/// Refers to the workflow the node is to execute.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowNode {
+    #[prost(oneof = "workflow_node::Reference", tags = "1, 2")]
+    pub reference: ::core::option::Option<workflow_node::Reference>,
+}
+/// Nested message and enum types in `WorkflowNode`.
+pub mod workflow_node {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Reference {
+        /// A globally unique identifier for the launch plan.
+        #[prost(message, tag = "1")]
+        LaunchplanRef(super::Identifier),
+        /// Reference to a subworkflow that should be defined within the compiler context.
+        #[prost(message, tag = "2")]
+        SubWorkflowRef(super::Identifier),
+    }
+}
+/// ApproveCondition represents a dependency on an external approval. During execution, this will manifest as a boolean
+/// signal with the provided signal_id.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ApproveCondition {
+    /// A unique identifier for the requested boolean signal.
+    #[prost(string, tag = "1")]
+    pub signal_id: ::prost::alloc::string::String,
+}
+/// SignalCondition represents a dependency on a signal.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SignalCondition {
+    /// A unique identifier for the requested signal.
+    #[prost(string, tag = "1")]
+    pub signal_id: ::prost::alloc::string::String,
+    /// A type denoting the required value type for this signal.
+    #[prost(message, optional, tag = "2")]
+    pub r#type: ::core::option::Option<LiteralType>,
+    /// The variable name for the signal value in this node's outputs.
+    #[prost(string, tag = "3")]
+    pub output_variable_name: ::prost::alloc::string::String,
+}
+/// SleepCondition represents a dependency on waiting for the specified duration.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SleepCondition {
+    /// The overall duration for this sleep.
+    #[prost(message, optional, tag = "1")]
+    pub duration: ::core::option::Option<::prost_types::Duration>,
+}
+/// GateNode refers to the condition that is required for the gate to successfully complete.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct GateNode {
+    #[prost(oneof = "gate_node::Condition", tags = "1, 2, 3")]
+    pub condition: ::core::option::Option<gate_node::Condition>,
+}
+/// Nested message and enum types in `GateNode`.
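+///
+/// For example, a gate that waits five minutes could carry a sleep condition
+/// (a sketch, not part of the generated output):
+///
+/// ```ignore
+/// let gate = GateNode {
+///     condition: Some(gate_node::Condition::Sleep(SleepCondition {
+///         duration: Some(::prost_types::Duration { seconds: 300, nanos: 0 }),
+///     })),
+/// };
+/// ```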
+pub mod gate_node {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Condition {
+        /// ApproveCondition represents a dependency on an external approval provided by a boolean signal.
+        #[prost(message, tag = "1")]
+        Approve(super::ApproveCondition),
+        /// SignalCondition represents a dependency on a signal.
+        #[prost(message, tag = "2")]
+        Signal(super::SignalCondition),
+        /// SleepCondition represents a dependency on waiting for the specified duration.
+        #[prost(message, tag = "3")]
+        Sleep(super::SleepCondition),
+    }
+}
+/// ArrayNode is a Flyte node type that simplifies the execution of a sub-node over a list of input
+/// values. An ArrayNode can be executed with configurable parallelism (separate from the parent
+/// workflow) and can be configured to succeed when a certain number of sub-nodes succeed.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ArrayNode {
+    /// node is the sub-node that will be executed for each element in the array.
+    #[prost(message, optional, boxed, tag = "1")]
+    pub node: ::core::option::Option<::prost::alloc::boxed::Box<Node>>,
+    /// parallelism defines the minimum number of instances to bring up concurrently at any given
+    /// point. Note that this is an optimistic restriction and that, due to network partitioning or
+    /// other failures, the actual number of currently running instances might be more. This has to
+    /// be a positive number if assigned. The default value is size.
+    #[prost(uint32, tag = "2")]
+    pub parallelism: u32,
+    #[prost(oneof = "array_node::SuccessCriteria", tags = "3, 4")]
+    pub success_criteria: ::core::option::Option<array_node::SuccessCriteria>,
+}
+/// Nested message and enum types in `ArrayNode`.
+pub mod array_node {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum SuccessCriteria {
+        /// min_successes is an absolute number of the minimum number of successful completions of
+        /// sub-nodes. As soon as this criterion is met, the ArrayNode will be marked as successful
+        /// and outputs will be computed. This has to be a non-negative number if assigned. The
+        /// default value is size (if specified).
+        #[prost(uint32, tag = "3")]
+        MinSuccesses(u32),
+        /// If the array job size is not known beforehand, the min_success_ratio can instead be used
+        /// to determine when an ArrayNode can be marked successful.
+        #[prost(float, tag = "4")]
+        MinSuccessRatio(f32),
+    }
+}
+/// Defines extra information about the Node.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NodeMetadata {
+    /// A friendly name for the Node
+    #[prost(string, tag = "1")]
+    pub name: ::prost::alloc::string::String,
+    /// The overall timeout of a task.
+    #[prost(message, optional, tag = "4")]
+    pub timeout: ::core::option::Option<::prost_types::Duration>,
+    /// Number of retries per task.
+    #[prost(message, optional, tag = "5")]
+    pub retries: ::core::option::Option<RetryStrategy>,
+    /// Identifies whether the node is interruptible.
+    #[prost(oneof = "node_metadata::InterruptibleValue", tags = "6")]
+    pub interruptible_value: ::core::option::Option<node_metadata::InterruptibleValue>,
+    /// Identifies whether a node should have its outputs cached.
+    #[prost(oneof = "node_metadata::CacheableValue", tags = "7")]
+    pub cacheable_value: ::core::option::Option<node_metadata::CacheableValue>,
+    /// The version of the cache to use.
+    #[prost(oneof = "node_metadata::CacheVersionValue", tags = "8")]
+    pub cache_version_value: ::core::option::Option<node_metadata::CacheVersionValue>,
+    /// Identifies whether caching operations involving this node should be serialized.
+    #[prost(oneof = "node_metadata::CacheSerializableValue", tags = "9")]
+    pub cache_serializable_value: ::core::option::Option<
+        node_metadata::CacheSerializableValue,
+    >,
+}
+/// Nested message and enum types in `NodeMetadata`.
+pub mod node_metadata {
+    /// Identifies whether the node is interruptible.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum InterruptibleValue {
+        #[prost(bool, tag = "6")]
+        Interruptible(bool),
+    }
+    /// Identifies whether a node should have its outputs cached.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum CacheableValue {
+        #[prost(bool, tag = "7")]
+        Cacheable(bool),
+    }
+    /// The version of the cache to use.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum CacheVersionValue {
+        #[prost(string, tag = "8")]
+        CacheVersion(::prost::alloc::string::String),
+    }
+    /// Identifies whether caching operations involving this node should be serialized.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum CacheSerializableValue {
+        #[prost(bool, tag = "9")]
+        CacheSerializable(bool),
+    }
+}
+/// Links a variable to an alias.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Alias {
+    /// Must match one of the output variable names on a node.
+    #[prost(string, tag = "1")]
+    pub var: ::prost::alloc::string::String,
+    /// A workflow-level unique alias that downstream nodes can refer to in their input.
+    #[prost(string, tag = "2")]
+    pub alias: ::prost::alloc::string::String,
+}
+/// A Workflow graph Node. One unit of execution in the graph. Each node can be linked to a Task, a Workflow or a branch
+/// node.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Node {
+    /// A workflow-level unique identifier that identifies this node in the workflow. 'inputs' and 'outputs' are reserved
+    /// node ids that cannot be used by other nodes.
+    #[prost(string, tag = "1")]
+    pub id: ::prost::alloc::string::String,
+    /// Extra metadata about the node.
+    #[prost(message, optional, tag = "2")]
+    pub metadata: ::core::option::Option<NodeMetadata>,
+    /// Specifies how to bind the underlying interface's inputs. All required inputs specified in the underlying interface
+    /// must be fulfilled.
+    #[prost(message, repeated, tag = "3")]
+    pub inputs: ::prost::alloc::vec::Vec<Binding>,
+    /// +optional Specifies execution dependencies for this node, ensuring it will only get scheduled to run after all its
+    /// upstream nodes have completed. This node will have an implicit dependency on any node that appears in the inputs
+    /// field.
+    #[prost(string, repeated, tag = "4")]
+    pub upstream_node_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+    /// +optional. A node can define aliases for a subset of its outputs. This is particularly useful if different nodes
+    /// need to conform to the same interface (e.g. all branches in a branch node). Downstream nodes must refer to this
+    /// node's outputs using the alias if one is specified.
+    #[prost(message, repeated, tag = "5")]
+    pub output_aliases: ::prost::alloc::vec::Vec<Alias>,
+    /// Information about the target to execute in this node.
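+    ///
+    /// For a task node this is typically `node::Target::TaskNode` wrapping a
+    /// task reference (a sketch; `task_id` is an assumed `Identifier` value):
+    ///
+    /// ```ignore
+    /// let target = node::Target::TaskNode(TaskNode {
+    ///     overrides: None,
+    ///     reference: Some(task_node::Reference::ReferenceId(task_id)),
+    /// });
+    /// ```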
+    #[prost(oneof = "node::Target", tags = "6, 7, 8, 9, 10")]
+    pub target: ::core::option::Option<node::Target>,
+}
+/// Nested message and enum types in `Node`.
+pub mod node {
+    /// Information about the target to execute in this node.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Target {
+        /// Information about the Task to execute in this node.
+        #[prost(message, tag = "6")]
+        TaskNode(super::TaskNode),
+        /// Information about the Workflow to execute in this node.
+        #[prost(message, tag = "7")]
+        WorkflowNode(super::WorkflowNode),
+        /// Information about the branch node to evaluate in this node.
+        #[prost(message, tag = "8")]
+        BranchNode(::prost::alloc::boxed::Box<super::BranchNode>),
+        /// Information about the condition to evaluate in this node.
+        #[prost(message, tag = "9")]
+        GateNode(super::GateNode),
+        /// Information about the sub-node executions for each value in the list of this node's
+        /// input values.
+        #[prost(message, tag = "10")]
+        ArrayNode(::prost::alloc::boxed::Box<super::ArrayNode>),
+    }
+}
+/// This is workflow layer metadata. These settings are only applicable to the workflow as a whole, and do not
+/// percolate down to child entities (like tasks) launched by the workflow.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowMetadata {
+    /// Indicates the runtime priority of workflow executions.
+    #[prost(message, optional, tag = "1")]
+    pub quality_of_service: ::core::option::Option<QualityOfService>,
+    /// Defines how the system should behave when a failure is detected in the workflow execution.
+    #[prost(enumeration = "workflow_metadata::OnFailurePolicy", tag = "2")]
+    pub on_failure: i32,
+    /// Arbitrary tags that allow users and the platform to store small but arbitrary labels
+    #[prost(map = "string, string", tag = "3")]
+    pub tags: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        ::prost::alloc::string::String,
+    >,
+}
+/// Nested message and enum types in `WorkflowMetadata`.
+pub mod workflow_metadata {
+    /// Failure Handling Strategy
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum OnFailurePolicy {
+        /// FAIL_IMMEDIATELY instructs the system to fail as soon as a node fails in the workflow. It'll automatically
+        /// abort all currently running nodes and clean up resources before finally marking the workflow execution as
+        /// failed.
+        FailImmediately = 0,
+        /// FAIL_AFTER_EXECUTABLE_NODES_COMPLETE instructs the system to make as much progress as it can. The system will
+        /// not alter the dependencies of the execution graph, so any node that depends on the failed node will not be
+        /// run. Other nodes will be executed to completion before resources are cleaned up and the workflow execution is
+        /// marked as failed.
+        FailAfterExecutableNodesComplete = 1,
+    }
+    impl OnFailurePolicy {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                OnFailurePolicy::FailImmediately => "FAIL_IMMEDIATELY",
+                OnFailurePolicy::FailAfterExecutableNodesComplete => {
+                    "FAIL_AFTER_EXECUTABLE_NODES_COMPLETE"
+                }
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
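+        ///
+        /// A round-trip sketch (illustrative only):
+        ///
+        /// ```ignore
+        /// let policy = OnFailurePolicy::from_str_name("FAIL_IMMEDIATELY").unwrap();
+        /// assert_eq!(policy.as_str_name(), "FAIL_IMMEDIATELY");
+        /// ```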
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "FAIL_IMMEDIATELY" => Some(Self::FailImmediately),
+                "FAIL_AFTER_EXECUTABLE_NODES_COMPLETE" => {
+                    Some(Self::FailAfterExecutableNodesComplete)
+                }
+                _ => None,
+            }
+        }
+    }
+}
+/// The difference between these settings and the WorkflowMetadata ones is that these are meant to be passed down to
+/// a workflow's underlying entities (like tasks). For instance, 'interruptible' has no meaning at the workflow layer, it
+/// is only relevant when a task executes. The settings here are the defaults that are passed to all nodes
+/// unless explicitly overridden at the node layer.
+/// If you are adding a setting that applies to both the Workflow itself, and everything underneath it, it should be
+/// added to both this object and the WorkflowMetadata object above.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowMetadataDefaults {
+    /// Whether child nodes of the workflow are interruptible.
+    #[prost(bool, tag = "1")]
+    pub interruptible: bool,
+}
+/// Flyte Workflow Structure that encapsulates task, branch and subworkflow nodes to form a statically analyzable,
+/// directed acyclic graph.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowTemplate {
+    /// A globally unique identifier for the workflow.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<Identifier>,
+    /// Extra metadata about the workflow.
+    #[prost(message, optional, tag = "2")]
+    pub metadata: ::core::option::Option<WorkflowMetadata>,
+    /// Defines a strongly typed interface for the Workflow. This can include some optional parameters.
+    #[prost(message, optional, tag = "3")]
+    pub interface: ::core::option::Option<TypedInterface>,
+    /// A list of nodes. In addition, 'globals' is a special reserved node id that can be used to consume workflow inputs.
+    #[prost(message, repeated, tag = "4")]
+    pub nodes: ::prost::alloc::vec::Vec<Node>,
+    /// A list of output bindings that specify how to construct workflow outputs. Bindings can pull node outputs or
+    /// specify literals. All workflow outputs specified in the interface field must be bound in order for the workflow
+    /// to be validated. A workflow has an implicit dependency on all of its nodes to execute successfully in order to
+    /// bind final outputs.
+    /// Most of these outputs will be Bindings with a BindingData of type OutputReference. That is, your workflow can
+    /// just have an output of some constant (`Output(5)`), but usually, the workflow will be pulling
+    /// outputs from the output of a task.
+    #[prost(message, repeated, tag = "5")]
+    pub outputs: ::prost::alloc::vec::Vec<Binding>,
+    /// +optional A catch-all node. This node is executed whenever the execution engine determines the workflow has failed.
+    /// The interface of this node must match the Workflow interface with an additional input named 'error' of type
+    /// pb.lyft.flyte.core.Error.
+    #[prost(message, optional, tag = "6")]
+    pub failure_node: ::core::option::Option<Node>,
+    /// workflow defaults
+    #[prost(message, optional, tag = "7")]
+    pub metadata_defaults: ::core::option::Option<WorkflowMetadataDefaults>,
+}
+/// Optional task node overrides that will be applied at task execution time.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskNodeOverrides {
+    /// A customizable interface to convey resources requested for a task container.
+    #[prost(message, optional, tag = "1")]
+    pub resources: ::core::option::Option<Resources>,
+    /// Overrides for all non-standard resources, not captured by
+    /// v1.ResourceRequirements, to allocate to a task.
+    #[prost(message, optional, tag = "2")]
+    pub extended_resources: ::core::option::Option<ExtendedResources>,
+    /// Override for the image used by task pods.
+    #[prost(string, tag = "3")]
+    pub container_image: ::prost::alloc::string::String,
+}
+/// A structure that uniquely identifies a launch plan in the system.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct LaunchPlanTemplate {
+    /// A globally unique identifier for the launch plan.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<Identifier>,
+    /// The input and output interface for the launch plan
+    #[prost(message, optional, tag = "2")]
+    pub interface: ::core::option::Option<TypedInterface>,
+    /// A collection of input literals that are fixed for the launch plan
+    #[prost(message, optional, tag = "3")]
+    pub fixed_inputs: ::core::option::Option<LiteralMap>,
+}
+/// Span represents a duration trace of Flyte execution. The id field denotes a Flyte execution entity or an operation
+/// which uniquely identifies the Span. The spans attribute allows this Span to be further broken down into more
+/// precise definitions.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct Span {
+    /// start_time defines the instant this span began.
+    #[prost(message, optional, tag = "1")]
+    pub start_time: ::core::option::Option<::prost_types::Timestamp>,
+    /// end_time defines the instant this span completed.
+    #[prost(message, optional, tag = "2")]
+    pub end_time: ::core::option::Option<::prost_types::Timestamp>,
+    /// spans defines a collection of Spans that break down this execution.
+    #[prost(message, repeated, tag = "7")]
+    pub spans: ::prost::alloc::vec::Vec<Span>,
+    #[prost(oneof = "span::Id", tags = "3, 4, 5, 6")]
+    pub id: ::core::option::Option<span::Id>,
+}
+/// Nested message and enum types in `Span`.
+pub mod span {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Id {
+        /// workflow_id is the id of the workflow execution this Span represents.
+        #[prost(message, tag = "3")]
+        WorkflowId(super::WorkflowExecutionIdentifier),
+        /// node_id is the id of the node execution this Span represents.
+        #[prost(message, tag = "4")]
+        NodeId(super::NodeExecutionIdentifier),
+        /// task_id is the id of the task execution this Span represents.
+        #[prost(message, tag = "5")]
+        TaskId(super::TaskExecutionIdentifier),
+        /// operation_id is the id of a unique operation that this Span represents.
+        #[prost(string, tag = "6")]
+        OperationId(::prost::alloc::string::String),
+    }
+}
+/// ExecutionMetrics is a collection of metrics that are collected during the execution of a Flyte task.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExecutionMetricResult {
+    /// The metric this data represents. e.g. EXECUTION_METRIC_USED_CPU_AVG or EXECUTION_METRIC_USED_MEMORY_BYTES_AVG.
+    #[prost(string, tag = "1")]
+    pub metric: ::prost::alloc::string::String,
+    /// The result data in prometheus range query result format
+    ///
+    /// This may include multiple time series, differentiated by their metric labels.
+    /// Start time is greater of (execution attempt start, 48h ago)
+    /// End time is lesser of (execution attempt end, now)
+    #[prost(message, optional, tag = "2")]
+    pub data: ::core::option::Option<::prost_types::Struct>,
+}
+/// Adjacency list for the workflow. This is created as part of the compilation process. Every process after the compilation
+/// step uses this created ConnectionSet
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ConnectionSet {
+    /// A list of all the node ids that are downstream from a given node id
+    #[prost(map = "string, message", tag = "7")]
+    pub downstream: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        connection_set::IdList,
+    >,
+    /// A list of all the node ids that are upstream of this node id
+    #[prost(map = "string, message", tag = "8")]
+    pub upstream: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        connection_set::IdList,
+    >,
+}
+/// Nested message and enum types in `ConnectionSet`.
+pub mod connection_set {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct IdList {
+        #[prost(string, repeated, tag = "1")]
+        pub ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+    }
+}
+/// Output of the compilation step. This object represents one workflow. We store more metadata at this layer
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CompiledWorkflow {
+    /// Completely contained Workflow Template
+    #[prost(message, optional, tag = "1")]
+    pub template: ::core::option::Option<WorkflowTemplate>,
+    /// For internal use only! This field is used by the system and must not be filled in. Any values set will be ignored.
+    #[prost(message, optional, tag = "2")]
+    pub connections: ::core::option::Option<ConnectionSet>,
+}
+/// Output of the compilation step. This object represents one LaunchPlan. We store more metadata at this layer
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CompiledLaunchPlan {
+    /// Completely contained LaunchPlan Template
+    #[prost(message, optional, tag = "1")]
+    pub template: ::core::option::Option<LaunchPlanTemplate>,
+}
+/// Output of the compilation step. This object represents one Task. We store more metadata at this layer
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CompiledTask {
+    /// Completely contained TaskTemplate
+    #[prost(message, optional, tag = "1")]
+    pub template: ::core::option::Option<TaskTemplate>,
+}
+/// A Compiled Workflow Closure contains all the information required to start a new execution, or to visualize a workflow
+/// and its details. The CompiledWorkflowClosure should always contain a primary workflow, that is the main workflow that
+/// will begin the execution. All subworkflows are denormalized. WorkflowNodes refer to the workflow identifiers of
+/// compiled subworkflows.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CompiledWorkflowClosure {
+    /// +required
+    #[prost(message, optional, tag = "1")]
+    pub primary: ::core::option::Option<CompiledWorkflow>,
+    /// Guaranteed that there will only exist one and only one workflow with a given id, i.e., every sub workflow has a
+    /// unique identifier. Also every enclosed subworkflow is used either by a primary workflow or by a subworkflow
+    /// as an inlined workflow
+    /// +optional
+    #[prost(message, repeated, tag = "2")]
+    pub sub_workflows: ::prost::alloc::vec::Vec<CompiledWorkflow>,
+    /// Guaranteed that there will only exist one and only one task with a given id, i.e., every task has a unique id
+    /// +required (at least 1)
+    #[prost(message, repeated, tag = "3")]
+    pub tasks: ::prost::alloc::vec::Vec<CompiledTask>,
+    /// A collection of launch plans that are compiled. Guaranteed that there will only exist one and only one launch plan
+    /// with a given id, i.e., every launch plan has a unique id.
+    #[prost(message, repeated, tag = "4")]
+    pub launch_plans: ::prost::alloc::vec::Vec<CompiledLaunchPlan>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CatalogArtifactTag {
+    /// Artifact ID is a generated name
+    #[prost(string, tag = "1")]
+    pub artifact_id: ::prost::alloc::string::String,
+    /// Flyte computes the tag automatically, as the hash of the values
+    #[prost(string, tag = "2")]
+    pub name: ::prost::alloc::string::String,
+}
+/// Catalog artifact information with specific metadata
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CatalogMetadata {
+    /// Dataset ID in the catalog
+    #[prost(message, optional, tag = "1")]
+    pub dataset_id: ::core::option::Option<Identifier>,
+    /// Artifact tag in the catalog
+    #[prost(message, optional, tag = "2")]
+    pub artifact_tag: ::core::option::Option<CatalogArtifactTag>,
+    /// Optional: Source Execution identifier, if this dataset was generated by another execution in Flyte. This is a one-of field and will depend on the caching context
+    #[prost(oneof = "catalog_metadata::SourceExecution", tags = "3")]
+    pub source_execution: ::core::option::Option<catalog_metadata::SourceExecution>,
+}
+/// Nested message and enum types in `CatalogMetadata`.
+pub mod catalog_metadata {
+    /// Optional: Source Execution identifier, if this dataset was generated by another execution in Flyte. This is a one-of field and will depend on the caching context
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum SourceExecution {
+        /// Today we only support TaskExecutionIdentifier as a source, as catalog caching only works for task executions
+        #[prost(message, tag = "3")]
+        SourceTaskExecution(super::TaskExecutionIdentifier),
+    }
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CatalogReservation {}
+/// Nested message and enum types in `CatalogReservation`.
+pub mod catalog_reservation {
+    /// Indicates the status of a catalog reservation operation.
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum Status {
+        /// Used to indicate that reservations are disabled
+        ReservationDisabled = 0,
+        /// Used to indicate that a reservation was successfully acquired or extended
+        ReservationAcquired = 1,
+        /// Used to indicate that an active reservation currently exists
+        ReservationExists = 2,
+        /// Used to indicate that the reservation has been successfully released
+        ReservationReleased = 3,
+        /// Used to indicate that a reservation operation resulted in failure
+        ReservationFailure = 4,
+    }
+    impl Status {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                Status::ReservationDisabled => "RESERVATION_DISABLED",
+                Status::ReservationAcquired => "RESERVATION_ACQUIRED",
+                Status::ReservationExists => "RESERVATION_EXISTS",
+                Status::ReservationReleased => "RESERVATION_RELEASED",
+                Status::ReservationFailure => "RESERVATION_FAILURE",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "RESERVATION_DISABLED" => Some(Self::ReservationDisabled),
+                "RESERVATION_ACQUIRED" => Some(Self::ReservationAcquired),
+                "RESERVATION_EXISTS" => Some(Self::ReservationExists),
+                "RESERVATION_RELEASED" => Some(Self::ReservationReleased),
+                "RESERVATION_FAILURE" => Some(Self::ReservationFailure),
+                _ => None,
+            }
+        }
+    }
+}
+/// Indicates the status of CatalogCaching. The reason why this is not embedded in TaskNodeMetadata is that we may use it
+/// for other types of nodes as well in the future
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum CatalogCacheStatus {
+    /// Used to indicate that caching was disabled
+    CacheDisabled = 0,
+    /// Used to indicate that the cache lookup resulted in no matches
+    CacheMiss = 1,
+    /// Used to indicate that the associated artifact was a result of a previous execution
+    CacheHit = 2,
+    /// Used to indicate that the resultant artifact was added to the cache
+    CachePopulated = 3,
+    /// Used to indicate that cache lookup failed because of an error
+    CacheLookupFailure = 4,
+    /// Used to indicate that cache put failed because of an error
+    CachePutFailure = 5,
+    /// Used to indicate the cache lookup was skipped
+    CacheSkipped = 6,
+    /// Used to indicate that the cache was evicted
+    CacheEvicted = 7,
+}
+impl CatalogCacheStatus {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            CatalogCacheStatus::CacheDisabled => "CACHE_DISABLED",
+            CatalogCacheStatus::CacheMiss => "CACHE_MISS",
+            CatalogCacheStatus::CacheHit => "CACHE_HIT",
+            CatalogCacheStatus::CachePopulated => "CACHE_POPULATED",
+            CatalogCacheStatus::CacheLookupFailure => "CACHE_LOOKUP_FAILURE",
+            CatalogCacheStatus::CachePutFailure => "CACHE_PUT_FAILURE",
+            CatalogCacheStatus::CacheSkipped => "CACHE_SKIPPED",
+            CatalogCacheStatus::CacheEvicted => "CACHE_EVICTED",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
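+    ///
+    /// A parsing sketch (illustrative only); unknown names yield `None`:
+    ///
+    /// ```ignore
+    /// match CatalogCacheStatus::from_str_name("CACHE_HIT") {
+    ///     Some(CatalogCacheStatus::CacheHit) => println!("reusing cached artifact"),
+    ///     _ => println!("not a cache hit"),
+    /// }
+    /// ```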
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "CACHE_DISABLED" => Some(Self::CacheDisabled),
+            "CACHE_MISS" => Some(Self::CacheMiss),
+            "CACHE_HIT" => Some(Self::CacheHit),
+            "CACHE_POPULATED" => Some(Self::CachePopulated),
+            "CACHE_LOOKUP_FAILURE" => Some(Self::CacheLookupFailure),
+            "CACHE_PUT_FAILURE" => Some(Self::CachePutFailure),
+            "CACHE_SKIPPED" => Some(Self::CacheSkipped),
+            "CACHE_EVICTED" => Some(Self::CacheEvicted),
+            _ => None,
+        }
+    }
+}
diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.event.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.event.rs
new file mode 100644
index 0000000000..46ec0ec406
--- /dev/null
+++ b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.event.rs
@@ -0,0 +1,398 @@
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowExecutionEvent {
+    /// Workflow execution id
+    #[prost(message, optional, tag = "1")]
+    pub execution_id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
+    /// the id of the originator (Propeller) of the event
+    #[prost(string, tag = "2")]
+    pub producer_id: ::prost::alloc::string::String,
+    #[prost(enumeration = "super::core::workflow_execution::Phase", tag = "3")]
+    pub phase: i32,
+    /// This timestamp represents when the original event occurred; it is generated
+    /// by the executor of the workflow.
+    #[prost(message, optional, tag = "4")]
+    pub occurred_at: ::core::option::Option<::prost_types::Timestamp>,
+    #[prost(oneof = "workflow_execution_event::OutputResult", tags = "5, 6, 7")]
+    pub output_result: ::core::option::Option<workflow_execution_event::OutputResult>,
+}
+/// Nested message and enum types in `WorkflowExecutionEvent`.
+pub mod workflow_execution_event {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum OutputResult {
+        /// URL to the output of the execution, it encodes all the information
+        /// including Cloud source provider. i.e., s3://...
+        #[prost(string, tag = "5")]
+        OutputUri(::prost::alloc::string::String),
+        /// Error information for the execution
+        #[prost(message, tag = "6")]
+        Error(super::super::core::ExecutionError),
+        /// Raw output data produced by this workflow execution.
+        #[prost(message, tag = "7")]
+        OutputData(super::super::core::LiteralMap),
+    }
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NodeExecutionEvent {
+    /// Unique identifier for this node execution
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::NodeExecutionIdentifier>,
+    /// the id of the originator (Propeller) of the event
+    #[prost(string, tag = "2")]
+    pub producer_id: ::prost::alloc::string::String,
+    #[prost(enumeration = "super::core::node_execution::Phase", tag = "3")]
+    pub phase: i32,
+    /// This timestamp represents when the original event occurred; it is generated
+    /// by the executor of the node.
+    #[prost(message, optional, tag = "4")]
+    pub occurred_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// \[To be deprecated\] Specifies which task (if any) launched this node.
+    #[prost(message, optional, tag = "9")]
+    pub parent_task_metadata: ::core::option::Option<ParentTaskExecutionMetadata>,
+    /// Specifies the parent node of the current node execution. Node executions at level zero will not have a parent node.
+    #[prost(message, optional, tag = "10")]
+    pub parent_node_metadata: ::core::option::Option<ParentNodeExecutionMetadata>,
+    /// Retry group to indicate grouping of nodes by retries
+    #[prost(string, tag = "11")]
+    pub retry_group: ::prost::alloc::string::String,
+    /// Identifier of the node in the original workflow/graph
+    /// This maps to value of WorkflowTemplate.nodes\[X\].id
+    #[prost(string, tag = "12")]
+    pub spec_node_id: ::prost::alloc::string::String,
+    /// Friendly readable name for the node
+    #[prost(string, tag = "13")]
+    pub node_name: ::prost::alloc::string::String,
+    #[prost(int32, tag = "16")]
+    pub event_version: i32,
+    /// Whether this node launched a subworkflow.
+    #[prost(bool, tag = "17")]
+    pub is_parent: bool,
+    /// Whether this node yielded a dynamic workflow.
+    #[prost(bool, tag = "18")]
+    pub is_dynamic: bool,
+    /// String location uniquely identifying where the deck HTML file is.
+    /// NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar)
+    #[prost(string, tag = "19")]
+    pub deck_uri: ::prost::alloc::string::String,
+    /// This timestamp represents the instant when the event was reported by the executing framework. For example,
+    /// when first processing a node the `occurred_at` timestamp should be the instant propeller makes progress, so when
+    /// literal inputs are initially copied. The event however will not be sent until after the copy completes.
+    /// Extracting both of these timestamps facilitates a more accurate portrayal of the evaluation time-series.
+    #[prost(message, optional, tag = "21")]
+    pub reported_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// Indicates if this node is an ArrayNode.
+    #[prost(bool, tag = "22")]
+    pub is_array: bool,
+    #[prost(oneof = "node_execution_event::InputValue", tags = "5, 20")]
+    pub input_value: ::core::option::Option<node_execution_event::InputValue>,
+    #[prost(oneof = "node_execution_event::OutputResult", tags = "6, 7, 15")]
+    pub output_result: ::core::option::Option<node_execution_event::OutputResult>,
+    /// Additional metadata to do with this event's node target based
+    /// on the node type
+    #[prost(oneof = "node_execution_event::TargetMetadata", tags = "8, 14")]
+    pub target_metadata: ::core::option::Option<node_execution_event::TargetMetadata>,
+}
+/// Nested message and enum types in `NodeExecutionEvent`.
+pub mod node_execution_event {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum InputValue {
+        #[prost(string, tag = "5")]
+        InputUri(::prost::alloc::string::String),
+        /// Raw input data consumed by this node execution.
+        #[prost(message, tag = "20")]
+        InputData(super::super::core::LiteralMap),
+    }
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum OutputResult {
+        /// URL to the output of the execution, it encodes all the information
+        /// including Cloud source provider. i.e., s3://...
+        #[prost(string, tag = "6")]
+        OutputUri(::prost::alloc::string::String),
+        /// Error information for the execution
+        #[prost(message, tag = "7")]
+        Error(super::super::core::ExecutionError),
+        /// Raw output data produced by this node execution.
+        #[prost(message, tag = "15")]
+        OutputData(super::super::core::LiteralMap),
+    }
+    /// Additional metadata to do with this event's node target based
+    /// on the node type
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum TargetMetadata {
+        #[prost(message, tag = "8")]
+        WorkflowNodeMetadata(super::WorkflowNodeMetadata),
+        #[prost(message, tag = "14")]
+        TaskNodeMetadata(super::TaskNodeMetadata),
+    }
+}
+/// For Workflow Nodes we need to send information about the workflow that's launched
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkflowNodeMetadata {
+    #[prost(message, optional, tag = "1")]
+    pub execution_id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskNodeMetadata {
+    /// Captures the status of caching for this execution.
+    #[prost(enumeration = "super::core::CatalogCacheStatus", tag = "1")]
+    pub cache_status: i32,
+    /// This structure carries the catalog artifact information
+    #[prost(message, optional, tag = "2")]
+    pub catalog_key: ::core::option::Option<super::core::CatalogMetadata>,
+    /// Captures the status of cache reservations for this execution.
+    #[prost(enumeration = "super::core::catalog_reservation::Status", tag = "3")]
+    pub reservation_status: i32,
+    /// The latest checkpoint location
+    #[prost(string, tag = "4")]
+    pub checkpoint_uri: ::prost::alloc::string::String,
+    /// In the case this task launched a dynamic workflow we capture its structure here.
+    #[prost(message, optional, tag = "16")]
+    pub dynamic_workflow: ::core::option::Option<DynamicWorkflowNodeMetadata>,
+}
+/// For dynamic workflow nodes we send information about the dynamic workflow definition that gets generated.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DynamicWorkflowNodeMetadata {
+    /// id represents the unique identifier of the workflow.
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::Identifier>,
+    /// Represents the compiled representation of the embedded dynamic workflow.
+    #[prost(message, optional, tag = "2")]
+    pub compiled_workflow: ::core::option::Option<super::core::CompiledWorkflowClosure>,
+    /// dynamic_job_spec_uri is the location of the DynamicJobSpec proto message for this DynamicWorkflow. This is
+    /// required to correctly recover partially completed executions where the workflow has already been compiled.
+    #[prost(string, tag = "3")]
+    pub dynamic_job_spec_uri: ::prost::alloc::string::String,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ParentTaskExecutionMetadata {
+    #[prost(message, optional, tag = "1")]
+    pub id: ::core::option::Option<super::core::TaskExecutionIdentifier>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ParentNodeExecutionMetadata {
+    /// Unique identifier of the parent node id within the execution
+    /// This is value of core.NodeExecutionIdentifier.node_id of the parent node
+    #[prost(string, tag = "1")]
+    pub node_id: ::prost::alloc::string::String,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct EventReason {
+    /// An explanation for this event
+    #[prost(string, tag = "1")]
+    pub reason: ::prost::alloc::string::String,
+    /// The time this reason occurred
+    #[prost(message, optional, tag = "2")]
+    pub occurred_at: ::core::option::Option<::prost_types::Timestamp>,
+}
+/// Plugin specific execution event information. For tasks like Python, Hive, Spark, DynamicJob.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskExecutionEvent {
+    /// ID of the task. In combination with the retryAttempt this will indicate
+    /// the task execution uniquely for a given parent node execution.
+    #[prost(message, optional, tag = "1")]
+    pub task_id: ::core::option::Option<super::core::Identifier>,
+    /// A task execution is always kicked off by a node execution; the event consumer
+    /// will use the parent_id to relate the task to its parent node execution
+    #[prost(message, optional, tag = "2")]
+    pub parent_node_execution_id: ::core::option::Option<
+        super::core::NodeExecutionIdentifier,
+    >,
+    /// retry attempt number for this task, i.e., 2 for the second attempt
+    #[prost(uint32, tag = "3")]
+    pub retry_attempt: u32,
+    /// Phase associated with the event
+    #[prost(enumeration = "super::core::task_execution::Phase", tag = "4")]
+    pub phase: i32,
+    /// id of the process that sent this event, mainly for trace debugging
+    #[prost(string, tag = "5")]
+    pub producer_id: ::prost::alloc::string::String,
+    /// log information for the task execution
+    #[prost(message, repeated, tag = "6")]
+    pub logs: ::prost::alloc::vec::Vec<super::core::TaskLog>,
+    /// This timestamp represents when the original event occurred; it is generated
+    /// by the executor of the task.
+    #[prost(message, optional, tag = "7")]
+    pub occurred_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// Custom data that the task plugin sends back. This is extensible to allow various plugins in the system.
+    #[prost(message, optional, tag = "11")]
+    pub custom_info: ::core::option::Option<::prost_types::Struct>,
+    /// Some phases, like RUNNING, can send multiple events with changed metadata (new logs, additional custom_info, etc)
+    /// that should be recorded regardless of the lack of phase change.
+    /// The version field should be incremented when metadata changes across the duration of an individual phase.
+    #[prost(uint32, tag = "12")]
+    pub phase_version: u32,
+    /// An optional explanation for the phase transition.
+    /// Deprecated: Use reasons instead.
+    #[deprecated]
+    #[prost(string, tag = "13")]
+    pub reason: ::prost::alloc::string::String,
+    /// An optional list of explanations for the phase transition.
+    #[prost(message, repeated, tag = "21")]
+    pub reasons: ::prost::alloc::vec::Vec<EventReason>,
+    /// A predefined yet extensible Task type identifier. If the task definition is already registered in flyte admin
+    /// this type will be identical, but not all task executions necessarily use pre-registered definitions and this
+    /// type is useful to render the task in the UI, filter task executions, etc.
+    #[prost(string, tag = "14")]
+    pub task_type: ::prost::alloc::string::String,
+    /// Metadata around how a task was executed.
+    #[prost(message, optional, tag = "16")]
+    pub metadata: ::core::option::Option<TaskExecutionMetadata>,
+    /// The event version is used to indicate versioned changes in how data is reported using this
+    /// proto message. For example, event_version > 0 means that maps tasks report logs using the
+    /// TaskExecutionMetadata ExternalResourceInfo fields for each subtask rather than the TaskLog
+    /// in this message.
+    #[prost(int32, tag = "18")]
+    pub event_version: i32,
+    /// This timestamp represents the instant when the event was reported by the executing framework. For example, a k8s
+    /// pod task may be marked completed at (i.e. `occurred_at`) the instant the container running user code completes,
+    /// but this event will not be reported until the pod is marked as completed. Extracting both of these timestamps
+    /// facilitates a more accurate portrayal of the evaluation time-series.
+    #[prost(message, optional, tag = "20")]
+    pub reported_at: ::core::option::Option<::prost_types::Timestamp>,
+    #[prost(oneof = "task_execution_event::InputValue", tags = "8, 19")]
+    pub input_value: ::core::option::Option<task_execution_event::InputValue>,
+    #[prost(oneof = "task_execution_event::OutputResult", tags = "9, 10, 17")]
+    pub output_result: ::core::option::Option<task_execution_event::OutputResult>,
+}
+/// Nested message and enum types in `TaskExecutionEvent`.
+pub mod task_execution_event {
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum InputValue {
+        /// URI of the input file, it encodes all the information
+        /// including Cloud source provider. i.e., s3://...
+        #[prost(string, tag = "8")]
+        InputUri(::prost::alloc::string::String),
+        /// Raw input data consumed by this task execution.
+        #[prost(message, tag = "19")]
+        InputData(super::super::core::LiteralMap),
+    }
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum OutputResult {
+        /// URI to the output of the execution, it will be in a format that encodes all the information
+        /// including Cloud source provider. i.e., s3://...
+        #[prost(string, tag = "9")]
+        OutputUri(::prost::alloc::string::String),
+        /// Error information for the execution
+        #[prost(message, tag = "10")]
+        Error(super::super::core::ExecutionError),
+        /// Raw output data produced by this task execution.
+        #[prost(message, tag = "17")]
+        OutputData(super::super::core::LiteralMap),
+    }
+}
+/// This message contains metadata about external resources produced or used by a specific task execution.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ExternalResourceInfo {
+    /// Identifier for an external resource created by this task execution, for example Qubole query ID or presto query ids.
+    #[prost(string, tag = "1")]
+    pub external_id: ::prost::alloc::string::String,
+    /// A unique index for the external resource with respect to all external resources for this task. Although the
+    /// identifier may change between task reporting events or retries, this will remain the same to enable aggregating
+    /// information from multiple reports.
+    #[prost(uint32, tag = "2")]
+    pub index: u32,
+    /// Retry attempt number for this external resource, i.e., 2 for the second attempt
+    #[prost(uint32, tag = "3")]
+    pub retry_attempt: u32,
+    /// Phase associated with the external resource
+    #[prost(enumeration = "super::core::task_execution::Phase", tag = "4")]
+    pub phase: i32,
+    /// Captures the status of caching for this external resource execution.
+    #[prost(enumeration = "super::core::CatalogCacheStatus", tag = "5")]
+    pub cache_status: i32,
+    /// log information for the external resource execution
+    #[prost(message, repeated, tag = "6")]
+    pub logs: ::prost::alloc::vec::Vec<super::core::TaskLog>,
+}
+/// This message holds task execution metadata specific to resource allocation used to manage concurrent
+/// executions for a project namespace.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ResourcePoolInfo {
+    /// Unique resource ID used to identify this execution when allocating a token.
+    #[prost(string, tag = "1")]
+    pub allocation_token: ::prost::alloc::string::String,
+    /// Namespace under which this task execution requested an allocation token.
+    #[prost(string, tag = "2")]
+    pub namespace: ::prost::alloc::string::String,
+}
+/// Holds metadata around how a task was executed.
+/// As a task transitions across event phases during execution, some attributes, such as its generated name, generated
+/// external resources, and more may grow in size but not necessarily change based on the phase transition that sparked
+/// the event update.
+/// Metadata is a container for these attributes across the task execution lifecycle.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskExecutionMetadata {
+    /// Unique, generated name for this task execution used by the backend.
+    #[prost(string, tag = "1")]
+    pub generated_name: ::prost::alloc::string::String,
+    /// Additional data on external resources on other back-ends or platforms (e.g. Hive, Qubole, etc) launched by this task execution.
+    #[prost(message, repeated, tag = "2")]
+    pub external_resources: ::prost::alloc::vec::Vec<ExternalResourceInfo>,
+    /// Includes additional data on concurrent resource management used during execution.
+    /// This is a repeated field because a plugin can request multiple resource allocations during execution.
+    #[prost(message, repeated, tag = "3")]
+    pub resource_pool_info: ::prost::alloc::vec::Vec<ResourcePoolInfo>,
+    /// The identifier of the plugin used to execute this task.
+    #[prost(string, tag = "4")]
+    pub plugin_identifier: ::prost::alloc::string::String,
+    #[prost(enumeration = "task_execution_metadata::InstanceClass", tag = "16")]
+    pub instance_class: i32,
+}
+/// Nested message and enum types in `TaskExecutionMetadata`.
+pub mod task_execution_metadata {
+    /// Includes the broad category of machine used for this specific task execution.
+    #[derive(
+        Clone,
+        Copy,
+        Debug,
+        PartialEq,
+        Eq,
+        Hash,
+        PartialOrd,
+        Ord,
+        ::prost::Enumeration
+    )]
+    #[repr(i32)]
+    pub enum InstanceClass {
+        /// The default instance class configured for the flyte application platform.
+        Default = 0,
+        /// The instance class configured for interruptible tasks.
+        Interruptible = 1,
+    }
+    impl InstanceClass {
+        /// String value of the enum field names used in the ProtoBuf definition.
+        ///
+        /// The values are not transformed in any way and thus are considered stable
+        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+        pub fn as_str_name(&self) -> &'static str {
+            match self {
+                InstanceClass::Default => "DEFAULT",
+                InstanceClass::Interruptible => "INTERRUPTIBLE",
+            }
+        }
+        /// Creates an enum from field names used in the ProtoBuf definition.
+        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+            match value {
+                "DEFAULT" => Some(Self::Default),
+                "INTERRUPTIBLE" => Some(Self::Interruptible),
+                _ => None,
+            }
+        }
+    }
+}
diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.kubeflow.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.kubeflow.rs
new file mode 100644
index 0000000000..2c948f89ac
--- /dev/null
+++ b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.kubeflow.rs
@@ -0,0 +1,207 @@
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RunPolicy {
+    /// Defines the policy to kill pods after the job completes. Defaults to None.
+    #[prost(enumeration = "CleanPodPolicy", tag = "1")]
+    pub clean_pod_policy: i32,
+    /// TTL to clean up jobs. Defaults to infinite.
+    #[prost(int32, tag = "2")]
+    pub ttl_seconds_after_finished: i32,
+    /// Specifies the duration in seconds relative to the startTime that the job may be active
+    /// before the system tries to terminate it; value must be a positive integer.
+    #[prost(int32, tag = "3")]
+    pub active_deadline_seconds: i32,
+    /// Number of retries before marking this job failed.
+    #[prost(int32, tag = "4")]
+    pub backoff_limit: i32,
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum RestartPolicy {
+    Never = 0,
+    OnFailure = 1,
+    Always = 2,
+}
+impl RestartPolicy {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            RestartPolicy::Never => "RESTART_POLICY_NEVER",
+            RestartPolicy::OnFailure => "RESTART_POLICY_ON_FAILURE",
+            RestartPolicy::Always => "RESTART_POLICY_ALWAYS",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "RESTART_POLICY_NEVER" => Some(Self::Never),
+            "RESTART_POLICY_ON_FAILURE" => Some(Self::OnFailure),
+            "RESTART_POLICY_ALWAYS" => Some(Self::Always),
+            _ => None,
+        }
+    }
+}
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
+#[repr(i32)]
+pub enum CleanPodPolicy {
+    CleanpodPolicyNone = 0,
+    CleanpodPolicyRunning = 1,
+    CleanpodPolicyAll = 2,
+}
+impl CleanPodPolicy {
+    /// String value of the enum field names used in the ProtoBuf definition.
+    ///
+    /// The values are not transformed in any way and thus are considered stable
+    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
+    pub fn as_str_name(&self) -> &'static str {
+        match self {
+            CleanPodPolicy::CleanpodPolicyNone => "CLEANPOD_POLICY_NONE",
+            CleanPodPolicy::CleanpodPolicyRunning => "CLEANPOD_POLICY_RUNNING",
+            CleanPodPolicy::CleanpodPolicyAll => "CLEANPOD_POLICY_ALL",
+        }
+    }
+    /// Creates an enum from field names used in the ProtoBuf definition.
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "CLEANPOD_POLICY_NONE" => Some(Self::CleanpodPolicyNone),
+            "CLEANPOD_POLICY_RUNNING" => Some(Self::CleanpodPolicyRunning),
+            "CLEANPOD_POLICY_ALL" => Some(Self::CleanpodPolicyAll),
+            _ => None,
+        }
+    }
+}
+/// Proto for plugin that enables distributed training using
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DistributedMpiTrainingTask {
+    /// Worker replicas spec
+    #[prost(message, optional, tag = "1")]
+    pub worker_replicas: ::core::option::Option<DistributedMpiTrainingReplicaSpec>,
+    /// Master replicas spec
+    #[prost(message, optional, tag = "2")]
+    pub launcher_replicas: ::core::option::Option<DistributedMpiTrainingReplicaSpec>,
+    /// RunPolicy encapsulates various runtime policies of the distributed training
+    /// job, for example how to clean up resources and how long the job can stay
+    /// active.
+    #[prost(message, optional, tag = "3")]
+    pub run_policy: ::core::option::Option<RunPolicy>,
+    /// Number of slots per worker
+    #[prost(int32, tag = "4")]
+    pub slots: i32,
+}
+/// Replica specification for distributed MPI training
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DistributedMpiTrainingReplicaSpec {
+    /// Number of replicas
+    #[prost(int32, tag = "1")]
+    pub replicas: i32,
+    /// Image used for the replica group
+    #[prost(string, tag = "2")]
+    pub image: ::prost::alloc::string::String,
+    /// Resources required for the replica group
+    #[prost(message, optional, tag = "3")]
+    pub resources: ::core::option::Option<super::super::core::Resources>,
+    /// Restart policy determines whether pods will be restarted when they exit
+    #[prost(enumeration = "RestartPolicy", tag = "4")]
+    pub restart_policy: i32,
+    /// MPI sometimes requires a different command set for different replica groups
+    #[prost(string, repeated, tag = "5")]
+    pub command: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+}
+/// Custom proto for torch elastic config for distributed training using
+///
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ElasticConfig {
+    #[prost(string, tag = "1")]
+    pub rdzv_backend: ::prost::alloc::string::String,
+    #[prost(int32, tag = "2")]
+    pub min_replicas: i32,
+    #[prost(int32, tag = "3")]
+    pub max_replicas: i32,
+    #[prost(int32, tag = "4")]
+    pub nproc_per_node: i32,
+    #[prost(int32, tag = "5")]
+    pub max_restarts: i32,
+}
+/// Proto for plugin that enables distributed training using
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DistributedPyTorchTrainingTask {
+    /// Worker replicas spec
+    #[prost(message, optional, tag = "1")]
+    pub worker_replicas: ::core::option::Option<DistributedPyTorchTrainingReplicaSpec>,
+    /// Master replicas spec; master replicas can only have 1 replica
+    #[prost(message, optional, tag = "2")]
+    pub master_replicas: ::core::option::Option<DistributedPyTorchTrainingReplicaSpec>,
+    /// RunPolicy encapsulates various runtime policies of the distributed training
+    /// job, for example how to clean up resources and how long the job can stay
+    /// active.
+    #[prost(message, optional, tag = "3")]
+    pub run_policy: ::core::option::Option<RunPolicy>,
+    /// config for an elastic pytorch job
+    #[prost(message, optional, tag = "4")]
+    pub elastic_config: ::core::option::Option<ElasticConfig>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DistributedPyTorchTrainingReplicaSpec {
+    /// Number of replicas
+    #[prost(int32, tag = "1")]
+    pub replicas: i32,
+    /// Image used for the replica group
+    #[prost(string, tag = "2")]
+    pub image: ::prost::alloc::string::String,
+    /// Resources required for the replica group
+    #[prost(message, optional, tag = "3")]
+    pub resources: ::core::option::Option<super::super::core::Resources>,
+    /// RestartPolicy determines whether pods will be restarted when they exit
+    #[prost(enumeration = "RestartPolicy", tag = "4")]
+    pub restart_policy: i32,
+}
+/// Proto for plugin that enables distributed training using
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DistributedTensorflowTrainingTask {
+    /// Worker replicas spec
+    #[prost(message, optional, tag = "1")]
+    pub worker_replicas: ::core::option::Option<
+        DistributedTensorflowTrainingReplicaSpec,
+    >,
+    /// Parameter server replicas spec
+    #[prost(message, optional, tag = "2")]
+    pub ps_replicas: ::core::option::Option<DistributedTensorflowTrainingReplicaSpec>,
+    /// Chief replicas spec
+    #[prost(message, optional, tag = "3")]
+    pub chief_replicas: ::core::option::Option<DistributedTensorflowTrainingReplicaSpec>,
+    /// RunPolicy encapsulates various runtime policies of the distributed training
+    /// job, for example how to clean up resources and how long the job can stay
+    /// active.
+    #[prost(message, optional, tag = "4")]
+    pub run_policy: ::core::option::Option<RunPolicy>,
+    /// Evaluator replicas spec
+    #[prost(message, optional, tag = "5")]
+    pub evaluator_replicas: ::core::option::Option<
+        DistributedTensorflowTrainingReplicaSpec,
+    >,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DistributedTensorflowTrainingReplicaSpec {
+    /// Number of replicas
+    #[prost(int32, tag = "1")]
+    pub replicas: i32,
+    /// Image used for the replica group
+    #[prost(string, tag = "2")]
+    pub image: ::prost::alloc::string::String,
+    /// Resources required for the replica group
+    #[prost(message, optional, tag = "3")]
+    pub resources: ::core::option::Option<super::super::core::Resources>,
+    /// RestartPolicy Determines whether pods will be restarted when they exit
+    #[prost(enumeration = "RestartPolicy", tag = "4")]
+    pub restart_policy: i32,
+}
diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.rs
new file mode 100644
index 0000000000..b1ab41f0f0
--- /dev/null
+++ b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.rs
@@ -0,0 +1,346 @@
+/// This message works with the 'presto' task type in the SDK and is the object that will be in the 'custom' field
+/// of a Presto task's TaskTemplate
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct PrestoQuery {
+    #[prost(string, tag = "1")]
+    pub routing_group: ::prost::alloc::string::String,
+    #[prost(string, tag = "2")]
+    pub catalog: ::prost::alloc::string::String,
+    #[prost(string, tag = "3")]
+    pub schema: ::prost::alloc::string::String,
+    #[prost(string, tag = "4")]
+    pub statement: ::prost::alloc::string::String,
+}
+/// Defines a query to execute on a hive cluster.
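+///
+/// A construction sketch (field values are illustrative only):
+///
+/// ```ignore
+/// let q = HiveQuery {
+///     query: "SELECT 1".to_string(),
+///     timeout_sec: 600,
+///     retry_count: 3,
+/// };
+/// ```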
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct HiveQuery {
+    #[prost(string, tag = "1")]
+    pub query: ::prost::alloc::string::String,
+    #[prost(uint32, tag = "2")]
+    pub timeout_sec: u32,
+    #[prost(uint32, tag = "3")]
+    pub retry_count: u32,
+}
+/// Defines a collection of hive queries.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct HiveQueryCollection {
+    #[prost(message, repeated, tag = "2")]
+    pub queries: ::prost::alloc::vec::Vec<HiveQuery>,
+}
+/// This message works with the 'hive' task type in the SDK and is the object that will be in the 'custom' field
+/// of a hive task's TaskTemplate
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct QuboleHiveJob {
+    #[prost(string, tag = "1")]
+    pub cluster_label: ::prost::alloc::string::String,
+    #[deprecated]
+    #[prost(message, optional, tag = "2")]
+    pub query_collection: ::core::option::Option<HiveQueryCollection>,
+    #[prost(string, repeated, tag = "3")]
+    pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
+    #[prost(message, optional, tag = "4")]
+    pub query: ::core::option::Option<HiveQuery>,
+}
+/// RayJobSpec defines the desired state of RayJob
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RayJob {
+    /// RayClusterSpec is the cluster template to run the job
+    #[prost(message, optional, tag = "1")]
+    pub ray_cluster: ::core::option::Option<RayCluster>,
+    /// runtime_env is base64 encoded.
+    /// Ray runtime environments:
+    #[prost(string, tag = "2")]
+    pub runtime_env: ::prost::alloc::string::String,
+    /// shutdown_after_job_finishes specifies whether the RayCluster should be deleted after the RayJob finishes.
+    #[prost(bool, tag = "3")]
+    pub shutdown_after_job_finishes: bool,
+    /// ttl_seconds_after_finished specifies the number of seconds after which the RayCluster will be deleted after the RayJob finishes.
+    #[prost(int32, tag = "4")]
+    pub ttl_seconds_after_finished: i32,
+}
+/// RayCluster defines the desired state of RayCluster
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RayCluster {
+    /// HeadGroupSpec is the spec for the head pod
+    #[prost(message, optional, tag = "1")]
+    pub head_group_spec: ::core::option::Option<HeadGroupSpec>,
+    /// WorkerGroupSpecs are the specs for the worker pods
+    #[prost(message, repeated, tag = "2")]
+    pub worker_group_spec: ::prost::alloc::vec::Vec<WorkerGroupSpec>,
+    /// Whether to enable autoscaling.
+    #[prost(bool, tag = "3")]
+    pub enable_autoscaling: bool,
+}
+/// HeadGroupSpec is the spec for the head pod
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct HeadGroupSpec {
+    /// Optional. RayStartParams are the params of the start command: address, object-store-memory.
+    /// Refer to
+    #[prost(map = "string, string", tag = "1")]
+    pub ray_start_params: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        ::prost::alloc::string::String,
+    >,
+}
+/// WorkerGroupSpec are the specs for the worker pods
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct WorkerGroupSpec {
+    /// Required. RayCluster can have multiple worker groups, and it distinguishes them by name
+    #[prost(string, tag = "1")]
+    pub group_name: ::prost::alloc::string::String,
+    /// Required. Desired replicas of the worker group. Defaults to 1.
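+    ///
+    /// prost renders proto3 `map<string, string>` fields such as `ray_start_params`
+    /// below as a plain `std::collections::HashMap`, so a group can be filled in
+    /// directly (a sketch; the `flyteidl::plugins` path is assumed):
+    /// ```ignore
+    /// use std::collections::HashMap;
+    /// use flyteidl::plugins::WorkerGroupSpec;
+    ///
+    /// let mut params = HashMap::new();
+    /// params.insert("num-cpus".to_string(), "2".to_string());
+    /// let group = WorkerGroupSpec {
+    ///     group_name: "workers".to_string(),
+    ///     replicas: 1,
+    ///     ray_start_params: params,
+    ///     ..Default::default()
+    /// };
+    /// ```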
+ #[prost(int32, tag = "2")] + pub replicas: i32, + /// Optional. Min replicas of the worker group. MinReplicas defaults to 1. + #[prost(int32, tag = "3")] + pub min_replicas: i32, + /// Optional. Max replicas of the worker group. MaxReplicas defaults to maxInt32 + #[prost(int32, tag = "4")] + pub max_replicas: i32, + /// Optional. RayStartParams are the params of the start command: address, object-store-memory. + /// Refer to + #[prost(map = "string, string", tag = "5")] + pub ray_start_params: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SparkApplication {} +/// Nested message and enum types in `SparkApplication`. +pub mod spark_application { + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Type { + Python = 0, + Java = 1, + Scala = 2, + R = 3, + } + impl Type { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Type::Python => "PYTHON", + Type::Java => "JAVA", + Type::Scala => "SCALA", + Type::R => "R", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "PYTHON" => Some(Self::Python), + "JAVA" => Some(Self::Java), + "SCALA" => Some(Self::Scala), + "R" => Some(Self::R), + _ => None, + } + } + } +} +/// Custom Proto for Spark Plugin. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SparkJob { + #[prost(enumeration = "spark_application::Type", tag = "1")] + pub application_type: i32, + #[prost(string, tag = "2")] + pub main_application_file: ::prost::alloc::string::String, + #[prost(string, tag = "3")] + pub main_class: ::prost::alloc::string::String, + #[prost(map = "string, string", tag = "4")] + pub spark_conf: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + #[prost(map = "string, string", tag = "5")] + pub hadoop_conf: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + /// Executor path for Python jobs. + #[prost(string, tag = "6")] + pub executor_path: ::prost::alloc::string::String, + /// Databricks job configuration. + /// Config structure can be found here. + #[prost(message, optional, tag = "7")] + pub databricks_conf: ::core::option::Option<::prost_types::Struct>, + /// Databricks access token. + /// This token can be set in either flytepropeller or flytekit. + #[prost(string, tag = "8")] + pub databricks_token: ::prost::alloc::string::String, + /// Domain name of your deployment. Use the form .cloud.databricks.com. + /// This instance name can be set in either flytepropeller or flytekit. + #[prost(string, tag = "9")] + pub databricks_instance: ::prost::alloc::string::String, +} +/// Describes a job that can process independent pieces of data concurrently. Multiple copies of the runnable component +/// will be executed concurrently. 
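+///
+/// # Example
+/// A sketch showing how the `success_criteria` oneof is set through the generated
+/// nested module (the `flyteidl::plugins` path is assumed):
+/// ```ignore
+/// use flyteidl::plugins::{array_job, ArrayJob};
+///
+/// // Accept the array job as successful once 90% of subtasks succeed.
+/// let job = ArrayJob {
+///     parallelism: 10,
+///     size: 100,
+///     success_criteria: Some(array_job::SuccessCriteria::MinSuccessRatio(0.9)),
+/// };
+/// ```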
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ArrayJob { + /// Defines the maximum number of instances to bring up concurrently at any given point. Note that this is an + /// optimistic restriction and that, due to network partitioning or other failures, the actual number of currently + /// running instances might be more. This has to be a positive number if assigned. Default value is size. + #[prost(int64, tag = "1")] + pub parallelism: i64, + /// Defines the number of instances to launch at most. This number should match the size of the input if the job + /// requires processing of all input data. This has to be a positive number. + /// In the case this is not defined, the back-end will determine the size at run-time by reading the inputs. + #[prost(int64, tag = "2")] + pub size: i64, + #[prost(oneof = "array_job::SuccessCriteria", tags = "3, 4")] + pub success_criteria: ::core::option::Option, +} +/// Nested message and enum types in `ArrayJob`. +pub mod array_job { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum SuccessCriteria { + /// An absolute number of the minimum number of successful completions of subtasks. As soon as this criteria is met, + /// the array job will be marked as successful and outputs will be computed. This has to be a non-negative number if + /// assigned. Default value is size (if specified). + #[prost(int64, tag = "3")] + MinSuccesses(i64), + /// If the array job size is not known beforehand, the min_success_ratio can instead be used to determine when an array + /// job can be marked successful. + #[prost(float, tag = "4")] + MinSuccessRatio(f32), + } +} +/// Represents an Execution that was launched and could be waited on. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Waitable { + #[prost(message, optional, tag = "1")] + pub wf_exec_id: ::core::option::Option, + #[prost(enumeration = "super::core::workflow_execution::Phase", tag = "2")] + pub phase: i32, + #[prost(string, tag = "3")] + pub workflow_id: ::prost::alloc::string::String, +} +/// Custom Proto for Dask Plugin. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DaskJob { + /// Spec for the scheduler pod. + #[prost(message, optional, tag = "1")] + pub scheduler: ::core::option::Option, + /// Spec of the default worker group. + #[prost(message, optional, tag = "2")] + pub workers: ::core::option::Option, +} +/// Specification for the scheduler pod. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DaskScheduler { + /// Optional image to use. If unset, will use the default image. + #[prost(string, tag = "1")] + pub image: ::prost::alloc::string::String, + /// Resources assigned to the scheduler pod. + #[prost(message, optional, tag = "2")] + pub resources: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DaskWorkerGroup { + /// Number of workers in the group. + #[prost(uint32, tag = "1")] + pub number_of_workers: u32, + /// Optional image to use for the pods of the worker group. If unset, will use the default image. + #[prost(string, tag = "2")] + pub image: ::prost::alloc::string::String, + /// Resources assigned to the all pods of the worker group. + /// As per + /// it is advised to only set limits. 
If requests are not explicitly set, the plugin will make + /// sure to set requests==limits. + /// The plugin sets ` --memory-limit` as well as `--nthreads` for the workers according to the limit. + #[prost(message, optional, tag = "3")] + pub resources: ::core::option::Option, +} +/// MPI operator proposal +/// Custom proto for plugin that enables distributed training using +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DistributedMpiTrainingTask { + /// number of worker spawned in the cluster for this job + #[prost(int32, tag = "1")] + pub num_workers: i32, + /// number of launcher replicas spawned in the cluster for this job + /// The launcher pod invokes mpirun and communicates with worker pods through MPI. + #[prost(int32, tag = "2")] + pub num_launcher_replicas: i32, + /// number of slots per worker used in hostfile. + /// The available slots (GPUs) in each pod. + #[prost(int32, tag = "3")] + pub slots: i32, +} +/// Custom proto for torch elastic config for distributed training using +/// +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ElasticConfig { + #[prost(string, tag = "1")] + pub rdzv_backend: ::prost::alloc::string::String, + #[prost(int32, tag = "2")] + pub min_replicas: i32, + #[prost(int32, tag = "3")] + pub max_replicas: i32, + #[prost(int32, tag = "4")] + pub nproc_per_node: i32, + #[prost(int32, tag = "5")] + pub max_restarts: i32, +} +/// Custom proto for plugin that enables distributed training using +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DistributedPyTorchTrainingTask { + /// number of worker replicas spawned in the cluster for this job + #[prost(int32, tag = "1")] + pub workers: i32, + /// config for an elastic pytorch job + /// + #[prost(message, optional, tag = "2")] + pub elastic_config: ::core::option::Option, +} +/// Custom proto for plugin that enables distributed training using +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DistributedTensorflowTrainingTask { + /// number of worker replicas spawned in the cluster for this job + #[prost(int32, tag = "1")] + pub workers: i32, + /// PS -> Parameter server + /// number of ps replicas spawned in the cluster for this job + #[prost(int32, tag = "2")] + pub ps_replicas: i32, + /// number of chief replicas spawned in the cluster for this job + #[prost(int32, tag = "3")] + pub chief_replicas: i32, + /// number of evaluator replicas spawned in the cluster for this job + #[prost(int32, tag = "4")] + pub evaluator_replicas: i32, +} diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.service.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.service.rs new file mode 100644 index 0000000000..09b0c3d600 --- /dev/null +++ b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.service.rs @@ -0,0 +1,3509 @@ +/// Generated client implementations. +pub mod signal_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// SignalService defines an RPC Service that may create, update, and retrieve signal(s). + #[derive(Debug, Clone)] + pub struct SignalServiceClient { + inner: tonic::client::Grpc, + } + impl SignalServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. 
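+        ///
+        /// # Example
+        /// A minimal sketch (the endpoint value is a placeholder):
+        /// ```ignore
+        /// let mut client = SignalServiceClient::connect("http://localhost:8089").await?;
+        /// ```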
+        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
+        where
+            D: TryInto<tonic::transport::Endpoint>,
+            D::Error: Into<StdError>,
+        {
+            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
+            Ok(Self::new(conn))
+        }
+    }
+    impl<T> SignalServiceClient<T>
+    where
+        T: tonic::client::GrpcService<tonic::body::BoxBody>,
+        T::Error: Into<StdError>,
+        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
+        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
+    {
+        pub fn new(inner: T) -> Self {
+            let inner = tonic::client::Grpc::new(inner);
+            Self { inner }
+        }
+        pub fn with_origin(inner: T, origin: Uri) -> Self {
+            let inner = tonic::client::Grpc::with_origin(inner, origin);
+            Self { inner }
+        }
+        pub fn with_interceptor<F>(
+            inner: T,
+            interceptor: F,
+        ) -> SignalServiceClient<InterceptedService<T, F>>
+        where
+            F: tonic::service::Interceptor,
+            T::ResponseBody: Default,
+            T: tonic::codegen::Service<
+                http::Request<tonic::body::BoxBody>,
+                Response = http::Response<
+                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
+                >,
+            >,
+            <T as tonic::codegen::Service<
+                http::Request<tonic::body::BoxBody>,
+            >>::Error: Into<StdError> + Send + Sync,
+        {
+            SignalServiceClient::new(InterceptedService::new(inner, interceptor))
+        }
+        /// Compress requests with the given encoding.
+        ///
+        /// This requires the server to support it otherwise it might respond with an
+        /// error.
+        #[must_use]
+        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.send_compressed(encoding);
+            self
+        }
+        /// Enable decompressing responses.
+        #[must_use]
+        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.accept_compressed(encoding);
+            self
+        }
+        /// Limits the maximum size of a decoded message.
+        ///
+        /// Default: `4MB`
+        #[must_use]
+        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_decoding_message_size(limit);
+            self
+        }
+        /// Limits the maximum size of an encoded message.
+        ///
+        /// Default: `usize::MAX`
+        #[must_use]
+        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_encoding_message_size(limit);
+            self
+        }
+        /// Fetches or creates a :ref:`ref_flyteidl.admin.Signal`.
+        pub async fn get_or_create_signal(
+            &mut self,
+            request: impl tonic::IntoRequest<
+                super::super::admin::SignalGetOrCreateRequest,
+            >,
+        ) -> std::result::Result<
+            tonic::Response<super::super::admin::Signal>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::new(
+                        tonic::Code::Unknown,
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/flyteidl.service.SignalService/GetOrCreateSignal",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(
+                    GrpcMethod::new(
+                        "flyteidl.service.SignalService",
+                        "GetOrCreateSignal",
+                    ),
+                );
+            self.inner.unary(req, path, codec).await
+        }
+        /// Fetch a list of :ref:`ref_flyteidl.admin.Signal` definitions.
+        pub async fn list_signals(
+            &mut self,
+            request: impl tonic::IntoRequest<super::super::admin::SignalListRequest>,
+        ) -> std::result::Result<
+            tonic::Response<super::super::admin::SignalList>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::new(
+                        tonic::Code::Unknown,
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/flyteidl.service.SignalService/ListSignals",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(
+                    GrpcMethod::new("flyteidl.service.SignalService", "ListSignals"),
+                );
+            self.inner.unary(req, path, codec).await
+        }
+        /// Sets the value on a :ref:`ref_flyteidl.admin.Signal` definition
+        pub async fn set_signal(
+            &mut self,
+            request: impl tonic::IntoRequest<super::super::admin::SignalSetRequest>,
+        ) -> std::result::Result<
+            tonic::Response<super::super::admin::SignalSetResponse>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::new(
+                        tonic::Code::Unknown,
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/flyteidl.service.SignalService/SetSignal",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("flyteidl.service.SignalService", "SetSignal"));
+            self.inner.unary(req, path, codec).await
+        }
+    }
+}
+/// Represents a request structure to create a task.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskCreateRequest {
+    /// The inputs required to start the execution. All required inputs must be
+    /// included in this map. If not required and not provided, defaults apply.
+    /// +optional
+    #[prost(message, optional, tag = "1")]
+    pub inputs: ::core::option::Option<super::super::core::LiteralMap>,
+    /// Template of the task that encapsulates all the metadata of the task.
+    #[prost(message, optional, tag = "2")]
+    pub template: ::core::option::Option<super::super::core::TaskTemplate>,
+    /// Prefix for where task output data will be written. (e.g. s3://my-bucket/randomstring)
+    #[prost(string, tag = "3")]
+    pub output_prefix: ::prost::alloc::string::String,
+}
+/// Represents a create response structure.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskCreateResponse {
+    #[prost(string, tag = "1")]
+    pub job_id: ::prost::alloc::string::String,
+}
+/// A message used to fetch a job state from the backend plugin server.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskGetRequest {
+    /// A predefined yet extensible Task type identifier.
+    #[prost(string, tag = "1")]
+    pub task_type: ::prost::alloc::string::String,
+    /// The unique id identifying the job.
+    #[prost(string, tag = "2")]
+    pub job_id: ::prost::alloc::string::String,
+}
+/// Response to get an individual task state.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TaskGetResponse {
+    /// The state of the execution is used to control its visibility in the UI/CLI.
+    #[prost(enumeration = "State", tag = "1")]
+    pub state: i32,
+    /// The outputs of the execution. It's typically used by sql task. Flyteplugins service will create a
+    /// Structured dataset pointing to the query result table.
+    /// +optional
+    #[prost(message, optional, tag = "2")]
+    pub outputs: ::core::option::Option<super::super::core::LiteralMap>,
+}
+/// A message used to delete a task.
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TaskDeleteRequest { + /// A predefined yet extensible Task type identifier. + #[prost(string, tag = "1")] + pub task_type: ::prost::alloc::string::String, + /// The unique id identifying the job. + #[prost(string, tag = "2")] + pub job_id: ::prost::alloc::string::String, +} +/// Response to delete a task. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TaskDeleteResponse {} +/// The state of the execution is used to control its visibility in the UI/CLI. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum State { + RetryableFailure = 0, + PermanentFailure = 1, + Pending = 2, + Running = 3, + Succeeded = 4, +} +impl State { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + State::RetryableFailure => "RETRYABLE_FAILURE", + State::PermanentFailure => "PERMANENT_FAILURE", + State::Pending => "PENDING", + State::Running => "RUNNING", + State::Succeeded => "SUCCEEDED", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "RETRYABLE_FAILURE" => Some(Self::RetryableFailure), + "PERMANENT_FAILURE" => Some(Self::PermanentFailure), + "PENDING" => Some(Self::Pending), + "RUNNING" => Some(Self::Running), + "SUCCEEDED" => Some(Self::Succeeded), + _ => None, + } + } +} +/// Generated client implementations. +pub mod external_plugin_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// ExternalPluginService defines an RPC Service that allows propeller to send the request to the backend plugin server. + #[derive(Debug, Clone)] + pub struct ExternalPluginServiceClient { + inner: tonic::client::Grpc, + } + impl ExternalPluginServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl ExternalPluginServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> ExternalPluginServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + ExternalPluginServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. 
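+        ///
+        /// For example, to enable gzip in both directions (a sketch; the server must
+        /// also be configured for gzip):
+        /// ```ignore
+        /// use tonic::codec::CompressionEncoding;
+        ///
+        /// let client = client
+        ///     .send_compressed(CompressionEncoding::Gzip)
+        ///     .accept_compressed(CompressionEncoding::Gzip);
+        /// ```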
+ #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Send a task create request to the backend plugin server. + pub async fn create_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.ExternalPluginService/CreateTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.ExternalPluginService", + "CreateTask", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Get job status. + pub async fn get_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.ExternalPluginService/GetTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.ExternalPluginService", "GetTask"), + ); + self.inner.unary(req, path, codec).await + } + /// Delete the task resource. + pub async fn delete_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.ExternalPluginService/DeleteTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.ExternalPluginService", + "DeleteTask", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateUploadLocationResponse { + /// SignedUrl specifies the url to use to upload content to (e.g. ) + #[prost(string, tag = "1")] + pub signed_url: ::prost::alloc::string::String, + /// NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar) + #[prost(string, tag = "2")] + pub native_url: ::prost::alloc::string::String, + /// ExpiresAt defines when will the signed URL expires. 
+    #[prost(message, optional, tag = "3")]
+    pub expires_at: ::core::option::Option<::prost_types::Timestamp>,
+    /// Data proxy generates these headers for the client, and the client has to add them to the request when uploading the file.
+    #[prost(map = "string, string", tag = "4")]
+    pub headers: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        ::prost::alloc::string::String,
+    >,
+}
+/// CreateUploadLocationRequest specifies the request for the CreateUploadLocation API.
+/// The implementation in the data proxy service will create the s3 location with some server side configured prefixes,
+/// and then:
+/// - project/domain/(a deterministic str representation of the content_md5)/filename (if present); OR
+/// - project/domain/filename_root (if present)/filename (if present).
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateUploadLocationRequest {
+    /// Project to create the upload location for
+    /// +required
+    #[prost(string, tag = "1")]
+    pub project: ::prost::alloc::string::String,
+    /// Domain to create the upload location for.
+    /// +required
+    #[prost(string, tag = "2")]
+    pub domain: ::prost::alloc::string::String,
+    /// Filename specifies a desired suffix for the generated location. E.g. `file.py` or `pre/fix/file.zip`.
+    /// +optional. By default, the service will generate a consistent name based on the provided parameters.
+    #[prost(string, tag = "3")]
+    pub filename: ::prost::alloc::string::String,
+    /// ExpiresIn defines a requested expiration duration for the generated url. The request will be rejected if this
+    /// exceeds the platform allowed max.
+    /// +optional. The default value comes from a global config.
+    #[prost(message, optional, tag = "4")]
+    pub expires_in: ::core::option::Option<::prost_types::Duration>,
+    /// ContentMD5 restricts the upload location to the specific MD5 provided. The ContentMD5 will also appear in the
+    /// generated path.
+    /// +required
+    #[prost(bytes = "vec", tag = "5")]
+    pub content_md5: ::prost::alloc::vec::Vec<u8>,
+    /// If present, data proxy will use this string in lieu of the md5 hash in the path. When the filename is also included
+    /// this makes the upload location deterministic. The native url will still be prefixed by the upload location prefix
+    /// in data proxy config. This option is useful when uploading multiple files.
+    /// +optional
+    #[prost(string, tag = "6")]
+    pub filename_root: ::prost::alloc::string::String,
+    /// If true, the data proxy will add content_md5 to the metadata of the signed URL and
+    /// it will force clients to add this metadata to the object.
+    /// This makes sure dataproxy is backward compatible with the old flytekit.
+    #[prost(bool, tag = "7")]
+    pub add_content_md5_metadata: bool,
+    /// Optional, org key applied to the resource.
+    #[prost(string, tag = "8")]
+    pub org: ::prost::alloc::string::String,
+}
+/// CreateDownloadLocationRequest specifies the request for the CreateDownloadLocation API.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CreateDownloadLocationRequest {
+    /// NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar)
+    #[prost(string, tag = "1")]
+    pub native_url: ::prost::alloc::string::String,
+    /// ExpiresIn defines a requested expiration duration for the generated url. The request will be rejected if this
+    /// exceeds the platform allowed max.
+    /// +optional.
The default value comes from a global config. + #[prost(message, optional, tag = "2")] + pub expires_in: ::core::option::Option<::prost_types::Duration>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateDownloadLocationResponse { + /// SignedUrl specifies the url to use to download content from (e.g. ) + #[prost(string, tag = "1")] + pub signed_url: ::prost::alloc::string::String, + /// ExpiresAt defines when will the signed URL expires. + #[prost(message, optional, tag = "2")] + pub expires_at: ::core::option::Option<::prost_types::Timestamp>, +} +/// CreateDownloadLinkRequest defines the request parameters to create a download link (signed url) +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateDownloadLinkRequest { + /// ArtifactType of the artifact requested. + #[prost(enumeration = "ArtifactType", tag = "1")] + pub artifact_type: i32, + /// ExpiresIn defines a requested expiration duration for the generated url. The request will be rejected if this + /// exceeds the platform allowed max. + /// +optional. The default value comes from a global config. + #[prost(message, optional, tag = "2")] + pub expires_in: ::core::option::Option<::prost_types::Duration>, + #[prost(oneof = "create_download_link_request::Source", tags = "3")] + pub source: ::core::option::Option, +} +/// Nested message and enum types in `CreateDownloadLinkRequest`. +pub mod create_download_link_request { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Source { + /// NodeId is the unique identifier for the node execution. For a task node, this will retrieve the output of the + /// most recent attempt of the task. + #[prost(message, tag = "3")] + NodeExecutionId(super::super::core::NodeExecutionIdentifier), + } +} +/// CreateDownloadLinkResponse defines the response for the generated links +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateDownloadLinkResponse { + /// SignedUrl specifies the url to use to download content from (e.g. ) + #[deprecated] + #[prost(string, repeated, tag = "1")] + pub signed_url: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// ExpiresAt defines when will the signed URL expire. + #[deprecated] + #[prost(message, optional, tag = "2")] + pub expires_at: ::core::option::Option<::prost_types::Timestamp>, + /// New wrapper object containing the signed urls and expiration time + #[prost(message, optional, tag = "3")] + pub pre_signed_urls: ::core::option::Option, +} +/// Wrapper object since the message is shared across this and the GetDataResponse +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PreSignedUrLs { + /// SignedUrl specifies the url to use to download content from (e.g. ) + #[prost(string, repeated, tag = "1")] + pub signed_url: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// ExpiresAt defines when will the signed URL expire. + #[prost(message, optional, tag = "2")] + pub expires_at: ::core::option::Option<::prost_types::Timestamp>, +} +/// General request artifact to retrieve data from a Flyte artifact url. 
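+///
+/// # Example
+/// A sketch of asking for a node execution's input literal map via a `flyte://` url
+/// (the url itself is a placeholder following the format documented below):
+/// ```ignore
+/// let req = GetDataRequest {
+///     flyte_url: "flyte://v1/proj/development/execid/n2/i".to_string(),
+/// };
+/// ```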
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetDataRequest { + /// A unique identifier in the form of flyte:// that uniquely, for a given Flyte + /// backend, identifies a Flyte artifact (\[i\]nput, \[o\]output, flyte \[d\]eck, etc.). + /// e.g. flyte://v1/proj/development/execid/n2/0/i (for 0th task execution attempt input) + /// flyte://v1/proj/development/execid/n2/i (for node execution input) + /// flyte://v1/proj/development/execid/n2/o/o3 (the o3 output of the second node) + #[prost(string, tag = "1")] + pub flyte_url: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetDataResponse { + #[prost(oneof = "get_data_response::Data", tags = "1, 2, 3")] + pub data: ::core::option::Option, +} +/// Nested message and enum types in `GetDataResponse`. +pub mod get_data_response { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Data { + /// literal map data will be returned + #[prost(message, tag = "1")] + LiteralMap(super::super::core::LiteralMap), + /// Flyte deck html will be returned as a signed url users can download + #[prost(message, tag = "2")] + PreSignedUrls(super::PreSignedUrLs), + /// Single literal will be returned. This is returned when the user/url requests a specific output or input + /// by name. See the o3 example above. + #[prost(message, tag = "3")] + Literal(super::super::core::Literal), + } +} +/// ArtifactType +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ArtifactType { + /// ARTIFACT_TYPE_UNDEFINED is the default, often invalid, value for the enum. + Undefined = 0, + /// ARTIFACT_TYPE_DECK refers to the deck html file optionally generated after a task, a workflow or a launch plan + /// finishes executing. + Deck = 1, +} +impl ArtifactType { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ArtifactType::Undefined => "ARTIFACT_TYPE_UNDEFINED", + ArtifactType::Deck => "ARTIFACT_TYPE_DECK", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "ARTIFACT_TYPE_UNDEFINED" => Some(Self::Undefined), + "ARTIFACT_TYPE_DECK" => Some(Self::Deck), + _ => None, + } + } +} +/// Generated client implementations. +pub mod data_proxy_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// DataProxyService defines an RPC Service that allows access to user-data in a controlled manner. + #[derive(Debug, Clone)] + pub struct DataProxyServiceClient { + inner: tonic::client::Grpc, + } + impl DataProxyServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. 
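+        ///
+        /// # Example
+        /// A minimal sketch (the endpoint value is a placeholder):
+        /// ```ignore
+        /// let mut client = DataProxyServiceClient::connect("http://localhost:8089").await?;
+        /// ```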
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl DataProxyServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> DataProxyServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + DataProxyServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// CreateUploadLocation creates a signed url to upload artifacts to for a given project/domain. + pub async fn create_upload_location( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.DataProxyService/CreateUploadLocation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.DataProxyService", + "CreateUploadLocation", + ), + ); + self.inner.unary(req, path, codec).await + } + /// CreateDownloadLocation creates a signed url to download artifacts. 
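+        ///
+        /// # Example
+        /// A sketch that converts a stored `native_url` into a signed link (all values
+        /// are placeholders):
+        /// ```ignore
+        /// let req = super::CreateDownloadLocationRequest {
+        ///     native_url: "s3://my-bucket/randomstring/suffix.tar".to_string(),
+        ///     ..Default::default()
+        /// };
+        /// let resp = client.create_download_location(req).await?.into_inner();
+        /// println!("signed url: {}", resp.signed_url);
+        /// ```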
+ pub async fn create_download_location( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.DataProxyService/CreateDownloadLocation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.DataProxyService", + "CreateDownloadLocation", + ), + ); + self.inner.unary(req, path, codec).await + } + /// CreateDownloadLocation creates a signed url to download artifacts. + pub async fn create_download_link( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.DataProxyService/CreateDownloadLink", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.DataProxyService", + "CreateDownloadLink", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn get_data( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.DataProxyService/GetData", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.DataProxyService", "GetData")); + self.inner.unary(req, path, codec).await + } + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UserInfoRequest {} +/// See the OpenID Connect spec at for more information. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UserInfoResponse { + /// Locally unique and never reassigned identifier within the Issuer for the End-User, which is intended to be consumed + /// by the Client. + #[prost(string, tag = "1")] + pub subject: ::prost::alloc::string::String, + /// Full name + #[prost(string, tag = "2")] + pub name: ::prost::alloc::string::String, + /// Shorthand name by which the End-User wishes to be referred to + #[prost(string, tag = "3")] + pub preferred_username: ::prost::alloc::string::String, + /// Given name(s) or first name(s) + #[prost(string, tag = "4")] + pub given_name: ::prost::alloc::string::String, + /// Surname(s) or last name(s) + #[prost(string, tag = "5")] + pub family_name: ::prost::alloc::string::String, + /// Preferred e-mail address + #[prost(string, tag = "6")] + pub email: ::prost::alloc::string::String, + /// Profile picture URL + #[prost(string, tag = "7")] + pub picture: ::prost::alloc::string::String, + /// Additional claims + #[prost(message, optional, tag = "8")] + pub additional_claims: ::core::option::Option<::prost_types::Struct>, +} +/// Generated client implementations. 
+pub mod identity_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// IdentityService defines an RPC Service that interacts with user/app identities. + #[derive(Debug, Clone)] + pub struct IdentityServiceClient { + inner: tonic::client::Grpc, + } + impl IdentityServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl IdentityServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> IdentityServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + IdentityServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Retrieves user information about the currently logged in user. 
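+        ///
+        /// # Example
+        /// A sketch of the whole flow (the endpoint value is a placeholder):
+        /// ```ignore
+        /// let mut client = IdentityServiceClient::connect("http://localhost:8089").await?;
+        /// let info = client.user_info(super::UserInfoRequest {}).await?.into_inner();
+        /// println!("logged in as {}", info.preferred_username);
+        /// ```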
+ pub async fn user_info( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.IdentityService/UserInfo", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.IdentityService", "UserInfo")); + self.inner.unary(req, path, codec).await + } + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OAuth2MetadataRequest {} +/// OAuth2MetadataResponse defines an RFC-Compliant response for /.well-known/oauth-authorization-server metadata +/// as defined in +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OAuth2MetadataResponse { + /// Defines the issuer string in all JWT tokens this server issues. The issuer can be admin itself or an external + /// issuer. + #[prost(string, tag = "1")] + pub issuer: ::prost::alloc::string::String, + /// URL of the authorization server's authorization endpoint \[RFC6749\]. This is REQUIRED unless no grant types are + /// supported that use the authorization endpoint. + #[prost(string, tag = "2")] + pub authorization_endpoint: ::prost::alloc::string::String, + /// URL of the authorization server's token endpoint \[RFC6749\]. + #[prost(string, tag = "3")] + pub token_endpoint: ::prost::alloc::string::String, + /// Array containing a list of the OAuth 2.0 response_type values that this authorization server supports. + #[prost(string, repeated, tag = "4")] + pub response_types_supported: ::prost::alloc::vec::Vec< + ::prost::alloc::string::String, + >, + /// JSON array containing a list of the OAuth 2.0 \[RFC6749\] scope values that this authorization server supports. + #[prost(string, repeated, tag = "5")] + pub scopes_supported: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// JSON array containing a list of client authentication methods supported by this token endpoint. + #[prost(string, repeated, tag = "6")] + pub token_endpoint_auth_methods_supported: ::prost::alloc::vec::Vec< + ::prost::alloc::string::String, + >, + /// URL of the authorization server's JWK Set \[JWK\] document. The referenced document contains the signing key(s) the + /// client uses to validate signatures from the authorization server. + #[prost(string, tag = "7")] + pub jwks_uri: ::prost::alloc::string::String, + /// JSON array containing a list of Proof Key for Code Exchange (PKCE) \[RFC7636\] code challenge methods supported by + /// this authorization server. + #[prost(string, repeated, tag = "8")] + pub code_challenge_methods_supported: ::prost::alloc::vec::Vec< + ::prost::alloc::string::String, + >, + /// JSON array containing a list of the OAuth 2.0 grant type values that this authorization server supports. 
+ #[prost(string, repeated, tag = "9")] + pub grant_types_supported: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// URL of the authorization server's device authorization endpoint, as defined in Section 3.1 of \[RFC8628\] + #[prost(string, tag = "10")] + pub device_authorization_endpoint: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PublicClientAuthConfigRequest {} +/// FlyteClientResponse encapsulates public information that flyte clients (CLIs... etc.) can use to authenticate users. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PublicClientAuthConfigResponse { + /// client_id to use when initiating OAuth2 authorization requests. + #[prost(string, tag = "1")] + pub client_id: ::prost::alloc::string::String, + /// redirect uri to use when initiating OAuth2 authorization requests. + #[prost(string, tag = "2")] + pub redirect_uri: ::prost::alloc::string::String, + /// scopes to request when initiating OAuth2 authorization requests. + #[prost(string, repeated, tag = "3")] + pub scopes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Authorization Header to use when passing Access Tokens to the server. If not provided, the client should use the + /// default http `Authorization` header. + #[prost(string, tag = "4")] + pub authorization_metadata_key: ::prost::alloc::string::String, + /// ServiceHttpEndpoint points to the http endpoint for the backend. If empty, clients can assume the endpoint used + /// to configure the gRPC connection can be used for the http one respecting the insecure flag to choose between + /// SSL or no SSL connections. + #[prost(string, tag = "5")] + pub service_http_endpoint: ::prost::alloc::string::String, + /// audience to use when initiating OAuth2 authorization requests. + #[prost(string, tag = "6")] + pub audience: ::prost::alloc::string::String, +} +/// Generated client implementations. +pub mod auth_metadata_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// The following defines an RPC service that is also served over HTTP via grpc-gateway. + /// Standard response codes for both are defined here: https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/errors.go + /// RPCs defined in this service must be anonymously accessible. + #[derive(Debug, Clone)] + pub struct AuthMetadataServiceClient { + inner: tonic::client::Grpc, + } + impl AuthMetadataServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl AuthMetadataServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> AuthMetadataServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + AuthMetadataServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Anonymously accessible. Retrieves local or external oauth authorization server metadata. + pub async fn get_o_auth2_metadata( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AuthMetadataService/GetOAuth2Metadata", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AuthMetadataService", + "GetOAuth2Metadata", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Anonymously accessible. Retrieves the client information clients should use when initiating OAuth2 authorization + /// requests. 
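+        ///
+        /// # Example
+        /// A sketch of the unauthenticated bootstrap call a CLI might make before
+        /// starting an OAuth2 flow:
+        /// ```ignore
+        /// let cfg = client
+        ///     .get_public_client_config(super::PublicClientAuthConfigRequest {})
+        ///     .await?
+        ///     .into_inner();
+        /// println!("client_id: {}", cfg.client_id);
+        /// ```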
+ pub async fn get_public_client_config( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AuthMetadataService/GetPublicClientConfig", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AuthMetadataService", + "GetPublicClientConfig", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated client implementations. +pub mod sync_agent_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// SyncAgentService defines an RPC Service that allows propeller to send the request to the agent server synchronously. + #[derive(Debug, Clone)] + pub struct SyncAgentServiceClient { + inner: tonic::client::Grpc, + } + impl SyncAgentServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl SyncAgentServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> SyncAgentServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + SyncAgentServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// ExecuteTaskSync streams the create request and inputs to the agent service and streams the outputs back. 
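+        ///
+        /// # Example
+        /// Because this is a bidirectional-streaming RPC, the request can be any
+        /// `Stream` of `ExecuteTaskSyncRequest` messages. A sketch using
+        /// `futures::stream::iter` (the `first_request` value is assumed to be built
+        /// elsewhere):
+        /// ```ignore
+        /// use futures::stream;
+        ///
+        /// let outbound = stream::iter(vec![first_request]);
+        /// let mut inbound = client.execute_task_sync(outbound).await?.into_inner();
+        /// while let Some(resp) = inbound.message().await? {
+        ///     // handle each ExecuteTaskSyncResponse as it arrives
+        /// }
+        /// ```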
+ pub async fn execute_task_sync( + &mut self, + request: impl tonic::IntoStreamingRequest< + Message = super::super::admin::ExecuteTaskSyncRequest, + >, + ) -> std::result::Result< + tonic::Response< + tonic::codec::Streaming, + >, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.SyncAgentService/ExecuteTaskSync", + ); + let mut req = request.into_streaming_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.SyncAgentService", + "ExecuteTaskSync", + ), + ); + self.inner.streaming(req, path, codec).await + } + } +} +/// Generated client implementations. +pub mod async_agent_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// AsyncAgentService defines an RPC Service that allows propeller to send the request to the agent server asynchronously. + #[derive(Debug, Clone)] + pub struct AsyncAgentServiceClient { + inner: tonic::client::Grpc, + } + impl AsyncAgentServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl AsyncAgentServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> AsyncAgentServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + AsyncAgentServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// CreateTask sends a task create request to the agent service. 
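+        ///
+        /// A configuration sketch (the endpoint URL is illustrative, gzip requires
+        /// tonic's `gzip` feature, and `req` is a `CreateTaskRequest` you construct):
+        ///
+        /// ```ignore
+        /// let mut client = AsyncAgentServiceClient::connect("http://localhost:8000")
+        ///     .await?
+        ///     .send_compressed(CompressionEncoding::Gzip)
+        ///     .accept_compressed(CompressionEncoding::Gzip);
+        /// let resource_meta = client.create_task(req).await?.into_inner();
+        /// ```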
+ pub async fn create_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AsyncAgentService/CreateTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AsyncAgentService", "CreateTask"), + ); + self.inner.unary(req, path, codec).await + } + /// Get job status. + pub async fn get_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AsyncAgentService/GetTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AsyncAgentService", "GetTask"), + ); + self.inner.unary(req, path, codec).await + } + /// Delete the task resource. + pub async fn delete_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AsyncAgentService/DeleteTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AsyncAgentService", "DeleteTask"), + ); + self.inner.unary(req, path, codec).await + } + /// GetTaskMetrics returns one or more task execution metrics, if available. + /// + /// Errors include + /// * OutOfRange if metrics are not available for the specified task time range + /// * various other errors + pub async fn get_task_metrics( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AsyncAgentService/GetTaskMetrics", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AsyncAgentService", + "GetTaskMetrics", + ), + ); + self.inner.unary(req, path, codec).await + } + /// GetTaskLogs returns task execution logs, if available. 
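+        ///
+        /// Consuming the server-streamed log chunks (a sketch; `req` is a
+        /// `GetTaskLogsRequest` you construct):
+        ///
+        /// ```ignore
+        /// let mut stream = client.get_task_logs(req).await?.into_inner();
+        /// while let Some(chunk) = stream.message().await? {
+        ///     // chunk is a GetTaskLogsResponse; `None` means the stream has ended
+        ///     println!("{chunk:?}");
+        /// }
+        /// ```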
+ pub async fn get_task_logs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response< + tonic::codec::Streaming, + >, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AsyncAgentService/GetTaskLogs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AsyncAgentService", "GetTaskLogs"), + ); + self.inner.server_streaming(req, path, codec).await + } + } +} +/// Generated client implementations. +pub mod agent_metadata_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// AgentMetadataService defines an RPC service that is also served over HTTP via grpc-gateway. + /// This service allows propeller or users to get the metadata of agents. + #[derive(Debug, Clone)] + pub struct AgentMetadataServiceClient { + inner: tonic::client::Grpc, + } + impl AgentMetadataServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl AgentMetadataServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> AgentMetadataServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + AgentMetadataServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Fetch a :ref:`ref_flyteidl.admin.Agent` definition. 
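+        ///
+        /// A lookup sketch (assumes `GetAgentRequest` carries a `name` field, per
+        /// flyteidl's agent.proto; "bigquery" is an illustrative agent name):
+        ///
+        /// ```ignore
+        /// let agent = client
+        ///     .get_agent(super::super::admin::GetAgentRequest {
+        ///         name: "bigquery".to_string(),
+        ///     })
+        ///     .await?
+        ///     .into_inner();
+        /// ```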
+ pub async fn get_agent( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AgentMetadataService/GetAgent", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AgentMetadataService", "GetAgent"), + ); + self.inner.unary(req, path, codec).await + } + /// Fetch a list of :ref:`ref_flyteidl.admin.Agent` definitions. + pub async fn list_agents( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AgentMetadataService/ListAgents", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AgentMetadataService", + "ListAgents", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated client implementations. +pub mod admin_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// The following defines an RPC service that is also served over HTTP via grpc-gateway. + /// Standard response codes for both are defined here: https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/errors.go + #[derive(Debug, Clone)] + pub struct AdminServiceClient { + inner: tonic::client::Grpc, + } + impl AdminServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl AdminServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> AdminServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + AdminServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. 
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Create and upload a :ref:`ref_flyteidl.admin.Task` definition + pub async fn create_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "CreateTask")); + self.inner.unary(req, path, codec).await + } + /// Fetch a :ref:`ref_flyteidl.admin.Task` definition. + pub async fn get_task( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "GetTask")); + self.inner.unary(req, path, codec).await + } + /// Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of task objects. + pub async fn list_task_ids( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NamedEntityIdentifierListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListTaskIds", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "ListTaskIds")); + self.inner.unary(req, path, codec).await + } + /// Fetch a list of :ref:`ref_flyteidl.admin.Task` definitions. 
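+        ///
+        /// A pagination sketch (assumes `ResourceListRequest` exposes `limit` and
+        /// `token`, per flyteidl's common.proto; remaining fields elided):
+        ///
+        /// ```ignore
+        /// let mut token = String::new();
+        /// loop {
+        ///     let page = client
+        ///         .list_tasks(super::super::admin::ResourceListRequest {
+        ///             limit: 100,
+        ///             token: token.clone(),
+        ///             ..Default::default()
+        ///         })
+        ///         .await?
+        ///         .into_inner();
+        ///     // ... consume page.tasks ...
+        ///     if page.token.is_empty() {
+        ///         break;
+        ///     }
+        ///     token = page.token;
+        /// }
+        /// ```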
+ pub async fn list_tasks( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListTasks", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "ListTasks")); + self.inner.unary(req, path, codec).await + } + /// Create and upload a :ref:`ref_flyteidl.admin.Workflow` definition + pub async fn create_workflow( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateWorkflow", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "CreateWorkflow"), + ); + self.inner.unary(req, path, codec).await + } + /// Fetch a :ref:`ref_flyteidl.admin.Workflow` definition. + pub async fn get_workflow( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetWorkflow", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "GetWorkflow")); + self.inner.unary(req, path, codec).await + } + /// Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of workflow objects. + pub async fn list_workflow_ids( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NamedEntityIdentifierListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListWorkflowIds", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListWorkflowIds"), + ); + self.inner.unary(req, path, codec).await + } + /// Fetch a list of :ref:`ref_flyteidl.admin.Workflow` definitions. 
+ pub async fn list_workflows( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListWorkflows", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListWorkflows"), + ); + self.inner.unary(req, path, codec).await + } + /// Create and upload a :ref:`ref_flyteidl.admin.LaunchPlan` definition + pub async fn create_launch_plan( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::LaunchPlanCreateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateLaunchPlan", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "CreateLaunchPlan"), + ); + self.inner.unary(req, path, codec).await + } + /// Fetch a :ref:`ref_flyteidl.admin.LaunchPlan` definition. + pub async fn get_launch_plan( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetLaunchPlan", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "GetLaunchPlan"), + ); + self.inner.unary(req, path, codec).await + } + /// Fetch the active version of a :ref:`ref_flyteidl.admin.LaunchPlan`. + pub async fn get_active_launch_plan( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ActiveLaunchPlanRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetActiveLaunchPlan", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetActiveLaunchPlan", + ), + ); + self.inner.unary(req, path, codec).await + } + /// List active versions of :ref:`ref_flyteidl.admin.LaunchPlan`. 
+ pub async fn list_active_launch_plans( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ActiveLaunchPlanListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListActiveLaunchPlans", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "ListActiveLaunchPlans", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of launch plan objects. + pub async fn list_launch_plan_ids( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NamedEntityIdentifierListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListLaunchPlanIds", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListLaunchPlanIds"), + ); + self.inner.unary(req, path, codec).await + } + /// Fetch a list of :ref:`ref_flyteidl.admin.LaunchPlan` definitions. + pub async fn list_launch_plans( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListLaunchPlans", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListLaunchPlans"), + ); + self.inner.unary(req, path, codec).await + } + /// Updates the status of a registered :ref:`ref_flyteidl.admin.LaunchPlan`. 
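+        ///
+        /// An activation sketch (assumes `LaunchPlanUpdateRequest` takes the launch
+        /// plan `id` plus a `LaunchPlanState`, per flyteidl's launch_plan.proto;
+        /// `lp_id` is a `core::Identifier` you construct):
+        ///
+        /// ```ignore
+        /// client
+        ///     .update_launch_plan(super::super::admin::LaunchPlanUpdateRequest {
+        ///         id: Some(lp_id),
+        ///         state: super::super::admin::LaunchPlanState::Active as i32,
+        ///     })
+        ///     .await?;
+        /// ```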
+ pub async fn update_launch_plan( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::LaunchPlanUpdateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateLaunchPlan", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "UpdateLaunchPlan"), + ); + self.inner.unary(req, path, codec).await + } + /// Triggers the creation of a :ref:`ref_flyteidl.admin.Execution` + pub async fn create_execution( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "CreateExecution"), + ); + self.inner.unary(req, path, codec).await + } + /// Triggers the creation of an identical :ref:`ref_flyteidl.admin.Execution` + pub async fn relaunch_execution( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ExecutionRelaunchRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/RelaunchExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "RelaunchExecution"), + ); + self.inner.unary(req, path, codec).await + } + /// Recreates a previously-run workflow execution that will only start executing from the last known failure point. + /// In Recover mode, users cannot change any input parameters or update the version of the execution. + /// This is extremely useful to recover from system errors and byzantine faults like - Loss of K8s cluster, bugs in platform or instability, machine failures, + /// downstream system failures (downstream services), or simply to recover executions that failed because of retry exhaustion and should complete if tried again. + /// See :ref:`ref_flyteidl.admin.ExecutionRecoverRequest` for more details. 
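+        ///
+        /// A recovery sketch (assumes `ExecutionRecoverRequest` carries the failed
+        /// execution's `id` and a new `name`; `failed_id` is an identifier you
+        /// construct, and the run name below is illustrative):
+        ///
+        /// ```ignore
+        /// let exec = client
+        ///     .recover_execution(super::super::admin::ExecutionRecoverRequest {
+        ///         id: Some(failed_id),
+        ///         name: "recovered-run".to_string(),
+        ///         ..Default::default()
+        ///     })
+        ///     .await?
+        ///     .into_inner();
+        /// ```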
+ pub async fn recover_execution( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ExecutionRecoverRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/RecoverExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "RecoverExecution"), + ); + self.inner.unary(req, path, codec).await + } + /// Fetches a :ref:`ref_flyteidl.admin.Execution`. + pub async fn get_execution( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowExecutionGetRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "GetExecution"), + ); + self.inner.unary(req, path, codec).await + } + /// Update execution belonging to project domain :ref:`ref_flyteidl.admin.Execution`. + pub async fn update_execution( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "UpdateExecution"), + ); + self.inner.unary(req, path, codec).await + } + /// Fetches input and output data for a :ref:`ref_flyteidl.admin.Execution`. + pub async fn get_execution_data( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowExecutionGetDataRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetExecutionData", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "GetExecutionData"), + ); + self.inner.unary(req, path, codec).await + } + /// Fetch a list of :ref:`ref_flyteidl.admin.Execution`. 
+ pub async fn list_executions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListExecutions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListExecutions"), + ); + self.inner.unary(req, path, codec).await + } + /// Terminates an in-progress :ref:`ref_flyteidl.admin.Execution`. + pub async fn terminate_execution( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ExecutionTerminateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/TerminateExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "TerminateExecution", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Fetches a :ref:`ref_flyteidl.admin.NodeExecution`. + pub async fn get_node_execution( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NodeExecutionGetRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetNodeExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "GetNodeExecution"), + ); + self.inner.unary(req, path, codec).await + } + /// Fetches a :ref:`ref_flyteidl.admin.DynamicNodeWorkflowResponse`. + pub async fn get_dynamic_node_workflow( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::GetDynamicNodeWorkflowRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetDynamicNodeWorkflow", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetDynamicNodeWorkflow", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Fetch a list of :ref:`ref_flyteidl.admin.NodeExecution`. 
+ pub async fn list_node_executions( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NodeExecutionListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListNodeExecutions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "ListNodeExecutions", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Fetch a list of :ref:`ref_flyteidl.admin.NodeExecution` launched by the reference :ref:`ref_flyteidl.admin.TaskExecution`. + pub async fn list_node_executions_for_task( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NodeExecutionForTaskListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListNodeExecutionsForTask", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "ListNodeExecutionsForTask", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Fetches input and output data for a :ref:`ref_flyteidl.admin.NodeExecution`. + pub async fn get_node_execution_data( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NodeExecutionGetDataRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetNodeExecutionData", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetNodeExecutionData", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Registers a :ref:`ref_flyteidl.admin.Project` with the Flyte deployment. + pub async fn register_project( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/RegisterProject", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "RegisterProject"), + ); + self.inner.unary(req, path, codec).await + } + /// Updates an existing :ref:`ref_flyteidl.admin.Project` + /// flyteidl.admin.Project should be passed but the domains property should be empty; + /// it will be ignored in the handler as domains cannot be updated via this API. 
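+        ///
+        /// A sketch honoring the empty-`domains` constraint described above (assumes
+        /// `Project` exposes `id`, `name`, `description`, and `domains`; values are
+        /// illustrative):
+        ///
+        /// ```ignore
+        /// client
+        ///     .update_project(super::super::admin::Project {
+        ///         id: "flytesnacks".to_string(),
+        ///         name: "flytesnacks".to_string(),
+        ///         description: "updated description".to_string(),
+        ///         domains: vec![], // must stay empty; domains cannot be updated here
+        ///         ..Default::default()
+        ///     })
+        ///     .await?;
+        /// ```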
+ pub async fn update_project( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateProject", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "UpdateProject"), + ); + self.inner.unary(req, path, codec).await + } + /// Fetches a :ref:`ref_flyteidl.admin.Project` + pub async fn get_project( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetProject", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "GetProject")); + self.inner.unary(req, path, codec).await + } + /// Fetches a list of :ref:`ref_flyteidl.admin.Project` + pub async fn list_projects( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListProjects", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListProjects"), + ); + self.inner.unary(req, path, codec).await + } + /// Indicates a :ref:`ref_flyteidl.event.WorkflowExecutionEvent` has occurred. + pub async fn create_workflow_event( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowExecutionEventRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateWorkflowEvent", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "CreateWorkflowEvent", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Indicates a :ref:`ref_flyteidl.event.NodeExecutionEvent` has occurred. 
+ pub async fn create_node_event( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NodeExecutionEventRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateNodeEvent", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "CreateNodeEvent"), + ); + self.inner.unary(req, path, codec).await + } + /// Indicates a :ref:`ref_flyteidl.event.TaskExecutionEvent` has occurred. + pub async fn create_task_event( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::TaskExecutionEventRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/CreateTaskEvent", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "CreateTaskEvent"), + ); + self.inner.unary(req, path, codec).await + } + /// Fetches a :ref:`ref_flyteidl.admin.TaskExecution`. + pub async fn get_task_execution( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::TaskExecutionGetRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetTaskExecution", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "GetTaskExecution"), + ); + self.inner.unary(req, path, codec).await + } + /// Fetches a list of :ref:`ref_flyteidl.admin.TaskExecution`. + pub async fn list_task_executions( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::TaskExecutionListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListTaskExecutions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "ListTaskExecutions", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Fetches input and output data for a :ref:`ref_flyteidl.admin.TaskExecution`. 
+ pub async fn get_task_execution_data( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::TaskExecutionGetDataRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetTaskExecutionData", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetTaskExecutionData", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. + pub async fn update_project_domain_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ProjectDomainAttributesUpdateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateProjectDomainAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "UpdateProjectDomainAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. + pub async fn get_project_domain_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ProjectDomainAttributesGetRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetProjectDomainAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetProjectDomainAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. 
+ pub async fn delete_project_domain_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ProjectDomainAttributesDeleteRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/DeleteProjectDomainAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "DeleteProjectDomainAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` at the project level + pub async fn update_project_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ProjectAttributesUpdateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateProjectAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "UpdateProjectAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. + pub async fn get_project_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ProjectAttributesGetRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetProjectAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetProjectAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. + pub async fn delete_project_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ProjectAttributesDeleteRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/DeleteProjectAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "DeleteProjectAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. 
+ pub async fn update_workflow_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowAttributesUpdateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateWorkflowAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "UpdateWorkflowAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. + pub async fn get_workflow_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowAttributesGetRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetWorkflowAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetWorkflowAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. + pub async fn delete_workflow_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowAttributesDeleteRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/DeleteWorkflowAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "DeleteWorkflowAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Lists custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a specific resource type. + pub async fn list_matchable_attributes( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::ListMatchableAttributesRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListMatchableAttributes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "ListMatchableAttributes", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Returns a list of :ref:`ref_flyteidl.admin.NamedEntity` objects. 
+ pub async fn list_named_entities( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListNamedEntities", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "ListNamedEntities"), + ); + self.inner.unary(req, path, codec).await + } + /// Returns a :ref:`ref_flyteidl.admin.NamedEntity` object. + pub async fn get_named_entity( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetNamedEntity", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "GetNamedEntity"), + ); + self.inner.unary(req, path, codec).await + } + /// Updates a :ref:`ref_flyteidl.admin.NamedEntity` object. + pub async fn update_named_entity( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::NamedEntityUpdateRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/UpdateNamedEntity", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl.service.AdminService", "UpdateNamedEntity"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn get_version( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetVersion", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl.service.AdminService", "GetVersion")); + self.inner.unary(req, path, codec).await + } + /// Fetch a :ref:`ref_flyteidl.admin.DescriptionEntity` object. 
+ pub async fn get_description_entity( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetDescriptionEntity", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetDescriptionEntity", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Fetch a list of :ref:`ref_flyteidl.admin.DescriptionEntity` definitions. + pub async fn list_description_entities( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::DescriptionEntityListRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/ListDescriptionEntities", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "ListDescriptionEntities", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Fetches runtime metrics for a :ref:`ref_flyteidl.admin.Execution`. + pub async fn get_execution_metrics( + &mut self, + request: impl tonic::IntoRequest< + super::super::admin::WorkflowExecutionGetMetricsRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl.service.AdminService/GetExecutionMetrics", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl.service.AdminService", + "GetExecutionMetrics", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} diff --git a/flyrs/src/gen/pb_rust/flyteidl/google.api.rs b/flyrs/src/gen/pb_rust/flyteidl/google.api.rs new file mode 100644 index 0000000000..2c0fd163be --- /dev/null +++ b/flyrs/src/gen/pb_rust/flyteidl/google.api.rs @@ -0,0 +1,367 @@ +/// Defines the HTTP configuration for an API service. It contains a list of +/// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method +/// to one or more HTTP REST API methods. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Http { + /// A list of HTTP configuration rules that apply to individual API methods. + /// + /// **NOTE:** All service configuration rules follow "last one wins" order. + #[prost(message, repeated, tag = "1")] + pub rules: ::prost::alloc::vec::Vec, + /// When set to true, URL path parameters will be fully URI-decoded except in + /// cases of single segment matches in reserved expansion, where "%2F" will be + /// left encoded. + /// + /// The default behavior is to not decode RFC 6570 reserved characters in multi + /// segment matches. 
+    #[prost(bool, tag = "2")]
+    pub fully_decode_reserved_expansion: bool,
+}
+/// # gRPC Transcoding
+///
+/// gRPC Transcoding is a feature for mapping between a gRPC method and one or
+/// more HTTP REST endpoints. It allows developers to build a single API service
+/// that supports both gRPC APIs and REST APIs. Many systems, including [Google
+/// APIs](),
+/// [Cloud Endpoints](), [gRPC
+/// Gateway](),
+/// and [Envoy]() proxy support this feature
+/// and use it for large scale production services.
+///
+/// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies
+/// how different portions of the gRPC request message are mapped to the URL
+/// path, URL query parameters, and HTTP request body. It also controls how the
+/// gRPC response message is mapped to the HTTP response body. `HttpRule` is
+/// typically specified as an `google.api.http` annotation on the gRPC method.
+///
+/// Each mapping specifies a URL path template and an HTTP method. The path
+/// template may refer to one or more fields in the gRPC request message, as long
+/// as each field is a non-repeated field with a primitive (non-message) type.
+/// The path template controls how fields of the request message are mapped to
+/// the URL path.
+///
+/// Example:
+///
+///     service Messaging {
+///       rpc GetMessage(GetMessageRequest) returns (Message) {
+///         option (google.api.http) = {
+///             get: "/v1/{name=messages/*}"
+///         };
+///       }
+///     }
+///     message GetMessageRequest {
+///       string name = 1; // Mapped to URL path.
+///     }
+///     message Message {
+///       string text = 1; // The resource content.
+///     }
+///
+/// This enables an HTTP REST to gRPC mapping as below:
+///
+/// HTTP | gRPC
+/// -----|-----
+/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")`
+///
+/// Any fields in the request message which are not bound by the path template
+/// automatically become HTTP query parameters if there is no HTTP request body.
+/// For example:
+///
+///     service Messaging {
+///       rpc GetMessage(GetMessageRequest) returns (Message) {
+///         option (google.api.http) = {
+///             get:"/v1/messages/{message_id}"
+///         };
+///       }
+///     }
+///     message GetMessageRequest {
+///       message SubMessage {
+///         string subfield = 1;
+///       }
+///       string message_id = 1; // Mapped to URL path.
+///       int64 revision = 2;    // Mapped to URL query parameter `revision`.
+///       SubMessage sub = 3;    // Mapped to URL query parameter `sub.subfield`.
+///     }
+///
+/// This enables a HTTP JSON to RPC mapping as below:
+///
+/// HTTP | gRPC
+/// -----|-----
+/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
+/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
+/// "foo"))`
+///
+/// Note that fields which are mapped to URL query parameters must have a
+/// primitive type or a repeated primitive type or a non-repeated message type.
+/// In the case of a repeated type, the parameter can be repeated in the URL
+/// as `...?param=A&param=B`. In the case of a message type, each field of the
+/// message is mapped to a separate parameter, such as
+/// `...?foo.a=A&foo.b=B&foo.c=C`.
+///
+/// For HTTP methods that allow a request body, the `body` field
+/// specifies the mapping. Consider a REST update method on the
+/// message resource collection:
+///
+///     service Messaging {
+///       rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+///         option (google.api.http) = {
+///           patch: "/v1/messages/{message_id}"
+///           body: "message"
+///         };
+///       }
+///     }
+///     message UpdateMessageRequest {
+///       string message_id = 1; // mapped to the URL
+///       Message message = 2;   // mapped to the body
+///     }
+///
+/// The following HTTP JSON to RPC mapping is enabled, where the
+/// representation of the JSON in the request body is determined by
+/// protos JSON encoding:
+///
+/// HTTP | gRPC
+/// -----|-----
+/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
+/// "123456" message { text: "Hi!" })`
+///
+/// The special name `*` can be used in the body mapping to define that
+/// every field not bound by the path template should be mapped to the
+/// request body. This enables the following alternative definition of
+/// the update method:
+///
+///     service Messaging {
+///       rpc UpdateMessage(Message) returns (Message) {
+///         option (google.api.http) = {
+///           patch: "/v1/messages/{message_id}"
+///           body: "*"
+///         };
+///       }
+///     }
+///     message Message {
+///       string message_id = 1;
+///       string text = 2;
+///     }
+///
+///
+/// The following HTTP JSON to RPC mapping is enabled:
+///
+/// HTTP | gRPC
+/// -----|-----
+/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
+/// "123456" text: "Hi!")`
+///
+/// Note that when using `*` in the body mapping, it is not possible to
+/// have HTTP parameters, as all fields not bound by the path end in
+/// the body. This makes this option more rarely used in practice when
+/// defining REST APIs. The common usage of `*` is in custom methods
+/// which don't use the URL at all for transferring data.
+///
+/// It is possible to define multiple HTTP methods for one RPC by using
+/// the `additional_bindings` option. Example:
+///
+///     service Messaging {
+///       rpc GetMessage(GetMessageRequest) returns (Message) {
+///         option (google.api.http) = {
+///           get: "/v1/messages/{message_id}"
+///           additional_bindings {
+///             get: "/v1/users/{user_id}/messages/{message_id}"
+///           }
+///         };
+///       }
+///     }
+///     message GetMessageRequest {
+///       string message_id = 1;
+///       string user_id = 2;
+///     }
+///
+/// This enables the following two alternative HTTP JSON to RPC mappings:
+///
+/// HTTP | gRPC
+/// -----|-----
+/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
+/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id:
+/// "123456")`
+///
+/// ## Rules for HTTP mapping
+///
+/// 1. Leaf request fields (recursive expansion nested messages in the request
+///    message) are classified into three categories:
+///    - Fields referred by the path template. They are passed via the URL path.
+///    - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP
+///      request body.
+///    - All other fields are passed via the URL query parameters, and the
+///      parameter name is the field path in the request message. A repeated
+///      field can be represented as multiple query parameters under the same
+///      name.
+/// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields
+///    are passed via URL path and HTTP request body.
+/// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all
+///    fields are passed via URL path and URL query parameters.
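+///
+/// As a combined illustration of these three categories (an editorial
+/// example in the same style as the ones above, not text from the upstream
+/// google/api/http.proto comment): for
+///
+///     rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
+///       option (google.api.http) = {
+///         patch: "/v1/messages/{message_id}"
+///         body: "message"
+///       };
+///     }
+///     message UpdateMessageRequest {
+///       string message_id = 1; // bound by the path template
+///       Message message = 2;   // carried in the HTTP request body
+///       int64 revision = 3;    // becomes URL query parameter `revision`
+///     }
+///
+/// the call `PATCH /v1/messages/123456?revision=2 { "text": "Hi!" }` maps to
+/// `UpdateMessage(message_id: "123456" revision: 2 message { text: "Hi!" })`.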
+/// +/// ### Path template syntax +/// +/// Template = "/" Segments \[ Verb \] ; +/// Segments = Segment { "/" Segment } ; +/// Segment = "*" | "**" | LITERAL | Variable ; +/// Variable = "{" FieldPath \[ "=" Segments \] "}" ; +/// FieldPath = IDENT { "." IDENT } ; +/// Verb = ":" LITERAL ; +/// +/// The syntax `*` matches a single URL path segment. The syntax `**` matches +/// zero or more URL path segments, which must be the last part of the URL path +/// except the `Verb`. +/// +/// The syntax `Variable` matches part of the URL path as specified by its +/// template. A variable template must not contain other variables. If a variable +/// matches a single path segment, its template may be omitted, e.g. `{var}` +/// is equivalent to `{var=*}`. +/// +/// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` +/// contains any reserved character, such characters should be percent-encoded +/// before the matching. +/// +/// If a variable contains exactly one path segment, such as `"{var}"` or +/// `"{var=*}"`, when such a variable is expanded into a URL path on the client +/// side, all characters except `\[-_.~0-9a-zA-Z\]` are percent-encoded. The +/// server side does the reverse decoding. Such variables show up in the +/// [Discovery +/// Document]() as +/// `{var}`. +/// +/// If a variable contains multiple path segments, such as `"{var=foo/*}"` +/// or `"{var=**}"`, when such a variable is expanded into a URL path on the +/// client side, all characters except `\[-_.~/0-9a-zA-Z\]` are percent-encoded. +/// The server side does the reverse decoding, except "%2F" and "%2f" are left +/// unchanged. Such variables show up in the +/// [Discovery +/// Document]() as +/// `{+var}`. +/// +/// ## Using gRPC API Service Configuration +/// +/// gRPC API Service Configuration (service config) is a configuration language +/// for configuring a gRPC service to become a user-facing product. The +/// service config is simply the YAML representation of the `google.api.Service` +/// proto message. +/// +/// As an alternative to annotating your proto file, you can configure gRPC +/// transcoding in your service config YAML files. You do this by specifying a +/// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same +/// effect as the proto annotation. This can be particularly useful if you +/// have a proto that is reused in multiple services. Note that any transcoding +/// specified in the service config will override any matching transcoding +/// configuration in the proto. +/// +/// Example: +/// +/// http: +/// rules: +/// # Selects a gRPC method and applies HttpRule to it. +/// - selector: example.v1.Messaging.GetMessage +/// get: /v1/messages/{message_id}/{sub.subfield} +/// +/// ## Special notes +/// +/// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the +/// proto to JSON conversion must follow the [proto3 +/// specification](). +/// +/// While the single segment variable follows the semantics of +/// [RFC 6570]() Section 3.2.2 Simple String +/// Expansion, the multi segment variable **does not** follow RFC 6570 Section +/// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion +/// does not expand special characters like `?` and `#`, which would lead +/// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding +/// for multi segment variables. 
+///
+/// The path variables **must not** refer to any repeated or mapped field,
+/// because client libraries are not capable of handling such variable expansion.
+///
+/// The path variables **must not** capture the leading "/" character. The reason
+/// is that the most common use case "{var}" does not capture the leading "/"
+/// character. For consistency, all path variables must share the same behavior.
+///
+/// Repeated message fields must not be mapped to URL query parameters, because
+/// no client library can support such complicated mapping.
+///
+/// If an API needs to use a JSON array for request or response body, it can map
+/// the request or response body to a repeated field. However, some gRPC
+/// Transcoding implementations may not support this feature.
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct HttpRule {
+    /// Selects a method to which this rule applies.
+    ///
+    /// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
+    #[prost(string, tag = "1")]
+    pub selector: ::prost::alloc::string::String,
+    /// The name of the request field whose value is mapped to the HTTP request
+    /// body, or `*` for mapping all request fields not captured by the path
+    /// pattern to the HTTP body, or omitted for not having any HTTP request body.
+    ///
+    /// NOTE: the referred field must be present at the top-level of the request
+    /// message type.
+    #[prost(string, tag = "7")]
+    pub body: ::prost::alloc::string::String,
+    /// Optional. The name of the response field whose value is mapped to the HTTP
+    /// response body. When omitted, the entire response message will be used
+    /// as the HTTP response body.
+    ///
+    /// NOTE: The referred field must be present at the top-level of the response
+    /// message type.
+    #[prost(string, tag = "12")]
+    pub response_body: ::prost::alloc::string::String,
+    /// Additional HTTP bindings for the selector. Nested bindings must
+    /// not contain an `additional_bindings` field themselves (that is,
+    /// the nesting may only be one level deep).
+    #[prost(message, repeated, tag = "11")]
+    pub additional_bindings: ::prost::alloc::vec::Vec<HttpRule>,
+    /// Determines the URL pattern is matched by this rules. This pattern can be
+    /// used with any of the {get|put|post|delete|patch} methods. A custom method
+    /// can be defined using the 'custom' field.
+    #[prost(oneof = "http_rule::Pattern", tags = "2, 3, 4, 5, 6, 8")]
+    pub pattern: ::core::option::Option<http_rule::Pattern>,
+}
+/// Nested message and enum types in `HttpRule`.
+pub mod http_rule {
+    /// Determines the URL pattern is matched by this rules. This pattern can be
+    /// used with any of the {get|put|post|delete|patch} methods. A custom method
+    /// can be defined using the 'custom' field.
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Oneof)]
+    pub enum Pattern {
+        /// Maps to HTTP GET. Used for listing and getting information about
+        /// resources.
+        #[prost(string, tag = "2")]
+        Get(::prost::alloc::string::String),
+        /// Maps to HTTP PUT. Used for replacing a resource.
+        #[prost(string, tag = "3")]
+        Put(::prost::alloc::string::String),
+        /// Maps to HTTP POST. Used for creating a resource or performing an action.
+        #[prost(string, tag = "4")]
+        Post(::prost::alloc::string::String),
+        /// Maps to HTTP DELETE. Used for deleting a resource.
+        #[prost(string, tag = "5")]
+        Delete(::prost::alloc::string::String),
+        /// Maps to HTTP PATCH. Used for updating a resource.
+ #[prost(string, tag = "6")] + Patch(::prost::alloc::string::String), + /// The custom pattern is used for specifying an HTTP method that is not + /// included in the `pattern` field, such as HEAD, or "*" to leave the + /// HTTP method unspecified for this rule. The wild-card rule is useful + /// for services that provide content to Web (HTML) clients. + #[prost(message, tag = "8")] + Custom(super::CustomHttpPattern), + } +} +/// A custom pattern is used for defining custom HTTP verb. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CustomHttpPattern { + /// The name of this custom HTTP verb. + #[prost(string, tag = "1")] + pub kind: ::prost::alloc::string::String, + /// The path matched by this custom verb. + #[prost(string, tag = "2")] + pub path: ::prost::alloc::string::String, +} diff --git a/flyrs/src/gen/pb_rust/flyteidl/grpc.gateway.protoc_gen_openapiv2.options.rs b/flyrs/src/gen/pb_rust/flyteidl/grpc.gateway.protoc_gen_openapiv2.options.rs new file mode 100644 index 0000000000..f9ca7e84ba --- /dev/null +++ b/flyrs/src/gen/pb_rust/flyteidl/grpc.gateway.protoc_gen_openapiv2.options.rs @@ -0,0 +1,1019 @@ +/// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. +/// +/// See: +/// +/// Example: +/// +/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +/// info: { +/// title: "Echo API"; +/// version: "1.0"; +/// description: ""; +/// contact: { +/// name: "gRPC-Gateway project"; +/// url: " +/// email: "none@example.com"; +/// }; +/// license: { +/// name: "BSD 3-Clause License"; +/// url: " +/// }; +/// }; +/// schemes: HTTPS; +/// consumes: "application/json"; +/// produces: "application/json"; +/// }; +/// +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Swagger { + /// Specifies the OpenAPI Specification version being used. It can be + /// used by the OpenAPI UI and other clients to interpret the API listing. The + /// value MUST be "2.0". + #[prost(string, tag = "1")] + pub swagger: ::prost::alloc::string::String, + /// Provides metadata about the API. The metadata can be used by the + /// clients if needed. + #[prost(message, optional, tag = "2")] + pub info: ::core::option::Option, + /// The host (name or ip) serving the API. This MUST be the host only and does + /// not include the scheme nor sub-paths. It MAY include a port. If the host is + /// not included, the host serving the documentation is to be used (including + /// the port). The host does not support path templating. + #[prost(string, tag = "3")] + pub host: ::prost::alloc::string::String, + /// The base path on which the API is served, which is relative to the host. If + /// it is not included, the API is served directly under the host. The value + /// MUST start with a leading slash (/). The basePath does not support path + /// templating. + /// Note that using `base_path` does not change the endpoint paths that are + /// generated in the resulting OpenAPI file. If you wish to use `base_path` + /// with relatively generated OpenAPI paths, the `base_path` prefix must be + /// manually removed from your `google.api.http` paths and your code changed to + /// serve the API from the `base_path`. + #[prost(string, tag = "4")] + pub base_path: ::prost::alloc::string::String, + /// The transfer protocol of the API. Values MUST be from the list: "http", + /// "https", "ws", "wss". 
If the schemes is not included, the default scheme to + /// be used is the one used to access the OpenAPI definition itself. + #[prost(enumeration = "Scheme", repeated, tag = "5")] + pub schemes: ::prost::alloc::vec::Vec, + /// A list of MIME types the APIs can consume. This is global to all APIs but + /// can be overridden on specific API calls. Value MUST be as described under + /// Mime Types. + #[prost(string, repeated, tag = "6")] + pub consumes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// A list of MIME types the APIs can produce. This is global to all APIs but + /// can be overridden on specific API calls. Value MUST be as described under + /// Mime Types. + #[prost(string, repeated, tag = "7")] + pub produces: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// An object to hold responses that can be used across operations. This + /// property does not define global responses for all operations. + #[prost(map = "string, message", tag = "10")] + pub responses: ::std::collections::HashMap<::prost::alloc::string::String, Response>, + /// Security scheme definitions that can be used across the specification. + #[prost(message, optional, tag = "11")] + pub security_definitions: ::core::option::Option, + /// A declaration of which security schemes are applied for the API as a whole. + /// The list of values describes alternative security schemes that can be used + /// (that is, there is a logical OR between the security requirements). + /// Individual operations can override this definition. + #[prost(message, repeated, tag = "12")] + pub security: ::prost::alloc::vec::Vec, + /// A list of tags for API documentation control. Tags can be used for logical + /// grouping of operations by resources or any other qualifier. + #[prost(message, repeated, tag = "13")] + pub tags: ::prost::alloc::vec::Vec, + /// Additional external documentation. + #[prost(message, optional, tag = "14")] + pub external_docs: ::core::option::Option, + /// Custom properties that start with "x-" such as "x-foo" used to describe + /// extra functionality that is not covered by the standard OpenAPI Specification. + /// See: + #[prost(map = "string, message", tag = "15")] + pub extensions: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost_types::Value, + >, +} +/// `Operation` is a representation of OpenAPI v2 specification's Operation object. +/// +/// See: +/// +/// Example: +/// +/// service EchoService { +/// rpc Echo(SimpleMessage) returns (SimpleMessage) { +/// option (google.api.http) = { +/// get: "/v1/example/echo/{id}" +/// }; +/// +/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { +/// summary: "Get a message."; +/// operation_id: "getMessage"; +/// tags: "echo"; +/// responses: { +/// key: "200" +/// value: { +/// description: "OK"; +/// } +/// } +/// }; +/// } +/// } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Operation { + /// A list of tags for API documentation control. Tags can be used for logical + /// grouping of operations by resources or any other qualifier. + #[prost(string, repeated, tag = "1")] + pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// A short summary of what the operation does. For maximum readability in the + /// swagger-ui, this field SHOULD be less than 120 characters. + #[prost(string, tag = "2")] + pub summary: ::prost::alloc::string::String, + /// A verbose explanation of the operation behavior. 
GFM syntax can be used for + /// rich text representation. + #[prost(string, tag = "3")] + pub description: ::prost::alloc::string::String, + /// Additional external documentation for this operation. + #[prost(message, optional, tag = "4")] + pub external_docs: ::core::option::Option, + /// Unique string used to identify the operation. The id MUST be unique among + /// all operations described in the API. Tools and libraries MAY use the + /// operationId to uniquely identify an operation, therefore, it is recommended + /// to follow common programming naming conventions. + #[prost(string, tag = "5")] + pub operation_id: ::prost::alloc::string::String, + /// A list of MIME types the operation can consume. This overrides the consumes + /// definition at the OpenAPI Object. An empty value MAY be used to clear the + /// global definition. Value MUST be as described under Mime Types. + #[prost(string, repeated, tag = "6")] + pub consumes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// A list of MIME types the operation can produce. This overrides the produces + /// definition at the OpenAPI Object. An empty value MAY be used to clear the + /// global definition. Value MUST be as described under Mime Types. + #[prost(string, repeated, tag = "7")] + pub produces: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// The list of possible responses as they are returned from executing this + /// operation. + #[prost(map = "string, message", tag = "9")] + pub responses: ::std::collections::HashMap<::prost::alloc::string::String, Response>, + /// The transfer protocol for the operation. Values MUST be from the list: + /// "http", "https", "ws", "wss". The value overrides the OpenAPI Object + /// schemes definition. + #[prost(enumeration = "Scheme", repeated, tag = "10")] + pub schemes: ::prost::alloc::vec::Vec, + /// Declares this operation to be deprecated. Usage of the declared operation + /// should be refrained. Default value is false. + #[prost(bool, tag = "11")] + pub deprecated: bool, + /// A declaration of which security schemes are applied for this operation. The + /// list of values describes alternative security schemes that can be used + /// (that is, there is a logical OR between the security requirements). This + /// definition overrides any declared top-level security. To remove a top-level + /// security declaration, an empty array can be used. + #[prost(message, repeated, tag = "12")] + pub security: ::prost::alloc::vec::Vec, + /// Custom properties that start with "x-" such as "x-foo" used to describe + /// extra functionality that is not covered by the standard OpenAPI Specification. + /// See: + #[prost(map = "string, message", tag = "13")] + pub extensions: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost_types::Value, + >, + /// Custom parameters such as HTTP request headers. + /// See: + /// and + #[prost(message, optional, tag = "14")] + pub parameters: ::core::option::Option, +} +/// `Parameters` is a representation of OpenAPI v2 specification's parameters object. +/// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only +/// allow header parameters to be set here since we do not want users specifying custom non-header +/// parameters beyond those inferred from the Protobuf schema. +/// See: +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Parameters { + /// `Headers` is one or more HTTP header parameter. 
+ /// See: + #[prost(message, repeated, tag = "1")] + pub headers: ::prost::alloc::vec::Vec, +} +/// `HeaderParameter` a HTTP header parameter. +/// See: +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HeaderParameter { + /// `Name` is the header name. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// `Description` is a short description of the header. + #[prost(string, tag = "2")] + pub description: ::prost::alloc::string::String, + /// `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + /// See: + #[prost(enumeration = "header_parameter::Type", tag = "3")] + pub r#type: i32, + /// `Format` The extending format for the previously mentioned type. + #[prost(string, tag = "4")] + pub format: ::prost::alloc::string::String, + /// `Required` indicates if the header is optional + #[prost(bool, tag = "5")] + pub required: bool, +} +/// Nested message and enum types in `HeaderParameter`. +pub mod header_parameter { + /// `Type` is a a supported HTTP header type. + /// See + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Type { + Unknown = 0, + String = 1, + Number = 2, + Integer = 3, + Boolean = 4, + } + impl Type { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Type::Unknown => "UNKNOWN", + Type::String => "STRING", + Type::Number => "NUMBER", + Type::Integer => "INTEGER", + Type::Boolean => "BOOLEAN", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNKNOWN" => Some(Self::Unknown), + "STRING" => Some(Self::String), + "NUMBER" => Some(Self::Number), + "INTEGER" => Some(Self::Integer), + "BOOLEAN" => Some(Self::Boolean), + _ => None, + } + } + } +} +/// `Header` is a representation of OpenAPI v2 specification's Header object. +/// +/// See: +/// +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Header { + /// `Description` is a short description of the header. + #[prost(string, tag = "1")] + pub description: ::prost::alloc::string::String, + /// The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. + #[prost(string, tag = "2")] + pub r#type: ::prost::alloc::string::String, + /// `Format` The extending format for the previously mentioned type. + #[prost(string, tag = "3")] + pub format: ::prost::alloc::string::String, + /// `Default` Declares the value of the header that the server will use if none is provided. + /// See: + /// Unlike JSON Schema this value MUST conform to the defined type for the header. + #[prost(string, tag = "6")] + pub default: ::prost::alloc::string::String, + /// 'Pattern' See + #[prost(string, tag = "13")] + pub pattern: ::prost::alloc::string::String, +} +/// `Response` is a representation of OpenAPI v2 specification's Response object. 
+/// +/// See: +/// +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Response { + /// `Description` is a short description of the response. + /// GFM syntax can be used for rich text representation. + #[prost(string, tag = "1")] + pub description: ::prost::alloc::string::String, + /// `Schema` optionally defines the structure of the response. + /// If `Schema` is not provided, it means there is no content to the response. + #[prost(message, optional, tag = "2")] + pub schema: ::core::option::Option, + /// `Headers` A list of headers that are sent with the response. + /// `Header` name is expected to be a string in the canonical format of the MIME header key + /// See: + #[prost(map = "string, message", tag = "3")] + pub headers: ::std::collections::HashMap<::prost::alloc::string::String, Header>, + /// `Examples` gives per-mimetype response examples. + /// See: + #[prost(map = "string, string", tag = "4")] + pub examples: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, + /// Custom properties that start with "x-" such as "x-foo" used to describe + /// extra functionality that is not covered by the standard OpenAPI Specification. + /// See: + #[prost(map = "string, message", tag = "5")] + pub extensions: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost_types::Value, + >, +} +/// `Info` is a representation of OpenAPI v2 specification's Info object. +/// +/// See: +/// +/// Example: +/// +/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +/// info: { +/// title: "Echo API"; +/// version: "1.0"; +/// description: ""; +/// contact: { +/// name: "gRPC-Gateway project"; +/// url: " +/// email: "none@example.com"; +/// }; +/// license: { +/// name: "BSD 3-Clause License"; +/// url: " +/// }; +/// }; +/// ... +/// }; +/// +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Info { + /// The title of the application. + #[prost(string, tag = "1")] + pub title: ::prost::alloc::string::String, + /// A short description of the application. GFM syntax can be used for rich + /// text representation. + #[prost(string, tag = "2")] + pub description: ::prost::alloc::string::String, + /// The Terms of Service for the API. + #[prost(string, tag = "3")] + pub terms_of_service: ::prost::alloc::string::String, + /// The contact information for the exposed API. + #[prost(message, optional, tag = "4")] + pub contact: ::core::option::Option, + /// The license information for the exposed API. + #[prost(message, optional, tag = "5")] + pub license: ::core::option::Option, + /// Provides the version of the application API (not to be confused + /// with the specification version). + #[prost(string, tag = "6")] + pub version: ::prost::alloc::string::String, + /// Custom properties that start with "x-" such as "x-foo" used to describe + /// extra functionality that is not covered by the standard OpenAPI Specification. + /// See: + #[prost(map = "string, message", tag = "7")] + pub extensions: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost_types::Value, + >, +} +/// `Contact` is a representation of OpenAPI v2 specification's Contact object. +/// +/// See: +/// +/// Example: +/// +/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +/// info: { +/// ... 
+/// contact: { +/// name: "gRPC-Gateway project"; +/// url: " +/// email: "none@example.com"; +/// }; +/// ... +/// }; +/// ... +/// }; +/// +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Contact { + /// The identifying name of the contact person/organization. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// The URL pointing to the contact information. MUST be in the format of a + /// URL. + #[prost(string, tag = "2")] + pub url: ::prost::alloc::string::String, + /// The email address of the contact person/organization. MUST be in the format + /// of an email address. + #[prost(string, tag = "3")] + pub email: ::prost::alloc::string::String, +} +/// `License` is a representation of OpenAPI v2 specification's License object. +/// +/// See: +/// +/// Example: +/// +/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +/// info: { +/// ... +/// license: { +/// name: "BSD 3-Clause License"; +/// url: " +/// }; +/// ... +/// }; +/// ... +/// }; +/// +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct License { + /// The license name used for the API. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// A URL to the license used for the API. MUST be in the format of a URL. + #[prost(string, tag = "2")] + pub url: ::prost::alloc::string::String, +} +/// `ExternalDocumentation` is a representation of OpenAPI v2 specification's +/// ExternalDocumentation object. +/// +/// See: +/// +/// Example: +/// +/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { +/// ... +/// external_docs: { +/// description: "More about gRPC-Gateway"; +/// url: " +/// } +/// ... +/// }; +/// +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExternalDocumentation { + /// A short description of the target documentation. GFM syntax can be used for + /// rich text representation. + #[prost(string, tag = "1")] + pub description: ::prost::alloc::string::String, + /// The URL for the target documentation. Value MUST be in the format + /// of a URL. + #[prost(string, tag = "2")] + pub url: ::prost::alloc::string::String, +} +/// `Schema` is a representation of OpenAPI v2 specification's Schema object. +/// +/// See: +/// +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Schema { + #[prost(message, optional, tag = "1")] + pub json_schema: ::core::option::Option, + /// Adds support for polymorphism. The discriminator is the schema property + /// name that is used to differentiate between other schema that inherit this + /// schema. The property name used MUST be defined at this schema and it MUST + /// be in the required property list. When used, the value MUST be the name of + /// this schema or any schema that inherits it. + #[prost(string, tag = "2")] + pub discriminator: ::prost::alloc::string::String, + /// Relevant only for Schema "properties" definitions. Declares the property as + /// "read only". This means that it MAY be sent as part of a response but MUST + /// NOT be sent as part of the request. Properties marked as readOnly being + /// true SHOULD NOT be in the required list of the defined schema. Default + /// value is false. + #[prost(bool, tag = "3")] + pub read_only: bool, + /// Additional external documentation for this schema. 
+ #[prost(message, optional, tag = "5")] + pub external_docs: ::core::option::Option, + /// A free-form property to include an example of an instance for this schema in JSON. + /// This is copied verbatim to the output. + #[prost(string, tag = "6")] + pub example: ::prost::alloc::string::String, +} +/// `JSONSchema` represents properties from JSON Schema taken, and as used, in +/// the OpenAPI v2 spec. +/// +/// This includes changes made by OpenAPI v2. +/// +/// See: +/// +/// See also: +/// +/// +/// Example: +/// +/// message SimpleMessage { +/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = { +/// json_schema: { +/// title: "SimpleMessage" +/// description: "A simple message." +/// required: \["id"\] +/// } +/// }; +/// +/// // Id represents the message identifier. +/// string id = 1; [ +/// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { +/// description: "The unique identifier of the simple message." +/// }]; +/// } +/// +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct JsonSchema { + /// Ref is used to define an external reference to include in the message. + /// This could be a fully qualified proto message reference, and that type must + /// be imported into the protofile. If no message is identified, the Ref will + /// be used verbatim in the output. + /// For example: + /// `ref: ".google.protobuf.Timestamp"`. + #[prost(string, tag = "3")] + pub r#ref: ::prost::alloc::string::String, + /// The title of the schema. + #[prost(string, tag = "5")] + pub title: ::prost::alloc::string::String, + /// A short description of the schema. + #[prost(string, tag = "6")] + pub description: ::prost::alloc::string::String, + #[prost(string, tag = "7")] + pub default: ::prost::alloc::string::String, + #[prost(bool, tag = "8")] + pub read_only: bool, + /// A free-form property to include a JSON example of this field. This is copied + /// verbatim to the output swagger.json. Quotes must be escaped. + /// This property is the same for 2.0 and 3.0.0 + #[prost(string, tag = "9")] + pub example: ::prost::alloc::string::String, + #[prost(double, tag = "10")] + pub multiple_of: f64, + /// Maximum represents an inclusive upper limit for a numeric instance. The + /// value of MUST be a number, + #[prost(double, tag = "11")] + pub maximum: f64, + #[prost(bool, tag = "12")] + pub exclusive_maximum: bool, + /// minimum represents an inclusive lower limit for a numeric instance. The + /// value of MUST be a number, + #[prost(double, tag = "13")] + pub minimum: f64, + #[prost(bool, tag = "14")] + pub exclusive_minimum: bool, + #[prost(uint64, tag = "15")] + pub max_length: u64, + #[prost(uint64, tag = "16")] + pub min_length: u64, + #[prost(string, tag = "17")] + pub pattern: ::prost::alloc::string::String, + #[prost(uint64, tag = "20")] + pub max_items: u64, + #[prost(uint64, tag = "21")] + pub min_items: u64, + #[prost(bool, tag = "22")] + pub unique_items: bool, + #[prost(uint64, tag = "24")] + pub max_properties: u64, + #[prost(uint64, tag = "25")] + pub min_properties: u64, + #[prost(string, repeated, tag = "26")] + pub required: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Items in 'array' must be unique. 
+ #[prost(string, repeated, tag = "34")] + pub array: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(enumeration = "json_schema::JsonSchemaSimpleTypes", repeated, tag = "35")] + pub r#type: ::prost::alloc::vec::Vec, + /// `Format` + #[prost(string, tag = "36")] + pub format: ::prost::alloc::string::String, + /// Items in `enum` must be unique + #[prost(string, repeated, tag = "46")] + pub r#enum: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// Additional field level properties used when generating the OpenAPI v2 file. + #[prost(message, optional, tag = "1001")] + pub field_configuration: ::core::option::Option, + /// Custom properties that start with "x-" such as "x-foo" used to describe + /// extra functionality that is not covered by the standard OpenAPI Specification. + /// See: + #[prost(map = "string, message", tag = "48")] + pub extensions: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost_types::Value, + >, +} +/// Nested message and enum types in `JSONSchema`. +pub mod json_schema { + /// 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file. + /// These properties are not defined by OpenAPIv2, but they are used to control the generation. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct FieldConfiguration { + /// Alternative parameter name when used as path parameter. If set, this will + /// be used as the complete parameter name when this field is used as a path + /// parameter. Use this to avoid having auto generated path parameter names + /// for overlapping paths. + #[prost(string, tag = "47")] + pub path_param_name: ::prost::alloc::string::String, + } + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum JsonSchemaSimpleTypes { + Unknown = 0, + Array = 1, + Boolean = 2, + Integer = 3, + Null = 4, + Number = 5, + Object = 6, + String = 7, + } + impl JsonSchemaSimpleTypes { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + JsonSchemaSimpleTypes::Unknown => "UNKNOWN", + JsonSchemaSimpleTypes::Array => "ARRAY", + JsonSchemaSimpleTypes::Boolean => "BOOLEAN", + JsonSchemaSimpleTypes::Integer => "INTEGER", + JsonSchemaSimpleTypes::Null => "NULL", + JsonSchemaSimpleTypes::Number => "NUMBER", + JsonSchemaSimpleTypes::Object => "OBJECT", + JsonSchemaSimpleTypes::String => "STRING", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNKNOWN" => Some(Self::Unknown), + "ARRAY" => Some(Self::Array), + "BOOLEAN" => Some(Self::Boolean), + "INTEGER" => Some(Self::Integer), + "NULL" => Some(Self::Null), + "NUMBER" => Some(Self::Number), + "OBJECT" => Some(Self::Object), + "STRING" => Some(Self::String), + _ => None, + } + } + } +} +/// `Tag` is a representation of OpenAPI v2 specification's Tag object. +/// +/// See: +/// +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Tag { + /// The name of the tag. 
Use it to allow override of the name of a + /// global Tag object, then use that name to reference the tag throughout the + /// OpenAPI file. + #[prost(string, tag = "1")] + pub name: ::prost::alloc::string::String, + /// A short description for the tag. GFM syntax can be used for rich text + /// representation. + #[prost(string, tag = "2")] + pub description: ::prost::alloc::string::String, + /// Additional external documentation for this tag. + #[prost(message, optional, tag = "3")] + pub external_docs: ::core::option::Option, + /// Custom properties that start with "x-" such as "x-foo" used to describe + /// extra functionality that is not covered by the standard OpenAPI Specification. + /// See: + #[prost(map = "string, message", tag = "4")] + pub extensions: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost_types::Value, + >, +} +/// `SecurityDefinitions` is a representation of OpenAPI v2 specification's +/// Security Definitions object. +/// +/// See: +/// +/// A declaration of the security schemes available to be used in the +/// specification. This does not enforce the security schemes on the operations +/// and only serves to provide the relevant details for each scheme. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SecurityDefinitions { + /// A single security scheme definition, mapping a "name" to the scheme it + /// defines. + #[prost(map = "string, message", tag = "1")] + pub security: ::std::collections::HashMap< + ::prost::alloc::string::String, + SecurityScheme, + >, +} +/// `SecurityScheme` is a representation of OpenAPI v2 specification's +/// Security Scheme object. +/// +/// See: +/// +/// Allows the definition of a security scheme that can be used by the +/// operations. Supported schemes are basic authentication, an API key (either as +/// a header or as a query parameter) and OAuth2's common flows (implicit, +/// password, application and access code). +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SecurityScheme { + /// The type of the security scheme. Valid values are "basic", + /// "apiKey" or "oauth2". + #[prost(enumeration = "security_scheme::Type", tag = "1")] + pub r#type: i32, + /// A short description for security scheme. + #[prost(string, tag = "2")] + pub description: ::prost::alloc::string::String, + /// The name of the header or query parameter to be used. + /// Valid for apiKey. + #[prost(string, tag = "3")] + pub name: ::prost::alloc::string::String, + /// The location of the API key. Valid values are "query" or + /// "header". + /// Valid for apiKey. + #[prost(enumeration = "security_scheme::In", tag = "4")] + pub r#in: i32, + /// The flow used by the OAuth2 security scheme. Valid values are + /// "implicit", "password", "application" or "accessCode". + /// Valid for oauth2. + #[prost(enumeration = "security_scheme::Flow", tag = "5")] + pub flow: i32, + /// The authorization URL to be used for this flow. This SHOULD be in + /// the form of a URL. + /// Valid for oauth2/implicit and oauth2/accessCode. + #[prost(string, tag = "6")] + pub authorization_url: ::prost::alloc::string::String, + /// The token URL to be used for this flow. This SHOULD be in the + /// form of a URL. + /// Valid for oauth2/password, oauth2/application and oauth2/accessCode. + #[prost(string, tag = "7")] + pub token_url: ::prost::alloc::string::String, + /// The available scopes for the OAuth2 security scheme. + /// Valid for oauth2. 
+ #[prost(message, optional, tag = "8")] + pub scopes: ::core::option::Option, + /// Custom properties that start with "x-" such as "x-foo" used to describe + /// extra functionality that is not covered by the standard OpenAPI Specification. + /// See: + #[prost(map = "string, message", tag = "9")] + pub extensions: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost_types::Value, + >, +} +/// Nested message and enum types in `SecurityScheme`. +pub mod security_scheme { + /// The type of the security scheme. Valid values are "basic", + /// "apiKey" or "oauth2". + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Type { + Invalid = 0, + Basic = 1, + ApiKey = 2, + Oauth2 = 3, + } + impl Type { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Type::Invalid => "TYPE_INVALID", + Type::Basic => "TYPE_BASIC", + Type::ApiKey => "TYPE_API_KEY", + Type::Oauth2 => "TYPE_OAUTH2", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "TYPE_INVALID" => Some(Self::Invalid), + "TYPE_BASIC" => Some(Self::Basic), + "TYPE_API_KEY" => Some(Self::ApiKey), + "TYPE_OAUTH2" => Some(Self::Oauth2), + _ => None, + } + } + } + /// The location of the API key. Valid values are "query" or "header". + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum In { + Invalid = 0, + Query = 1, + Header = 2, + } + impl In { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + In::Invalid => "IN_INVALID", + In::Query => "IN_QUERY", + In::Header => "IN_HEADER", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "IN_INVALID" => Some(Self::Invalid), + "IN_QUERY" => Some(Self::Query), + "IN_HEADER" => Some(Self::Header), + _ => None, + } + } + } + /// The flow used by the OAuth2 security scheme. Valid values are + /// "implicit", "password", "application" or "accessCode". + #[derive( + Clone, + Copy, + Debug, + PartialEq, + Eq, + Hash, + PartialOrd, + Ord, + ::prost::Enumeration + )] + #[repr(i32)] + pub enum Flow { + Invalid = 0, + Implicit = 1, + Password = 2, + Application = 3, + AccessCode = 4, + } + impl Flow { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Flow::Invalid => "FLOW_INVALID", + Flow::Implicit => "FLOW_IMPLICIT", + Flow::Password => "FLOW_PASSWORD", + Flow::Application => "FLOW_APPLICATION", + Flow::AccessCode => "FLOW_ACCESS_CODE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "FLOW_INVALID" => Some(Self::Invalid), + "FLOW_IMPLICIT" => Some(Self::Implicit), + "FLOW_PASSWORD" => Some(Self::Password), + "FLOW_APPLICATION" => Some(Self::Application), + "FLOW_ACCESS_CODE" => Some(Self::AccessCode), + _ => None, + } + } + } +} +/// `SecurityRequirement` is a representation of OpenAPI v2 specification's +/// Security Requirement object. +/// +/// See: +/// +/// Lists the required security schemes to execute this operation. The object can +/// have multiple security schemes declared in it which are all required (that +/// is, there is a logical AND between the schemes). +/// +/// The name used for each property MUST correspond to a security scheme +/// declared in the Security Definitions. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SecurityRequirement { + /// Each name must correspond to a security scheme which is declared in + /// the Security Definitions. If the security scheme is of type "oauth2", + /// then the value is a list of scope names required for the execution. + /// For other security scheme types, the array MUST be empty. + #[prost(map = "string, message", tag = "1")] + pub security_requirement: ::std::collections::HashMap< + ::prost::alloc::string::String, + security_requirement::SecurityRequirementValue, + >, +} +/// Nested message and enum types in `SecurityRequirement`. +pub mod security_requirement { + /// If the security scheme is of type "oauth2", then the value is a list of + /// scope names required for the execution. For other security scheme types, + /// the array MUST be empty. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Message)] + pub struct SecurityRequirementValue { + #[prost(string, repeated, tag = "1")] + pub scope: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + } +} +/// `Scopes` is a representation of OpenAPI v2 specification's Scopes object. +/// +/// See: +/// +/// Lists the available scopes for an OAuth2 security scheme. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Scopes { + /// Maps between a name of a scope to a short description of it (as the value + /// of the property). + #[prost(map = "string, string", tag = "1")] + pub scope: ::std::collections::HashMap< + ::prost::alloc::string::String, + ::prost::alloc::string::String, + >, +} +/// Scheme describes the schemes supported by the OpenAPI Swagger +/// and Operation objects. +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum Scheme { + Unknown = 0, + Http = 1, + Https = 2, + Ws = 3, + Wss = 4, +} +impl Scheme { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Scheme::Unknown => "UNKNOWN", + Scheme::Http => "HTTP", + Scheme::Https => "HTTPS", + Scheme::Ws => "WS", + Scheme::Wss => "WSS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
+        match value {
+            "UNKNOWN" => Some(Self::Unknown),
+            "HTTP" => Some(Self::Http),
+            "HTTPS" => Some(Self::Https),
+            "WS" => Some(Self::Ws),
+            "WSS" => Some(Self::Wss),
+            _ => None,
+        }
+    }
+}

From 3f8b882d7a85fdd7c585fd060335c6591f9db11e Mon Sep 17 00:00:00 2001
From: Austin Liu
Date: Fri, 5 Apr 2024 00:58:39 +0800
Subject: [PATCH 03/16] [wip]refactor: add regression test chart

Signed-off-by: Austin Liu

---
 flyrs/test_flytekit_remote.py | 22 ++++++++++++++++++----
 1 file changed, 18 insertions(+), 4 deletions(-)

diff --git a/flyrs/test_flytekit_remote.py b/flyrs/test_flytekit_remote.py
index 6415226053..f62d72accb 100644
--- a/flyrs/test_flytekit_remote.py
+++ b/flyrs/test_flytekit_remote.py
@@ -1,4 +1,5 @@
 import timeit
+import matplotlib.pyplot as plt
 
 from flytekit.configuration import Config
 from flytekit.remote import FlyteRemote
@@ -32,7 +33,20 @@
 fetch_task_in_py = """task_py = remote_py.fetch_task(project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw")"""
 fetch_task_in_rs = """task_rs = remote_rs.fetch_task(project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw")"""
 
-# Python gRPC
-print(sum(timeit.repeat(fetch_task_in_py, setup=setup, repeat=10, number=100)))
-# Rust gRPC
-print(sum(timeit.repeat(fetch_task_in_rs, setup=setup, repeat=10, number=100)))
+
+r = 10
+Xs = [1, 10, 100, 1000]
+py_elapsed, rs_elapsed = [], []
+for x in Xs:
+    # Python gRPC
+    py_elapsed.append(sum(timeit.repeat(fetch_task_in_py, setup=setup, repeat=r, number=x)) / r)
+    print(py_elapsed[-1])
+    # Rust gRPC
+    rs_elapsed.append(sum(timeit.repeat(fetch_task_in_rs, setup=setup, repeat=r, number=x)) / r)
+    print(rs_elapsed[-1])
+plt.xlabel('# of fetched tasks')
+plt.ylabel('average elapsed time (s)')
+plt.plot(Xs, py_elapsed, 'r-', label='Python gRPC')
+plt.plot(Xs, rs_elapsed, 'b-', label='Rust gRPC')
+plt.legend()
+plt.savefig("perf.png")
\ No newline at end of file

From 2739cb84cff9d50da081a34832b5e7d80b2993da Mon Sep 17 00:00:00 2001
From: Austin Liu
Date: Fri, 5 Apr 2024 04:44:44 +0800
Subject: [PATCH 04/16] [wip]refactor: clean up

Signed-off-by: Austin Liu

---
 flyrs/src/gen/pb_rust/flyteidl/Cargo.toml | 15 ++++++++++
 flyrs/src/gen/pb_rust/flyteidl/lib.rs     | 27 ++++++++++++++++++
 flyrs/src/lib.rs                          | 34 ++---------------------
 3 files changed, 44 insertions(+), 32 deletions(-)
 create mode 100644 flyrs/src/gen/pb_rust/flyteidl/Cargo.toml
 create mode 100644 flyrs/src/gen/pb_rust/flyteidl/lib.rs

diff --git a/flyrs/src/gen/pb_rust/flyteidl/Cargo.toml b/flyrs/src/gen/pb_rust/flyteidl/Cargo.toml
new file mode 100644
index 0000000000..ee15f0bffb
--- /dev/null
+++ b/flyrs/src/gen/pb_rust/flyteidl/Cargo.toml
@@ -0,0 +1,15 @@
+[package]
+name = "flyteidl"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+prost = "0.12.3"
+prost-types = "0.12.3"
+tonic = "0.11.0"
+
+[lib]
+name = "flyteidl"
+path = "lib.rs"
diff --git a/flyrs/src/gen/pb_rust/flyteidl/lib.rs b/flyrs/src/gen/pb_rust/flyteidl/lib.rs
new file mode 100644
index 0000000000..7720f6fdc4
--- /dev/null
+++ b/flyrs/src/gen/pb_rust/flyteidl/lib.rs
@@ -0,0 +1,27 @@
+pub mod datacatalog {
+    include!("datacatalog.rs");
+}
+pub mod flyteidl {
+
+    pub mod admin {
+        include!("flyteidl.admin.rs");
+    }
+    pub mod cache {
+        include!("flyteidl.cacheservice.rs");
+    }
+    pub mod core {
+        include!("flyteidl.core.rs");
+    }
+    pub mod event {
+        include!("flyteidl.event.rs");
+    }
+    pub mod plugins {
+        include!("flyteidl.plugins.rs");
+        pub mod kubeflow {
+            include!("flyteidl.plugins.kubeflow.rs");
+        }
+    }
+    pub mod service {
+        include!("flyteidl.service.rs");
+    }
+}
\ No newline at end of file
diff --git a/flyrs/src/lib.rs b/flyrs/src/lib.rs
index 3732282393..5fda151984 100644
--- a/flyrs/src/lib.rs
+++ b/flyrs/src/lib.rs
@@ -4,38 +4,8 @@
 use pyo3::types::PyBytes;
 use tokio::runtime::{Builder, Runtime};
 use tonic::{transport::{Channel}};
-// We use env macro here, typically cannot have executable code, like `std:dev is_ok()`` in this case, directly in the global scope outside of function bodies in Rust.
-// Need better error handling if environment variable is empty
-pub mod datacatalog {
-    include!(concat!(env!("PB_OUT_DIR"), "datacatalog.rs"));
-}
-pub mod flyteidl {
-    pub mod admin {
-        include!(concat!(env!("PB_OUT_DIR"), "flyteidl.admin.rs"));
-    }
-    pub mod cache {
-        include!(concat!(env!("PB_OUT_DIR"), "flyteidl.cacheservice.rs"));
-    }
-    pub mod core {
-        include!(concat!(env!("PB_OUT_DIR"), "flyteidl.core.rs"));
-    }
-    pub mod event {
-        include!(concat!(env!("PB_OUT_DIR"), "flyteidl.event.rs"));
-    }
-    pub mod plugins {
-        include!(concat!(env!("PB_OUT_DIR"), "flyteidl.plugins.rs"));
-        pub mod kubeflow{
-            include!(concat!(env!("PB_OUT_DIR"), "flyteidl.plugins.kubeflow.rs"));
-        }
-    }
-    pub mod service {
-        include!(concat!(env!("PB_OUT_DIR"), "flyteidl.service.rs"));
-    }
-}
-
-
-use crate::flyteidl::service::{TaskGetResponse, admin_service_client::AdminServiceClient, signal_service_client, data_proxy_service_client};
-use crate::flyteidl::admin::{Task, ObjectGetRequest, ResourceListRequest, TaskExecutionGetRequest};
+use flyteidl::flyteidl::service::admin_service_client::AdminServiceClient;
+use flyteidl::flyteidl::admin::{Task, ObjectGetRequest, ResourceListRequest, TaskExecutionGetRequest};
 
 // Unlike the normal use case of PyO3, we don't have to add attribute macros such as #[pyclass] or #[pymethods] to all of our flyteidl structs.
 // In this case, we only use PyO3 to expose the client class and its methods to Python (FlyteKit).
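To make the PyO3 boundary described in the comment above concrete, here is a
minimal sketch of the kind of wrapper flyrs exposes. The struct name,
constructor, and error handling are illustrative assumptions rather than the
exact contents of flyrs/src/lib.rs; requests and responses are assumed to
cross the FFI boundary as serialized protobuf bytes.

    use prost::Message;
    use pyo3::prelude::*;
    use pyo3::types::PyBytes;
    use tokio::runtime::{Builder, Runtime};
    use tonic::transport::Channel;

    use flyteidl::flyteidl::admin::{ObjectGetRequest, Task};
    use flyteidl::flyteidl::service::admin_service_client::AdminServiceClient;

    // Only the client type and its methods are exposed to Python; the prost
    // structs themselves never need #[pyclass]/#[pymethods] annotations.
    #[pyclass]
    pub struct FlyteClient {
        runtime: Runtime,
        admin: AdminServiceClient<Channel>,
    }

    #[pymethods]
    impl FlyteClient {
        #[new]
        pub fn new(endpoint: String) -> PyResult<Self> {
            let runtime = Builder::new_multi_thread().enable_all().build().unwrap();
            // Assumption: a blocking connect at construction time is acceptable.
            // Error handling is elided (unwrap) for brevity.
            let channel = runtime
                .block_on(Channel::from_shared(endpoint).unwrap().connect())
                .unwrap();
            Ok(FlyteClient { runtime, admin: AdminServiceClient::new(channel) })
        }

        // FlyteKit hands over a serialized ObjectGetRequest and receives a
        // serialized Task back, so the existing Python flyteidl classes can
        // keep doing the (de)serialization on the Python side.
        pub fn get_task(&mut self, py: Python, request: &PyBytes) -> PyResult<PyObject> {
            let req = ObjectGetRequest::decode(request.as_bytes()).unwrap();
            let task: Task = self
                .runtime
                .block_on(self.admin.get_task(req))
                .unwrap()
                .into_inner();
            Ok(PyBytes::new(py, &task.encode_to_vec()).into())
        }
    }

Passing bytes instead of wrapping every generated struct keeps the PyO3
surface small, at the cost of one extra encode/decode round trip per call.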
From 81c0183b5e779eca5a74e293e02aa9c634eb2a4c Mon Sep 17 00:00:00 2001 From: Austin Liu Date: Fri, 5 Apr 2024 04:46:02 +0800 Subject: [PATCH 05/16] [wip]refactor: clean up Signed-off-by: Austin Liu --- flyrs/Cargo.toml | 6 +- flyrs/build.rs | 41 - flyrs/perf.png | Bin 0 -> 30593 bytes flyrs/protos/flyteidl/admin/agent.proto | 254 ---- .../flyteidl/admin/cluster_assignment.proto | 11 - flyrs/protos/flyteidl/admin/common.proto | 327 ---- .../flyteidl/admin/description_entity.proto | 95 -- flyrs/protos/flyteidl/admin/event.proto | 70 - flyrs/protos/flyteidl/admin/execution.proto | 424 ------ flyrs/protos/flyteidl/admin/launch_plan.proto | 222 --- .../flyteidl/admin/matchable_resource.proto | 190 --- .../flyteidl/admin/node_execution.proto | 245 --- .../protos/flyteidl/admin/notification.proto | 27 - flyrs/protos/flyteidl/admin/project.proto | 110 -- .../flyteidl/admin/project_attributes.proto | 69 - .../admin/project_domain_attributes.proto | 80 - flyrs/protos/flyteidl/admin/schedule.proto | 43 - flyrs/protos/flyteidl/admin/signal.proto | 86 -- flyrs/protos/flyteidl/admin/task.proto | 71 - .../flyteidl/admin/task_execution.proto | 168 --- flyrs/protos/flyteidl/admin/version.proto | 27 - flyrs/protos/flyteidl/admin/workflow.proto | 92 -- .../flyteidl/admin/workflow_attributes.proto | 89 -- .../flyteidl/cacheservice/cacheservice.proto | 143 -- flyrs/protos/flyteidl/core/artifact_id.proto | 112 -- flyrs/protos/flyteidl/core/catalog.proto | 63 - flyrs/protos/flyteidl/core/compiler.proto | 64 - flyrs/protos/flyteidl/core/condition.proto | 63 - flyrs/protos/flyteidl/core/dynamic_job.proto | 32 - flyrs/protos/flyteidl/core/errors.proto | 35 - flyrs/protos/flyteidl/core/execution.proto | 116 -- flyrs/protos/flyteidl/core/identifier.proto | 80 - flyrs/protos/flyteidl/core/interface.proto | 64 - flyrs/protos/flyteidl/core/literals.proto | 183 --- flyrs/protos/flyteidl/core/metrics.proto | 50 - flyrs/protos/flyteidl/core/security.proto | 130 -- flyrs/protos/flyteidl/core/tasks.proto | 351 ----- flyrs/protos/flyteidl/core/types.proto | 208 --- flyrs/protos/flyteidl/core/workflow.proto | 315 ---- .../flyteidl/core/workflow_closure.proto | 18 - .../flyteidl/datacatalog/datacatalog.proto | 420 ------ flyrs/protos/flyteidl/event/cloudevents.proto | 73 - flyrs/protos/flyteidl/event/event.proto | 315 ---- flyrs/protos/flyteidl/plugins/array_job.proto | 30 - flyrs/protos/flyteidl/plugins/dask.proto | 41 - .../flyteidl/plugins/kubeflow/common.proto | 33 - .../flyteidl/plugins/kubeflow/mpi.proto | 43 - .../flyteidl/plugins/kubeflow/pytorch.proto | 49 - .../plugins/kubeflow/tensorflow.proto | 42 - flyrs/protos/flyteidl/plugins/mpi.proto | 20 - flyrs/protos/flyteidl/plugins/presto.proto | 14 - flyrs/protos/flyteidl/plugins/pytorch.proto | 25 - flyrs/protos/flyteidl/plugins/qubole.proto | 26 - flyrs/protos/flyteidl/plugins/ray.proto | 50 - flyrs/protos/flyteidl/plugins/spark.proto | 34 - .../protos/flyteidl/plugins/tensorflow.proto | 18 - flyrs/protos/flyteidl/plugins/waitable.proto | 15 - flyrs/protos/flyteidl/service/admin.proto | 659 --------- flyrs/protos/flyteidl/service/agent.proto | 79 - flyrs/protos/flyteidl/service/auth.proto | 94 -- flyrs/protos/flyteidl/service/dataproxy.proto | 205 --- .../service/external_plugin_service.proto | 79 - flyrs/protos/flyteidl/service/identity.proto | 51 - flyrs/protos/flyteidl/service/signal.proto | 55 - flyrs/protos/google/api/annotations.proto | 31 - flyrs/protos/google/api/client.proto | 99 -- flyrs/protos/google/api/field_behavior.proto | 84 -- 
flyrs/protos/google/api/http.proto | 375 ----- flyrs/protos/google/api/resource.proto | 299 ---- flyrs/protos/google/pubsub/v1/pubsub.proto | 1316 ----------------- flyrs/protos/google/pubsub/v1/schema.proto | 289 ---- .../options/annotations.proto | 44 - .../options/openapiv2.proto | 720 --------- flyrs/setup.sh | 1 - 74 files changed, 4 insertions(+), 10498 deletions(-) delete mode 100644 flyrs/build.rs create mode 100644 flyrs/perf.png delete mode 100644 flyrs/protos/flyteidl/admin/agent.proto delete mode 100644 flyrs/protos/flyteidl/admin/cluster_assignment.proto delete mode 100644 flyrs/protos/flyteidl/admin/common.proto delete mode 100644 flyrs/protos/flyteidl/admin/description_entity.proto delete mode 100644 flyrs/protos/flyteidl/admin/event.proto delete mode 100644 flyrs/protos/flyteidl/admin/execution.proto delete mode 100644 flyrs/protos/flyteidl/admin/launch_plan.proto delete mode 100644 flyrs/protos/flyteidl/admin/matchable_resource.proto delete mode 100644 flyrs/protos/flyteidl/admin/node_execution.proto delete mode 100644 flyrs/protos/flyteidl/admin/notification.proto delete mode 100644 flyrs/protos/flyteidl/admin/project.proto delete mode 100644 flyrs/protos/flyteidl/admin/project_attributes.proto delete mode 100644 flyrs/protos/flyteidl/admin/project_domain_attributes.proto delete mode 100644 flyrs/protos/flyteidl/admin/schedule.proto delete mode 100644 flyrs/protos/flyteidl/admin/signal.proto delete mode 100644 flyrs/protos/flyteidl/admin/task.proto delete mode 100644 flyrs/protos/flyteidl/admin/task_execution.proto delete mode 100644 flyrs/protos/flyteidl/admin/version.proto delete mode 100644 flyrs/protos/flyteidl/admin/workflow.proto delete mode 100644 flyrs/protos/flyteidl/admin/workflow_attributes.proto delete mode 100644 flyrs/protos/flyteidl/cacheservice/cacheservice.proto delete mode 100644 flyrs/protos/flyteidl/core/artifact_id.proto delete mode 100644 flyrs/protos/flyteidl/core/catalog.proto delete mode 100644 flyrs/protos/flyteidl/core/compiler.proto delete mode 100644 flyrs/protos/flyteidl/core/condition.proto delete mode 100644 flyrs/protos/flyteidl/core/dynamic_job.proto delete mode 100644 flyrs/protos/flyteidl/core/errors.proto delete mode 100644 flyrs/protos/flyteidl/core/execution.proto delete mode 100644 flyrs/protos/flyteidl/core/identifier.proto delete mode 100644 flyrs/protos/flyteidl/core/interface.proto delete mode 100644 flyrs/protos/flyteidl/core/literals.proto delete mode 100644 flyrs/protos/flyteidl/core/metrics.proto delete mode 100644 flyrs/protos/flyteidl/core/security.proto delete mode 100644 flyrs/protos/flyteidl/core/tasks.proto delete mode 100644 flyrs/protos/flyteidl/core/types.proto delete mode 100644 flyrs/protos/flyteidl/core/workflow.proto delete mode 100644 flyrs/protos/flyteidl/core/workflow_closure.proto delete mode 100644 flyrs/protos/flyteidl/datacatalog/datacatalog.proto delete mode 100644 flyrs/protos/flyteidl/event/cloudevents.proto delete mode 100644 flyrs/protos/flyteidl/event/event.proto delete mode 100644 flyrs/protos/flyteidl/plugins/array_job.proto delete mode 100644 flyrs/protos/flyteidl/plugins/dask.proto delete mode 100644 flyrs/protos/flyteidl/plugins/kubeflow/common.proto delete mode 100644 flyrs/protos/flyteidl/plugins/kubeflow/mpi.proto delete mode 100644 flyrs/protos/flyteidl/plugins/kubeflow/pytorch.proto delete mode 100644 flyrs/protos/flyteidl/plugins/kubeflow/tensorflow.proto delete mode 100644 flyrs/protos/flyteidl/plugins/mpi.proto delete mode 100644 flyrs/protos/flyteidl/plugins/presto.proto delete mode 
100644 flyrs/protos/flyteidl/plugins/pytorch.proto
 delete mode 100644 flyrs/protos/flyteidl/plugins/qubole.proto
 delete mode 100644 flyrs/protos/flyteidl/plugins/ray.proto
 delete mode 100644 flyrs/protos/flyteidl/plugins/spark.proto
 delete mode 100644 flyrs/protos/flyteidl/plugins/tensorflow.proto
 delete mode 100644 flyrs/protos/flyteidl/plugins/waitable.proto
 delete mode 100644 flyrs/protos/flyteidl/service/admin.proto
 delete mode 100644 flyrs/protos/flyteidl/service/agent.proto
 delete mode 100644 flyrs/protos/flyteidl/service/auth.proto
 delete mode 100644 flyrs/protos/flyteidl/service/dataproxy.proto
 delete mode 100644 flyrs/protos/flyteidl/service/external_plugin_service.proto
 delete mode 100644 flyrs/protos/flyteidl/service/identity.proto
 delete mode 100644 flyrs/protos/flyteidl/service/signal.proto
 delete mode 100644 flyrs/protos/google/api/annotations.proto
 delete mode 100644 flyrs/protos/google/api/client.proto
 delete mode 100644 flyrs/protos/google/api/field_behavior.proto
 delete mode 100644 flyrs/protos/google/api/http.proto
 delete mode 100644 flyrs/protos/google/api/resource.proto
 delete mode 100644 flyrs/protos/google/pubsub/v1/pubsub.proto
 delete mode 100644 flyrs/protos/google/pubsub/v1/schema.proto
 delete mode 100644 flyrs/protos/protoc-gen-openapiv2/options/annotations.proto
 delete mode 100644 flyrs/protos/protoc-gen-openapiv2/options/openapiv2.proto
 delete mode 100644 flyrs/setup.sh

diff --git a/flyrs/Cargo.toml b/flyrs/Cargo.toml
index e4ca144ba9..5860a05679 100644
--- a/flyrs/Cargo.toml
+++ b/flyrs/Cargo.toml
@@ -1,6 +1,7 @@
+workspace = { members = ["src/gen/pb_rust/flyteidl"] }
 [package]
 name = "flyrs"
-version = "0.2.0"
+version = "0.1.0"
 edition = "2021"

 [lib]
@@ -18,6 +19,7 @@ tokio = { version = "1.9", features = ["full"] }
 pyo3 = { version = "0.21", features = ["extension-module", "experimental-async"] }
 pyo3-asyncio = { version = "0.14", features = ["tokio-runtime"] }
 prost-types = "0.12.3"
+flyteidl = { path="src/gen/pb_rust/flyteidl" }

 [build-dependencies]
-tonic-build = "0.11.0"
+

diff --git a/flyrs/build.rs b/flyrs/build.rs
deleted file mode 100644
index a4cf7ec170..0000000000
--- a/flyrs/build.rs
+++ /dev/null
@@ -1,41 +0,0 @@
-use std::fs;
-use std::path::{PathBuf};
-
-fn main() -> Result<(), Box<dyn std::error::Error>> {
-    let proto_package_dirs: Vec<&str> = ["protos/flyteidl/admin/", "protos/flyteidl/cacheservice/", "protos/flyteidl/core/", "protos/flyteidl/datacatalog/", "protos/flyteidl/event/", "protos/flyteidl/plugins/", "protos/flyteidl/service/"].to_vec();
-    let out_dir = concat!("src/", env!("PB_OUT_DIR")); // Avoid using `OUT_DIR`. It's already used by tonic_build and will have side effects in the target build folder.
-    for package_dir in proto_package_dirs.iter() {
-        let proto_files = find_proto_files(package_dir)?;
-        let proto_files_paths: Vec<&str> = proto_files.iter().map(|path| path.to_str().unwrap()).collect();
-        println!("{:?}", proto_files_paths);
-
-        tonic_build::configure()
-            .build_server(false)
-            // .compile_well_known_types(true) // Defaults to false. Enable it if you don't want tonic_build to handle well-known types by adding the `prost-types` crate automatically.
-            .out_dir(out_dir)
-            .compile(
-                &proto_files_paths,
-                &["protos/"], // same as the `-I` arg in `protoc`; it's the root folder when importing other *.proto files.
-            )?;
-    }
-    Ok(())
-}
-
-fn find_proto_files(dir: &str) -> Result<Vec<PathBuf>, std::io::Error> {
-    let mut proto_files = Vec::new();
-    if let Ok(entries) = fs::read_dir(dir) {
-        for entry in entries {
-            if let Ok(entry) = entry {
-                let path = entry.path();
-                if path.is_file() && path.extension().map_or(false, |ext| ext == "proto") {
-                    proto_files.push(path);
-                } else if path.is_dir() {
-                    if let Ok(mut nested_proto_files) = find_proto_files(&path.to_str().unwrap()) {
-                        proto_files.append(&mut nested_proto_files);
-                    }
-                }
-            }
-        }
-    }
-    Ok(proto_files)
-}

diff --git a/flyrs/perf.png b/flyrs/perf.png
new file mode 100644
index 0000000000000000000000000000000000000000..26e384d6aca8ca1dc83a960d2d3a8d2147e66b20
GIT binary patch
literal 30593
[30593 bytes of base85-encoded image data omitted -- the perf.png benchmark chart comparing Python vs Rust gRPC fetch times]
literal 0
HcmV?d00001
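With build.rs deleted, stub generation evidently moves out of the Cargo build: the generated sources committed under src/gen/pb_rust/flyteidl become the source of truth, and regeneration presumably happens out-of-band (e.g. via build.sh). A one-off generator in the spirit of the deleted build.rs might look like the following sketch, assuming tonic-build 0.11; the entry proto and paths here are placeholders, not the actual regeneration script:

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Write stubs to a committed source directory instead of cargo's OUT_DIR,
    // so the generated crate can be vendored and rebuilt without protoc installed.
    tonic_build::configure()
        .build_server(false) // client stubs only
        .out_dir("src/gen/pb_rust/flyteidl")
        .compile(
            &["protos/flyteidl/service/admin.proto"], // entry proto; imports resolve via the include path
            &["protos/"], // include root, same as protoc's -I flag
        )?;
    Ok(())
}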
diff --git a/flyrs/protos/flyteidl/admin/agent.proto b/flyrs/protos/flyteidl/admin/agent.proto
deleted file mode 100644
index b84171c8d6..0000000000
--- a/flyrs/protos/flyteidl/admin/agent.proto
+++ /dev/null
@@ -1,254 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.admin;
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin";
-
-import "flyteidl/core/literals.proto";
-import "flyteidl/core/tasks.proto";
-import "flyteidl/core/workflow.proto";
-import "flyteidl/core/identifier.proto";
-import "flyteidl/core/execution.proto";
-import "flyteidl/core/metrics.proto";
-import "google/protobuf/duration.proto";
-import "google/protobuf/timestamp.proto";
-import "google/protobuf/struct.proto";
-
-// The state of the execution is used to control its visibility in the UI/CLI.
-enum State {
-  option deprecated = true;
-  RETRYABLE_FAILURE = 0;
-  PERMANENT_FAILURE = 1;
-  PENDING = 2;
-  RUNNING = 3;
-  SUCCEEDED = 4;
-}
-
-// Represents a subset of runtime task execution metadata that are relevant to external plugins.
-message TaskExecutionMetadata {
-  // ID of the task execution
-  core.TaskExecutionIdentifier task_execution_id = 1;
-  // k8s namespace where the task is executed in
-  string namespace = 2;
-  // Labels attached to the task execution
-  map<string, string> labels = 3;
-  // Annotations attached to the task execution
-  map<string, string> annotations = 4;
-  // k8s service account associated with the task execution
-  string k8s_service_account = 5;
-  // Environment variables attached to the task execution
-  map<string, string> environment_variables = 6;
-  // Represents the maximum number of attempts allowed for a task.
-  // If a task fails, it can be retried up to this maximum number of attempts.
-  int32 max_attempts = 7;
-  // Indicates whether the task execution can be interrupted.
-  // If set to true, the task can be stopped before completion.
-  bool interruptible = 8;
-  // Specifies the threshold for failure count at which the interruptible property
-  // will take effect. If the number of consecutive task failures exceeds this threshold,
-  // interruptible behavior will be activated.
-  int32 interruptible_failure_threshold = 9;
-  // Overrides for specific properties of the task node.
-  // These overrides can be used to customize the behavior of the task node.
- core.TaskNodeOverrides overrides = 10; -} - -// Represents a request structure to create task. -message CreateTaskRequest { - // The inputs required to start the execution. All required inputs must be - // included in this map. If not required and not provided, defaults apply. - // +optional - core.LiteralMap inputs = 1; - // Template of the task that encapsulates all the metadata of the task. - core.TaskTemplate template = 2; - // Prefix for where task output data will be written. (e.g. s3://my-bucket/randomstring) - string output_prefix = 3; - // subset of runtime task execution metadata. - TaskExecutionMetadata task_execution_metadata = 4; -} - -// Represents a create response structure. -message CreateTaskResponse { - // ResourceMeta is created by the agent. It could be a string (jobId) or a dict (more complex metadata). - bytes resource_meta = 1; -} - -message CreateRequestHeader { - // Template of the task that encapsulates all the metadata of the task. - core.TaskTemplate template = 1; - // Prefix for where task output data will be written. (e.g. s3://my-bucket/randomstring) - string output_prefix = 2; - // subset of runtime task execution metadata. - TaskExecutionMetadata task_execution_metadata = 3; - // MaxDatasetSizeBytes is the maximum size of the dataset that can be generated by the task. - int64 max_dataset_size_bytes = 4; -} - - -message ExecuteTaskSyncRequest { - oneof part { - CreateRequestHeader header = 1; - core.LiteralMap inputs = 2; - } -} - -message ExecuteTaskSyncResponseHeader { - Resource resource = 1; -} - -message ExecuteTaskSyncResponse { - // Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata). - // Resource is for synchronous task execution. - oneof res { - ExecuteTaskSyncResponseHeader header = 1; - core.LiteralMap outputs = 2; - } -} - -// A message used to fetch a job resource from flyte agent server. -message GetTaskRequest { - // A predefined yet extensible Task type identifier. - string task_type = 1 [deprecated = true]; - // Metadata about the resource to be pass to the agent. - bytes resource_meta = 2; - // A predefined yet extensible Task type identifier. - TaskCategory task_category = 3; -} - -// Response to get an individual task resource. -message GetTaskResponse { - Resource resource = 1; -} - -message Resource { - // DEPRECATED. The state of the execution is used to control its visibility in the UI/CLI. - State state = 1 [deprecated = true]; - // The outputs of the execution. It's typically used by sql task. Agent service will create a - // Structured dataset pointing to the query result table. - // +optional - core.LiteralMap outputs = 2; - // A descriptive message for the current state. e.g. waiting for cluster. - string message = 3; - // log information for the task execution. - repeated core.TaskLog log_links = 4; - // The phase of the execution is used to determine the phase of the plugin's execution. - core.TaskExecution.Phase phase = 5; - // Custom data specific to the agent. - google.protobuf.Struct custom_info = 6; -} - -// A message used to delete a task. -message DeleteTaskRequest { - // A predefined yet extensible Task type identifier. - string task_type = 1 [deprecated = true]; - // Metadata about the resource to be pass to the agent. - bytes resource_meta = 2; - // A predefined yet extensible Task type identifier. - TaskCategory task_category = 3; -} - -// Response to delete a task. -message DeleteTaskResponse {} - -// A message containing the agent metadata. 
-message Agent { - // Name is the developer-assigned name of the agent. - string name = 1; - - // SupportedTaskTypes are the types of the tasks that the agent can handle. - repeated string supported_task_types = 2 [deprecated = true]; - - // IsSync indicates whether this agent is a sync agent. Sync agents are expected to return their - // results synchronously when called by propeller. Given that sync agents can affect the performance - // of the system, it's important to enforce strict timeout policies. - // An Async agent, on the other hand, is required to be able to identify jobs by an - // identifier and query for job statuses as jobs progress. - bool is_sync = 3; - - // Supported_task_categories are the categories of the tasks that the agent can handle. - repeated TaskCategory supported_task_categories = 4; -} - -message TaskCategory { - // The name of the task type. - string name = 1; - // The version of the task type. - int32 version = 2; -} - -// A request to get an agent. -message GetAgentRequest { - // The name of the agent. - string name = 1; -} - -// A response containing an agent. -message GetAgentResponse { - Agent agent = 1; -} - -// A request to list all agents. -message ListAgentsRequest {} - -// A response containing a list of agents. -message ListAgentsResponse { - repeated Agent agents = 1; -} - -// A request to get the metrics from a task execution. -message GetTaskMetricsRequest { - // A predefined yet extensible Task type identifier. - string task_type = 1 [deprecated = true]; - // Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata). - bytes resource_meta = 2; - // The metrics to query. If empty, will return a default set of metrics. - // e.g. EXECUTION_METRIC_USED_CPU_AVG or EXECUTION_METRIC_USED_MEMORY_BYTES_AVG - repeated string queries = 3; - // Start timestamp, inclusive. - google.protobuf.Timestamp start_time = 4; - // End timestamp, inclusive.. - google.protobuf.Timestamp end_time = 5; - // Query resolution step width in duration format or float number of seconds. - google.protobuf.Duration step = 6; - // A predefined yet extensible Task type identifier. - TaskCategory task_category = 7; -} - -// A response containing a list of metrics for a task execution. -message GetTaskMetricsResponse { - // The execution metric results. - repeated core.ExecutionMetricResult results = 1; -} - -// A request to get the log from a task execution. -message GetTaskLogsRequest { - // A predefined yet extensible Task type identifier. - string task_type = 1 [deprecated = true]; - // Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata). - bytes resource_meta = 2; - // Number of lines to return. - uint64 lines = 3; - // In the case of multiple pages of results, the server-provided token can be used to fetch the next page - // in a query. If there are no more results, this value will be empty. - string token = 4; - // A predefined yet extensible Task type identifier. - TaskCategory task_category = 5; -} - -message GetTaskLogsResponseHeader { - // In the case of multiple pages of results, the server-provided token can be used to fetch the next page - // in a query. If there are no more results, this value will be empty. - string token = 1; -} - -message GetTaskLogsResponseBody { - // The execution log results. - repeated string results = 1; -} - -// A response containing the logs for a task execution. 
-message GetTaskLogsResponse { - oneof part { - GetTaskLogsResponseHeader header = 1; - GetTaskLogsResponseBody body = 2; - } -} diff --git a/flyrs/protos/flyteidl/admin/cluster_assignment.proto b/flyrs/protos/flyteidl/admin/cluster_assignment.proto deleted file mode 100644 index 6a55798436..0000000000 --- a/flyrs/protos/flyteidl/admin/cluster_assignment.proto +++ /dev/null @@ -1,11 +0,0 @@ -syntax = "proto3"; - -package flyteidl.admin; -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; - - -// Encapsulates specifications for routing an execution onto a specific cluster. -message ClusterAssignment { - reserved 1, 2; - string cluster_pool_name = 3; -} diff --git a/flyrs/protos/flyteidl/admin/common.proto b/flyrs/protos/flyteidl/admin/common.proto deleted file mode 100644 index 6c04b0531a..0000000000 --- a/flyrs/protos/flyteidl/admin/common.proto +++ /dev/null @@ -1,327 +0,0 @@ -syntax = "proto3"; - -package flyteidl.admin; -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; - -import "flyteidl/core/execution.proto"; -import "flyteidl/core/identifier.proto"; -import "flyteidl/core/literals.proto"; -import "google/protobuf/timestamp.proto"; - -// Encapsulation of fields that identifies a Flyte resource. -// A Flyte resource can be a task, workflow or launch plan. -// A resource can internally have multiple versions and is uniquely identified -// by project, domain, and name. -message NamedEntityIdentifier { - // Name of the project the resource belongs to. - string project = 1; - // Name of the domain the resource belongs to. - // A domain can be considered as a subset within a specific project. - string domain = 2; - // User provided value for the resource. - // The combination of project + domain + name uniquely identifies the resource. - // +optional - in certain contexts - like 'List API', 'Launch plans' - string name = 3; - - // Optional, org key applied to the resource. - string org = 4; -} - -// The status of the named entity is used to control its visibility in the UI. -enum NamedEntityState { - // By default, all named entities are considered active and under development. - NAMED_ENTITY_ACTIVE = 0; - - // Archived named entities are no longer visible in the UI. - NAMED_ENTITY_ARCHIVED = 1; - - // System generated entities that aren't explicitly created or managed by a user. - SYSTEM_GENERATED = 2; -} - -// Additional metadata around a named entity. -message NamedEntityMetadata { - // Common description across all versions of the entity - // +optional - string description = 1; - - // Shared state across all version of the entity - // At this point in time, only workflow entities can have their state archived. - NamedEntityState state = 2; -} - -// Encapsulates information common to a NamedEntity, a Flyte resource such as a task, -// workflow or launch plan. A NamedEntity is exclusively identified by its resource type -// and identifier. -message NamedEntity { - // Resource type of the named entity. One of Task, Workflow or LaunchPlan. - flyteidl.core.ResourceType resource_type = 1; - NamedEntityIdentifier id = 2; - - // Additional metadata around a named entity. - NamedEntityMetadata metadata = 3; -} - -// Specifies sort ordering in a list request. -message Sort { - enum Direction { - - // By default, fields are sorted in descending order. - DESCENDING = 0; - ASCENDING = 1; - } - // Indicates an attribute to sort the response values. 
- // +required - string key = 1; - - // Indicates the direction to apply sort key for response values. - // +optional - Direction direction = 2; -} - -// Represents a request structure to list NamedEntityIdentifiers. -message NamedEntityIdentifierListRequest { - // Name of the project that contains the identifiers. - // +required - string project = 1; - - // Name of the domain the identifiers belongs to within the project. - // +required - string domain = 2; - - // Indicates the number of resources to be returned. - // +required - uint32 limit = 3; - - // In the case of multiple pages of results, the server-provided token can be used to fetch the next page - // in a query. - // +optional - string token = 4; - - // Specifies how listed entities should be sorted in the response. - // +optional - Sort sort_by = 5; - - // Indicates a list of filters passed as string. - // +optional - string filters = 6; - - // Optional, org key applied to the resource. - string org = 7; -} - -// Represents a request structure to list NamedEntity objects -message NamedEntityListRequest { - // Resource type of the metadata to query. One of Task, Workflow or LaunchPlan. - // +required - flyteidl.core.ResourceType resource_type = 1; - // Name of the project that contains the identifiers. - // +required - string project = 2; - // Name of the domain the identifiers belongs to within the project. - string domain = 3; - // Indicates the number of resources to be returned. - uint32 limit = 4; - // In the case of multiple pages of results, the server-provided token can be used to fetch the next page - // in a query. - // +optional - string token = 5; - - // Specifies how listed entities should be sorted in the response. - // +optional - Sort sort_by = 6; - - // Indicates a list of filters passed as string. - // +optional - string filters = 7; - - // Optional, org key applied to the resource. - string org = 8; -} - -// Represents a list of NamedEntityIdentifiers. -message NamedEntityIdentifierList { - // A list of identifiers. - repeated NamedEntityIdentifier entities = 1; - - // In the case of multiple pages of results, the server-provided token can be used to fetch the next page - // in a query. If there are no more results, this value will be empty. - string token = 2; -} - -// Represents a list of NamedEntityIdentifiers. -message NamedEntityList { - // A list of NamedEntity objects - repeated NamedEntity entities = 1; - - // In the case of multiple pages of results, the server-provided token can be used to fetch the next page - // in a query. If there are no more results, this value will be empty. - string token = 2; -} - -// A request to retrieve the metadata associated with a NamedEntityIdentifier -message NamedEntityGetRequest { - // Resource type of the metadata to get. One of Task, Workflow or LaunchPlan. - // +required - flyteidl.core.ResourceType resource_type = 1; - - // The identifier for the named entity for which to fetch metadata. - // +required - NamedEntityIdentifier id = 2; -} - -// Request to set the referenced named entity state to the configured value. -message NamedEntityUpdateRequest { - // Resource type of the metadata to update - // +required - flyteidl.core.ResourceType resource_type = 1; - - // Identifier of the metadata to update - // +required - NamedEntityIdentifier id = 2; - - // Metadata object to set as the new value - // +required - NamedEntityMetadata metadata = 3; -} - -// Purposefully empty, may be populated in the future. 
-message NamedEntityUpdateResponse { -} - -// Shared request structure to fetch a single resource. -// Resources include: Task, Workflow, LaunchPlan -message ObjectGetRequest { - // Indicates a unique version of resource. - // +required - core.Identifier id = 1; -} - -// Shared request structure to retrieve a list of resources. -// Resources include: Task, Workflow, LaunchPlan -message ResourceListRequest { - // id represents the unique identifier of the resource. - // +required - NamedEntityIdentifier id = 1; - - // Indicates the number of resources to be returned. - // +required - uint32 limit = 2; - - // In the case of multiple pages of results, this server-provided token can be used to fetch the next page - // in a query. - // +optional - string token = 3; - - // Indicates a list of filters passed as string. - // More info on constructing filters : - // +optional - string filters = 4; - - // Sort ordering. - // +optional - Sort sort_by = 5; -} - -// Defines an email notification specification. -message EmailNotification { - // The list of email addresses recipients for this notification. - // +required - repeated string recipients_email = 1; -} - -// Defines a pager duty notification specification. -message PagerDutyNotification { - // Currently, PagerDuty notifications leverage email to trigger a notification. - // +required - repeated string recipients_email = 1; -} - -// Defines a slack notification specification. -message SlackNotification { - // Currently, Slack notifications leverage email to trigger a notification. - // +required - repeated string recipients_email = 1; -} - -// Represents a structure for notifications based on execution status. -// The notification content is configured within flyte admin but can be templatized. -// Future iterations could expose configuring notifications with custom content. -message Notification { - // A list of phases to which users can associate the notifications to. - // +required - repeated core.WorkflowExecution.Phase phases = 1; - - // The type of notification to trigger. - // +required - oneof type { - EmailNotification email = 2; - PagerDutyNotification pager_duty = 3; - SlackNotification slack = 4; - } - -} - -// Represents a string url and associated metadata used throughout the platform. -message UrlBlob { - option deprecated = true; - - // Actual url value. - string url = 1; - - // Represents the size of the file accessible at the above url. - int64 bytes = 2; -} - -// Label values to be applied to an execution resource. -// In the future a mode (e.g. OVERRIDE, APPEND, etc) can be defined -// to specify how to merge labels defined at registration and execution time. -message Labels { - // Map of custom labels to be applied to the execution resource. - map values = 1; -} - -// Annotation values to be applied to an execution resource. -// In the future a mode (e.g. OVERRIDE, APPEND, etc) can be defined -// to specify how to merge annotations defined at registration and execution time. -message Annotations { - // Map of custom annotations to be applied to the execution resource. - map values = 1; -} - -// Environment variable values to be applied to an execution resource. -// In the future a mode (e.g. OVERRIDE, APPEND, etc) can be defined -// to specify how to merge environment variables defined at registration and execution time. -message Envs { - // Map of custom environment variables to be applied to the execution resource. 
- repeated flyteidl.core.KeyValuePair values = 1; -} - -// Defines permissions associated with executions created by this launch plan spec. -// Use either of these roles when they have permissions required by your workflow execution. -// Deprecated. -message AuthRole { - option deprecated = true; - - // Defines an optional iam role which will be used for tasks run in executions created with this launch plan. - string assumable_iam_role = 1; - - // Defines an optional kubernetes service account which will be used for tasks run in executions created with this launch plan. - string kubernetes_service_account = 2; -} - - -// Encapsulates user settings pertaining to offloaded data (i.e. Blobs, Schema, query data, etc.). -// See https://github.com/flyteorg/flyte/issues/211 for more background information. -message RawOutputDataConfig { - // Prefix for where offloaded data from user workflows will be written - // e.g. s3://bucket/key or s3://bucket/ - string output_location_prefix = 1; -} - -// These URLs are returned as part of node and task execution data requests. -message FlyteURLs { - string inputs = 1; - string outputs = 2; - string deck = 3; -} diff --git a/flyrs/protos/flyteidl/admin/description_entity.proto b/flyrs/protos/flyteidl/admin/description_entity.proto deleted file mode 100644 index 055ca0f4b6..0000000000 --- a/flyrs/protos/flyteidl/admin/description_entity.proto +++ /dev/null @@ -1,95 +0,0 @@ -syntax = "proto3"; - -package flyteidl.admin; -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; - -import "flyteidl/core/identifier.proto"; -import "flyteidl/admin/common.proto"; - -// DescriptionEntity contains detailed description for the task/workflow. -// Documentation could provide insight into the algorithms, business use case, etc. -message DescriptionEntity { - // id represents the unique identifier of the description entity. - core.Identifier id = 1; - // One-liner overview of the entity. - string short_description = 2; - // Full user description with formatting preserved. - Description long_description = 3; - // Optional link to source code used to define this entity. - SourceCode source_code = 4; - // User-specified tags. These are arbitrary and can be used for searching - // filtering and discovering tasks. - repeated string tags = 5; -} - -// The format of the long description -enum DescriptionFormat { - DESCRIPTION_FORMAT_UNKNOWN = 0; - DESCRIPTION_FORMAT_MARKDOWN = 1; - DESCRIPTION_FORMAT_HTML = 2; - // python default documentation - comments is rst - DESCRIPTION_FORMAT_RST = 3; -} - -// Full user description with formatting preserved. This can be rendered -// by clients, such as the console or command line tools with in-tact -// formatting. -message Description { - oneof content { - // long description - no more than 4KB - string value = 1; - // if the description sizes exceed some threshold we can offload the entire - // description proto altogether to an external data store, like S3 rather than store inline in the db - string uri = 2; - } - - // Format of the long description - DescriptionFormat format = 3; - // Optional link to an icon for the entity - string icon_link = 4; -} - -// Link to source code used to define this entity -message SourceCode { - string link = 1; -} - -// Represents a list of DescriptionEntities returned from the admin. -// See :ref:`ref_flyteidl.admin.DescriptionEntity` for more details -message DescriptionEntityList { - // A list of DescriptionEntities returned based on the request. 
- repeated DescriptionEntity descriptionEntities = 1; - - // In the case of multiple pages of results, the server-provided token can be used to fetch the next page - // in a query. If there are no more results, this value will be empty. - string token = 2; -} - -// Represents a request structure to retrieve a list of DescriptionEntities. -// See :ref:`ref_flyteidl.admin.DescriptionEntity` for more details -message DescriptionEntityListRequest { - // Identifies the specific type of resource that this identifier corresponds to. - flyteidl.core.ResourceType resource_type = 1; - - // The identifier for the description entity. - // +required - NamedEntityIdentifier id = 2; - - // Indicates the number of resources to be returned. - // +required - uint32 limit = 3; - - // In the case of multiple pages of results, the server-provided token can be used to fetch the next page - // in a query. - // +optional - string token = 4; - - // Indicates a list of filters passed as string. - // More info on constructing filters : - // +optional - string filters = 5; - - // Sort ordering for returned list. - // +optional - Sort sort_by = 6; -} diff --git a/flyrs/protos/flyteidl/admin/event.proto b/flyrs/protos/flyteidl/admin/event.proto deleted file mode 100644 index 0762ff78af..0000000000 --- a/flyrs/protos/flyteidl/admin/event.proto +++ /dev/null @@ -1,70 +0,0 @@ -syntax = "proto3"; - -package flyteidl.admin; -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; - -import "flyteidl/event/event.proto"; - -// Indicates that a sent event was not used to update execution state due to -// the referenced execution already being terminated (and therefore ineligible -// for further state transitions). -message EventErrorAlreadyInTerminalState { - // +required - string current_phase = 1; -} - -// Indicates an event was rejected because it came from a different cluster than -// is on record as running the execution. -message EventErrorIncompatibleCluster { - // The cluster which has been recorded as processing the execution. - // +required - string cluster = 1; -} - -// Indicates why a sent event was not used to update execution. -message EventFailureReason { - // +required - oneof reason { - EventErrorAlreadyInTerminalState already_in_terminal_state = 1; - EventErrorIncompatibleCluster incompatible_cluster = 2; - } -} - -// Request to send a notification that a workflow execution event has occurred. -message WorkflowExecutionEventRequest { - // Unique ID for this request that can be traced between services - string request_id = 1; - - // Details about the event that occurred. - event.WorkflowExecutionEvent event = 2; -} - -message WorkflowExecutionEventResponse { - // Purposefully empty, may be populated in the future. -} - -// Request to send a notification that a node execution event has occurred. -message NodeExecutionEventRequest { - // Unique ID for this request that can be traced between services - string request_id = 1; - - // Details about the event that occurred. - event.NodeExecutionEvent event = 2; -} - -message NodeExecutionEventResponse { - // Purposefully empty, may be populated in the future. -} - -// Request to send a notification that a task execution event has occurred. -message TaskExecutionEventRequest { - // Unique ID for this request that can be traced between services - string request_id = 1; - - // Details about the event that occurred. 
- event.TaskExecutionEvent event = 2; -} - -message TaskExecutionEventResponse { - // Purposefully empty, may be populated in the future. -} diff --git a/flyrs/protos/flyteidl/admin/execution.proto b/flyrs/protos/flyteidl/admin/execution.proto deleted file mode 100644 index cc7fa1d15c..0000000000 --- a/flyrs/protos/flyteidl/admin/execution.proto +++ /dev/null @@ -1,424 +0,0 @@ -syntax = "proto3"; - -package flyteidl.admin; -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; - -import "flyteidl/admin/cluster_assignment.proto"; -import "flyteidl/admin/common.proto"; -import "flyteidl/core/literals.proto"; -import "flyteidl/core/execution.proto"; -import "flyteidl/core/artifact_id.proto"; -import "flyteidl/core/identifier.proto"; -import "flyteidl/core/metrics.proto"; -import "flyteidl/core/security.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; -import "flyteidl/admin/matchable_resource.proto"; - -// Request to launch an execution with the given project, domain and optionally-assigned name. -message ExecutionCreateRequest { - // Name of the project the execution belongs to. - // +required - string project = 1; - - // Name of the domain the execution belongs to. - // A domain can be considered as a subset within a specific project. - // +required - string domain = 2; - - // User provided value for the resource. - // If none is provided the system will generate a unique string. - // +optional - string name = 3; - - // Additional fields necessary to launch the execution. - // +optional - ExecutionSpec spec = 4; - - // The inputs required to start the execution. All required inputs must be - // included in this map. If not required and not provided, defaults apply. - // +optional - core.LiteralMap inputs = 5; - - // Optional, org key applied to the resource. - string org = 6; -} - -// Request to relaunch the referenced execution. -message ExecutionRelaunchRequest { - // Identifier of the workflow execution to relaunch. - // +required - core.WorkflowExecutionIdentifier id = 1; - - // Deprecated field, do not use. - reserved 2; - - // User provided value for the relaunched execution. - // If none is provided the system will generate a unique string. - // +optional - string name = 3; - - // Allows for all cached values of a workflow and its tasks to be overwritten for a single execution. - // If enabled, all calculations are performed even if cached results would be available, overwriting the stored - // data once execution finishes successfully. - bool overwrite_cache = 4; -} - -// Request to recover the referenced execution. -message ExecutionRecoverRequest { - // Identifier of the workflow execution to recover. - core.WorkflowExecutionIdentifier id = 1; - - // User provided value for the recovered execution. - // If none is provided the system will generate a unique string. - // +optional - string name = 2; - - // Additional metadata which will be used to overwrite any metadata in the reference execution when triggering a recovery execution. - ExecutionMetadata metadata = 3; -} - -// The unique identifier for a successfully created execution. -// If the name was *not* specified in the create request, this identifier will include a generated name. -message ExecutionCreateResponse { - core.WorkflowExecutionIdentifier id = 1; -} - -// A message used to fetch a single workflow execution entity. 
-// See :ref:`ref_flyteidl.admin.Execution` for more details
-message WorkflowExecutionGetRequest {
-  // Uniquely identifies an individual workflow execution.
-  core.WorkflowExecutionIdentifier id = 1;
-}
-
-// A workflow execution represents an instantiated workflow, including all inputs and additional
-// metadata, as well as computed results including state, outputs, and duration-based attributes.
-// Used as the response object in Get and List execution requests.
-message Execution {
-  // Unique identifier of the workflow execution.
-  core.WorkflowExecutionIdentifier id = 1;
-
-  // User-provided configuration and inputs for launching the execution.
-  ExecutionSpec spec = 2;
-
-  // Execution results.
-  ExecutionClosure closure = 3;
-}
-
-// Used as a response for request to list executions.
-// See :ref:`ref_flyteidl.admin.Execution` for more details
-message ExecutionList {
-  repeated Execution executions = 1;
-
-  // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
-  // in a query. If there are no more results, this value will be empty.
-  string token = 2;
-}
-
-// Input/output data can be represented by actual values or a link to where values are stored
-message LiteralMapBlob {
-  oneof data {
-    // Data in LiteralMap format
-    core.LiteralMap values = 1 [deprecated = true];
-
-    // In the event that the map is too large, we return a uri to the data
-    string uri = 2;
-  }
-}
-
-// Specifies metadata around an aborted workflow execution.
-message AbortMetadata {
-  // In the case of a user-specified abort, this will pass along the user-supplied cause.
-  string cause = 1;
-
-  // Identifies the entity (if any) responsible for terminating the execution
-  string principal = 2;
-}
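Both LiteralMapBlob above and ExecutionClosure below lean on proto oneof fields. Assuming flyrs keeps the tonic/prost code generation wired up in flyrs/build.rs, a oneof comes out as an Option wrapping a generated enum; the module paths in this sketch are guesses at that generated layout, not confirmed names.

use flyteidl::admin::{literal_map_blob, LiteralMapBlob};

// prost models `oneof data` as `Option<literal_map_blob::Data>`, so an
// unset oneof is `None` rather than a defaulted variant.
fn describe_outputs(blob: &LiteralMapBlob) -> String {
    match &blob.data {
        Some(literal_map_blob::Data::Values(_map)) => "inline literal map (deprecated)".to_string(),
        Some(literal_map_blob::Data::Uri(uri)) => format!("offloaded to {uri}"),
        None => "no output data recorded".to_string(),
    }
}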
-
-// Encapsulates the results of the Execution
-message ExecutionClosure {
-  // A result produced by a terminated execution.
-  // A pending (non-terminal) execution will not have any output result.
-  oneof output_result {
-    // Output URI in the case of a successful execution.
-    // DEPRECATED. Use GetExecutionData to fetch output data instead.
-    LiteralMapBlob outputs = 1 [deprecated = true];
-
-    // Error information in the case of a failed execution.
-    core.ExecutionError error = 2;
-
-    // In the case of a user-specified abort, this will pass along the user-supplied cause.
-    string abort_cause = 10 [deprecated = true];
-
-    // In the case of a user-specified abort, this will pass along the user and their supplied cause.
-    AbortMetadata abort_metadata = 12;
-
-    // Raw output data produced by this execution.
-    // DEPRECATED. Use GetExecutionData to fetch output data instead.
-    core.LiteralMap output_data = 13 [deprecated = true];
-  }
-
-  // Inputs computed and passed for execution.
-  // computed_inputs depends on inputs in ExecutionSpec, fixed and default inputs in launch plan
-  core.LiteralMap computed_inputs = 3 [deprecated = true];
-
-  // Most recent recorded phase for the execution.
-  core.WorkflowExecution.Phase phase = 4;
-
-  // Reported time at which the execution began running.
-  google.protobuf.Timestamp started_at = 5;
-
-  // The amount of time the execution spent running.
-  google.protobuf.Duration duration = 6;
-
-  // Reported time at which the execution was created.
-  google.protobuf.Timestamp created_at = 7;
-
-  // Reported time at which the execution was last updated.
-  google.protobuf.Timestamp updated_at = 8;
-
-  // The notification settings to use after merging the CreateExecutionRequest and the launch plan
-  // notification settings. An execution launched with notifications will always prefer that definition
-  // to notifications defined statically in a launch plan.
-  repeated Notification notifications = 9;
-
-  // Identifies the workflow definition for this execution.
-  core.Identifier workflow_id = 11;
-
-  // Provides the details of the last state change
-  ExecutionStateChangeDetails state_change_details = 14;
-}
-
-// Represents system, rather than user-facing, metadata about an execution.
-message SystemMetadata {
-
-  // Which execution cluster this execution ran on.
-  string execution_cluster = 1;
-
-  // Which kubernetes namespace the execution ran under.
-  string namespace = 2;
-}
-
-// Represents attributes about an execution which are not required to launch the execution but are useful to record.
-// These attributes are assigned at launch time and do not change.
-message ExecutionMetadata {
-  // The method by which this execution was launched.
-  enum ExecutionMode {
-    // The default execution mode, MANUAL implies that an execution was launched by an individual.
-    MANUAL = 0;
-
-    // A schedule triggered this execution launch.
-    SCHEDULED = 1;
-
-    // A system process was responsible for launching this execution rather than an individual.
-    SYSTEM = 2;
-
-    // This execution was launched with identical inputs as a previous execution.
-    RELAUNCH = 3;
-
-    // This execution was triggered by another execution.
-    CHILD_WORKFLOW = 4;
-
-    // This execution was recovered from another execution.
-    RECOVERED = 5;
-
-    // Execution was kicked off by the artifact trigger system
-    TRIGGER = 6;
-  }
-  ExecutionMode mode = 1;
-
-  // Identifier of the entity that triggered this execution.
-  // For systems using back-end authentication any value set here will be discarded in favor of the
-  // authenticated user context.
-  string principal = 2;
-
-  // Indicates the nestedness of this execution.
-  // If a user launches a workflow execution, the default nesting is 0.
-  // If this execution further launches a workflow (child workflow), the nesting level is incremented, e.g. 0 => 1.
-  // Generally, if a workflow at nesting level k launches a workflow then the child workflow will have
-  // nesting = k + 1.
-  uint32 nesting = 3;
-
-  // For scheduled executions, the requested time for execution for this specific schedule invocation.
-  google.protobuf.Timestamp scheduled_at = 4;
-
-  // Which subworkflow node (if any) launched this execution
-  core.NodeExecutionIdentifier parent_node_execution = 5;
-
-  // Optional, a reference workflow execution related to this execution.
-  // In the case of a relaunch, this references the original workflow execution.
-  core.WorkflowExecutionIdentifier reference_execution = 16;
-
-  // Optional, platform-specific metadata about the execution.
-  // In the future this may be gated behind an ACL or some sort of authorization.
-  SystemMetadata system_metadata = 17;
-
-  // Save a list of the artifacts used in this execution for now. This is a list only rather than a mapping
-  // since we don't have a structure to handle nested ones anyway.
-  repeated core.ArtifactID artifact_ids = 18;
-}
-
-message NotificationList {
-  repeated Notification notifications = 1;
-}
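The ExecutionSpec message defined next is the bulk of an ExecutionCreateRequest. A minimal sketch of assembling one with the assumed prost-generated types follows; prost derives Default, so unset optional fields can be elided, and the project/domain values here are purely illustrative.

use flyteidl::admin::{ExecutionCreateRequest, ExecutionSpec};
use flyteidl::core::Identifier;

// An empty `name` asks the service to generate a unique one (see
// ExecutionCreateRequest earlier in this file); all other fields fall
// back to their Default values.
fn create_request(launch_plan: Identifier) -> ExecutionCreateRequest {
    ExecutionCreateRequest {
        project: "flytesnacks".into(),
        domain: "development".into(),
        name: String::new(),
        spec: Some(ExecutionSpec {
            launch_plan: Some(launch_plan),
            ..Default::default()
        }),
        ..Default::default()
    }
}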
-
-// An ExecutionSpec encompasses all data used to launch this execution. The Spec does not change over the lifetime
-// of an execution as it progresses across phase changes.
-message ExecutionSpec {
-  // Launch plan to be executed
-  core.Identifier launch_plan = 1;
-
-  // Input values to be passed for the execution
-  core.LiteralMap inputs = 2 [deprecated = true];
-
-  // Metadata for the execution
-  ExecutionMetadata metadata = 3;
-
-  // This field is deprecated. Do not use.
-  reserved 4;
-
-  oneof notification_overrides {
-    // List of notifications based on Execution status transitions
-    // When this list is not empty it is used rather than any notifications defined in the referenced launch plan.
-    // When this list is empty, the notifications defined for the launch plan will be applied.
-    NotificationList notifications = 5;
-
-    // This should be set to true if all notifications are intended to be disabled for this execution.
-    bool disable_all = 6;
-  }
-
-  // Labels to apply to the execution resource.
-  Labels labels = 7;
-
-  // Annotations to apply to the execution resource.
-  Annotations annotations = 8;
-
-  // Optional: security context override to apply to this execution.
-  core.SecurityContext security_context = 10;
-
-  // Optional: auth override to apply to this execution.
-  AuthRole auth_role = 16 [deprecated = true];
-
-  // Indicates the runtime priority of the execution.
-  core.QualityOfService quality_of_service = 17;
-
-  // Controls the maximum number of task nodes that can be run in parallel for the entire workflow.
-  // This is useful to achieve fairness. Note: MapTasks are regarded as one unit,
-  // and parallelism/concurrency of MapTasks is independent from this.
-  int32 max_parallelism = 18;
-
-  // User setting to configure where to store offloaded data (i.e. Blobs, structured datasets, query data, etc.).
-  // This should be a prefix like s3://my-bucket/my-data
-  RawOutputDataConfig raw_output_data_config = 19;
-
-  // Controls how to select an available cluster on which this execution should run.
-  ClusterAssignment cluster_assignment = 20;
-
-  // Allows for the interruptible flag of a workflow to be overwritten for a single execution.
-  // Omitting this field uses the workflow's value as a default.
-  // As we need to distinguish between the field not being provided and its default value false, we have to use a wrapper
-  // around the bool field.
-  google.protobuf.BoolValue interruptible = 21;
-
-  // Allows for all cached values of a workflow and its tasks to be overwritten for a single execution.
-  // If enabled, all calculations are performed even if cached results would be available, overwriting the stored
-  // data once execution finishes successfully.
-  bool overwrite_cache = 22;
-
-  // Environment variables to be set for the execution.
-  Envs envs = 23;
-
-  // Tags to be set for the execution.
-  repeated string tags = 24;
-
-  // Execution cluster label to be set for the execution.
-  ExecutionClusterLabel execution_cluster_label = 25;
-}
-
-// Request to terminate an in-progress execution. This action is irreversible.
-// If an execution is already terminated, this request will simply be a no-op.
-// This request will fail if it references a non-existent execution.
-// If the request succeeds the phase "ABORTED" will be recorded for the termination
-// with the optional cause added to the output_result.
-message ExecutionTerminateRequest {
-  // Uniquely identifies the individual workflow execution to be terminated.
-  core.WorkflowExecutionIdentifier id = 1;
-
-  // Optional reason for aborting.
-  string cause = 2;
-}
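A sketch of calling TerminateExecution through a tonic-generated stub. The client module path, endpoint, and execution name are placeholders for whatever flyrs actually exposes, not confirmed values.

use flyteidl::admin::ExecutionTerminateRequest;
use flyteidl::core::WorkflowExecutionIdentifier;
use flyteidl::service::admin_service_client::AdminServiceClient;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = AdminServiceClient::connect("http://localhost:8089").await?;
    let request = ExecutionTerminateRequest {
        id: Some(WorkflowExecutionIdentifier {
            project: "flytesnacks".into(),
            domain: "development".into(),
            name: "my-execution".into(), // hypothetical execution name
            ..Default::default()
        }),
        cause: "superseded by a newer run".into(),
    };
    // Per the comment above, terminating an already-terminated execution is a no-op.
    client.terminate_execution(request).await?;
    Ok(())
}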
-
-message ExecutionTerminateResponse {
-  // Purposefully empty, may be populated in the future.
-}
-
-// Request structure to fetch inputs, outputs and other data produced by an execution.
-// By default this data is not returned inline in :ref:`ref_flyteidl.admin.WorkflowExecutionGetRequest`
-message WorkflowExecutionGetDataRequest {
-  // The identifier of the execution for which to fetch inputs and outputs.
-  core.WorkflowExecutionIdentifier id = 1;
-}
-
-// Response structure for WorkflowExecutionGetDataRequest which contains inputs and outputs for an execution.
-message WorkflowExecutionGetDataResponse {
-  // Signed url to fetch a core.LiteralMap of execution outputs.
-  // Deprecated: Please use full_outputs instead.
-  UrlBlob outputs = 1 [deprecated = true];
-
-  // Signed url to fetch a core.LiteralMap of execution inputs.
-  // Deprecated: Please use full_inputs instead.
-  UrlBlob inputs = 2 [deprecated = true];
-
-  // Full_inputs will only be populated if they are under a configured size threshold.
-  core.LiteralMap full_inputs = 3;
-
-  // Full_outputs will only be populated if they are under a configured size threshold.
-  core.LiteralMap full_outputs = 4;
-}
-
-// The state of the execution is used to control its visibility in the UI/CLI.
-enum ExecutionState {
-  // By default, all executions are considered active.
-  EXECUTION_ACTIVE = 0;
-
-  // Archived executions are no longer visible in the UI.
-  EXECUTION_ARCHIVED = 1;
-}
-
-message ExecutionUpdateRequest {
-  // Identifier of the execution to update
-  core.WorkflowExecutionIdentifier id = 1;
-
-  // State to set as the new value active/archive
-  ExecutionState state = 2;
-}
-
-message ExecutionStateChangeDetails {
-  // The state of the execution is used to control its visibility in the UI/CLI.
-  ExecutionState state = 1;
-
-  // This timestamp represents when the state changed.
-  google.protobuf.Timestamp occurred_at = 2;
-
-  // Identifies the entity (if any) responsible for causing the state change of the execution
-  string principal = 3;
-}
-
-message ExecutionUpdateResponse {}
-
-// WorkflowExecutionGetMetricsRequest represents a request to retrieve metrics for the specified workflow execution.
-message WorkflowExecutionGetMetricsRequest {
-  // id defines the workflow execution to query for.
-  core.WorkflowExecutionIdentifier id = 1;
-
-  // depth defines the number of Flyte entity levels to traverse when breaking down execution details.
-  int32 depth = 2;
-}
-
-// WorkflowExecutionGetMetricsResponse represents the response containing metrics for the specified workflow execution.
-message WorkflowExecutionGetMetricsResponse {
-  // Span defines the top-level breakdown of the workflow's execution. More precise information is nested in a
-  // hierarchical structure using Flyte entity references.
- core.Span span = 1; -} diff --git a/flyrs/protos/flyteidl/admin/launch_plan.proto b/flyrs/protos/flyteidl/admin/launch_plan.proto deleted file mode 100644 index bbb0abda22..0000000000 --- a/flyrs/protos/flyteidl/admin/launch_plan.proto +++ /dev/null @@ -1,222 +0,0 @@ -syntax = "proto3"; - -package flyteidl.admin; -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; - -import "flyteidl/core/execution.proto"; -import "flyteidl/core/literals.proto"; -import "flyteidl/core/identifier.proto"; -import "flyteidl/core/interface.proto"; -import "flyteidl/core/security.proto"; -import "flyteidl/admin/schedule.proto"; -import "flyteidl/admin/common.proto"; -import "google/protobuf/any.proto"; -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; - - -// Request to register a launch plan. The included LaunchPlanSpec may have a complete or incomplete set of inputs required -// to launch a workflow execution. By default all launch plans are registered in state INACTIVE. If you wish to -// set the state to ACTIVE, you must submit a LaunchPlanUpdateRequest, after you have successfully created a launch plan. -message LaunchPlanCreateRequest { - // Uniquely identifies a launch plan entity. - core.Identifier id = 1; - - // User-provided launch plan details, including reference workflow, inputs and other metadata. - LaunchPlanSpec spec = 2; -} - -message LaunchPlanCreateResponse { - // Purposefully empty, may be populated in the future. -} - -// By default any launch plan regardless of state can be used to launch a workflow execution. -// However, at most one version of a launch plan -// (e.g. a NamedEntityIdentifier set of shared project, domain and name values) can be -// active at a time in regards to *schedules*. That is, at most one schedule in a NamedEntityIdentifier -// group will be observed and trigger executions at a defined cadence. -enum LaunchPlanState { - INACTIVE = 0; - ACTIVE = 1; -} - -// A LaunchPlan provides the capability to templatize workflow executions. -// Launch plans simplify associating one or more schedules, inputs and notifications with your workflows. -// Launch plans can be shared and used to trigger executions with predefined inputs even when a workflow -// definition doesn't necessarily have a default value for said input. -message LaunchPlan { - // Uniquely identifies a launch plan entity. - core.Identifier id = 1; - - // User-provided launch plan details, including reference workflow, inputs and other metadata. - LaunchPlanSpec spec = 2; - - // Values computed by the flyte platform after launch plan registration. - LaunchPlanClosure closure = 3; -} - -// Response object for list launch plan requests. -// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details -message LaunchPlanList { - repeated LaunchPlan launch_plans = 1; - - // In the case of multiple pages of results, the server-provided token can be used to fetch the next page - // in a query. If there are no more results, this value will be empty. - string token = 2; -} - -// Defines permissions associated with executions created by this launch plan spec. -// Use either of these roles when they have permissions required by your workflow execution. -// Deprecated. -message Auth { - option deprecated = true; - - // Defines an optional iam role which will be used for tasks run in executions created with this launch plan. 
- string assumable_iam_role = 1; - - // Defines an optional kubernetes service account which will be used for tasks run in executions created with this launch plan. - string kubernetes_service_account = 2; -} - -// User-provided launch plan definition and configuration values. -message LaunchPlanSpec { - // Reference to the Workflow template that the launch plan references - core.Identifier workflow_id = 1; - - // Metadata for the Launch Plan - LaunchPlanMetadata entity_metadata = 2; - - // Input values to be passed for the execution. - // These can be overridden when an execution is created with this launch plan. - core.ParameterMap default_inputs = 3; - - // Fixed, non-overridable inputs for the Launch Plan. - // These can not be overridden when an execution is created with this launch plan. - core.LiteralMap fixed_inputs = 4; - - // String to indicate the role to use to execute the workflow underneath - string role = 5 [deprecated = true]; - - // Custom labels to be applied to the execution resource. - Labels labels = 6; - - // Custom annotations to be applied to the execution resource. - Annotations annotations = 7; - - // Indicates the permission associated with workflow executions triggered with this launch plan. - Auth auth = 8 [deprecated = true]; - - AuthRole auth_role = 9 [deprecated = true]; - - // Indicates security context for permissions triggered with this launch plan - core.SecurityContext security_context = 10; - - // Indicates the runtime priority of the execution. - core.QualityOfService quality_of_service = 16; - - // Encapsulates user settings pertaining to offloaded data (i.e. Blobs, Schema, query data, etc.). - RawOutputDataConfig raw_output_data_config = 17; - - // Controls the maximum number of tasknodes that can be run in parallel for the entire workflow. - // This is useful to achieve fairness. Note: MapTasks are regarded as one unit, - // and parallelism/concurrency of MapTasks is independent from this. - int32 max_parallelism = 18; - - // Allows for the interruptible flag of a workflow to be overwritten for a single execution. - // Omitting this field uses the workflow's value as a default. - // As we need to distinguish between the field not being provided and its default value false, we have to use a wrapper - // around the bool field. - google.protobuf.BoolValue interruptible = 19; - - // Allows for all cached values of a workflow and its tasks to be overwritten for a single execution. - // If enabled, all calculations are performed even if cached results would be available, overwriting the stored - // data once execution finishes successfully. - bool overwrite_cache = 20; - - // Environment variables to be set for the execution. - Envs envs = 21; -} - -// Values computed by the flyte platform after launch plan registration. -// These include expected_inputs required to be present in a CreateExecutionRequest -// to launch the reference workflow as well timestamp values associated with the launch plan. -message LaunchPlanClosure { - // Indicate the Launch plan state. - LaunchPlanState state = 1; - - // Indicates the set of inputs expected when creating an execution with the Launch plan - core.ParameterMap expected_inputs = 2; - - // Indicates the set of outputs expected to be produced by creating an execution with the Launch plan - core.VariableMap expected_outputs = 3; - - // Time at which the launch plan was created. - google.protobuf.Timestamp created_at = 4; - - // Time at which the launch plan was last updated. 
- google.protobuf.Timestamp updated_at = 5; -} - -// Additional launch plan attributes included in the LaunchPlanSpec not strictly required to launch -// the reference workflow. -message LaunchPlanMetadata { - // Schedule to execute the Launch Plan - Schedule schedule = 1; - - // List of notifications based on Execution status transitions - repeated Notification notifications = 2; - - // Additional metadata for how to launch the launch plan - google.protobuf.Any launch_conditions = 3; -} - -// Request to set the referenced launch plan state to the configured value. -// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details -message LaunchPlanUpdateRequest { - // Identifier of launch plan for which to change state. - // +required. - core.Identifier id = 1; - - // Desired state to apply to the launch plan. - // +required. - LaunchPlanState state = 2; -} - -// Purposefully empty, may be populated in the future. -message LaunchPlanUpdateResponse { -} - -// Represents a request struct for finding an active launch plan for a given NamedEntityIdentifier -// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details -message ActiveLaunchPlanRequest { - // +required. - NamedEntityIdentifier id = 1; -} - -// Represents a request structure to list active launch plans within a project/domain and optional org. -// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details -message ActiveLaunchPlanListRequest { - // Name of the project that contains the identifiers. - // +required. - string project = 1; - - // Name of the domain the identifiers belongs to within the project. - // +required. - string domain = 2; - - // Indicates the number of resources to be returned. - // +required. - uint32 limit = 3; - - // In the case of multiple pages of results, the server-provided token can be used to fetch the next page - // in a query. - // +optional - string token = 4; - - // Sort ordering. - // +optional - Sort sort_by = 5; - - // Optional, org key applied to the resource. - string org = 6; -} diff --git a/flyrs/protos/flyteidl/admin/matchable_resource.proto b/flyrs/protos/flyteidl/admin/matchable_resource.proto deleted file mode 100644 index e379bf1573..0000000000 --- a/flyrs/protos/flyteidl/admin/matchable_resource.proto +++ /dev/null @@ -1,190 +0,0 @@ -syntax = "proto3"; - -package flyteidl.admin; -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; - -import "flyteidl/admin/common.proto"; -import "flyteidl/admin/cluster_assignment.proto"; -import "flyteidl/core/execution.proto"; -import "flyteidl/core/security.proto"; -import "google/protobuf/wrappers.proto"; - -// Defines a resource that can be configured by customizable Project-, ProjectDomain- or WorkflowAttributes -// based on matching tags. -enum MatchableResource { - // Applies to customizable task resource requests and limits. - TASK_RESOURCE = 0; - - // Applies to configuring templated kubernetes cluster resources. - CLUSTER_RESOURCE = 1; - - // Configures task and dynamic task execution queue assignment. - EXECUTION_QUEUE = 2; - - // Configures the K8s cluster label to be used for execution to be run - EXECUTION_CLUSTER_LABEL = 3; - - // Configures default quality of service when undefined in an execution spec. - QUALITY_OF_SERVICE_SPECIFICATION = 4; - - // Selects configurable plugin implementation behavior for a given task type. - PLUGIN_OVERRIDE = 5; - - // Adds defaults for customizable workflow-execution specifications and overrides. 
-  WORKFLOW_EXECUTION_CONFIG = 6;
-
-  // Controls how to select an available cluster on which this execution should run.
-  CLUSTER_ASSIGNMENT = 7;
-}
-
-// Defines a set of overridable task resource attributes set during task registration.
-message TaskResourceSpec {
-  string cpu = 1;
-
-  string gpu = 2;
-
-  string memory = 3;
-
-  string storage = 4;
-
-  string ephemeral_storage = 5;
-}
-
-// Defines task resource defaults and limits that will be applied at task registration.
-message TaskResourceAttributes {
-  TaskResourceSpec defaults = 1;
-
-  TaskResourceSpec limits = 2;
-}
-
-message ClusterResourceAttributes {
-  // Custom resource attributes which will be applied in cluster resource creation (e.g. quotas).
-  // Map keys are the *case-sensitive* names of variables in templatized resource files.
-  // Map values should be the custom values which get substituted during resource creation.
-  map<string, string> attributes = 1;
-}
-
-message ExecutionQueueAttributes {
-  // Tags used for assigning execution queues for tasks defined within this project.
-  repeated string tags = 1;
-}
-
-message ExecutionClusterLabel {
-  // Label value to determine where the execution will be run
-  string value = 1;
-}
-
-// This MatchableAttribute configures selecting alternate plugin implementations for a given task type.
-// In addition to an override implementation a selection of fallbacks can be provided or other modes
-// for handling cases where the desired plugin override is not enabled in a given Flyte deployment.
-message PluginOverride {
-  // A predefined yet extensible Task type identifier.
-  string task_type = 1;
-
-  // A set of plugin ids which should handle tasks of this type instead of the default registered plugin. The list will be tried in order until a plugin is found with that id.
-  repeated string plugin_id = 2;
-
-  enum MissingPluginBehavior {
-    // By default, if this plugin is not enabled for a Flyte deployment then execution will fail.
-    FAIL = 0;
-
-    // Uses the system-configured default implementation.
-    USE_DEFAULT = 1;
-  }
-
-  // Defines the behavior when no plugin from the plugin_id list is found.
-  MissingPluginBehavior missing_plugin_behavior = 4;
-}
-
-
-message PluginOverrides {
-  repeated PluginOverride overrides = 1;
-}
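TaskResourceSpec values are Kubernetes-style quantity strings. A small sketch of building defaults and limits with the assumed prost-generated types (names and quantities illustrative):

use flyteidl::admin::{TaskResourceAttributes, TaskResourceSpec};

// Defaults are what a task gets when it requests nothing; limits cap
// what any task may request.
fn modest_resources() -> TaskResourceAttributes {
    TaskResourceAttributes {
        defaults: Some(TaskResourceSpec {
            cpu: "500m".into(),
            memory: "512Mi".into(),
            ..Default::default()
        }),
        limits: Some(TaskResourceSpec {
            cpu: "2".into(),
            memory: "4Gi".into(),
            ..Default::default()
        }),
    }
}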
-
-// Adds defaults for customizable workflow-execution specifications and overrides.
-message WorkflowExecutionConfig {
-  // Can be used to control the number of parallel nodes to run within the workflow. This is useful to achieve fairness.
-  int32 max_parallelism = 1;
-
-  // Indicates security context permissions for executions triggered with this matchable attribute.
-  core.SecurityContext security_context = 2;
-
-  // Encapsulates user settings pertaining to offloaded data (i.e. Blobs, Schema, query data, etc.).
-  RawOutputDataConfig raw_output_data_config = 3;
-
-  // Custom labels to be applied to a triggered execution resource.
-  Labels labels = 4;
-
-  // Custom annotations to be applied to a triggered execution resource.
-  Annotations annotations = 5;
-
-  // Allows for the interruptible flag of a workflow to be overwritten for a single execution.
-  // Omitting this field uses the workflow's value as a default.
-  // As we need to distinguish between the field not being provided and its default value false, we have to use a wrapper
-  // around the bool field.
-  google.protobuf.BoolValue interruptible = 6;
-
-  // Allows for all cached values of a workflow and its tasks to be overwritten for a single execution.
-  // If enabled, all calculations are performed even if cached results would be available, overwriting the stored
-  // data once execution finishes successfully.
-  bool overwrite_cache = 7;
-
-  // Environment variables to be set for the execution.
-  Envs envs = 8;
-}
-
-// Generic container for encapsulating all types of the above attributes messages.
-message MatchingAttributes {
-  oneof target {
-    TaskResourceAttributes task_resource_attributes = 1;
-
-    ClusterResourceAttributes cluster_resource_attributes = 2;
-
-    ExecutionQueueAttributes execution_queue_attributes = 3;
-
-    ExecutionClusterLabel execution_cluster_label = 4;
-
-    core.QualityOfService quality_of_service = 5;
-
-    PluginOverrides plugin_overrides = 6;
-
-    WorkflowExecutionConfig workflow_execution_config = 7;
-
-    ClusterAssignment cluster_assignment = 8;
-  }
-}
-
-// Represents a custom set of attributes applied for either a domain (and optional org); a domain and project (and optional org);
-// or domain, project and workflow name (and optional org).
-// These are used to override system level defaults for kubernetes cluster resource management,
-// default execution values, and more, all across different levels of specificity.
-message MatchableAttributesConfiguration {
-  MatchingAttributes attributes = 1;
-
-  string domain = 2;
-
-  string project = 3;
-
-  string workflow = 4;
-
-  string launch_plan = 5;
-
-  // Optional, org key applied to the resource.
-  string org = 6;
-}
-
-// Request all matching resource attributes for a resource type.
-// See :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for more details
-message ListMatchableAttributesRequest {
-  // +required
-  MatchableResource resource_type = 1;
-
-  // Optional, org filter applied to list project requests.
-  string org = 2;
-}
-
-// Response for a request for all matching resource attributes for a resource type.
-// See :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for more details
-message ListMatchableAttributesResponse {
-  repeated MatchableAttributesConfiguration configurations = 1;
-}
diff --git a/flyrs/protos/flyteidl/admin/node_execution.proto b/flyrs/protos/flyteidl/admin/node_execution.proto
deleted file mode 100644
index ac672ad49f..0000000000
--- a/flyrs/protos/flyteidl/admin/node_execution.proto
+++ /dev/null
@@ -1,245 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.admin;
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin";
-
-import "flyteidl/admin/common.proto";
-import "flyteidl/core/execution.proto";
-import "flyteidl/core/catalog.proto";
-import "flyteidl/core/compiler.proto";
-import "flyteidl/core/identifier.proto";
-import "flyteidl/core/literals.proto";
-import "google/protobuf/timestamp.proto";
-import "google/protobuf/duration.proto";
-
-// A message used to fetch a single node execution entity.
-// See :ref:`ref_flyteidl.admin.NodeExecution` for more details
-message NodeExecutionGetRequest {
-
-  // Uniquely identifies an individual node execution.
-  // +required
-  core.NodeExecutionIdentifier id = 1;
-}
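Before moving on to node executions, a sketch of querying the matchable-resource API above; the stub module, endpoint, and enum conversion follow common tonic/prost conventions but are assumptions here, not confirmed flyrs names.

use flyteidl::admin::{ListMatchableAttributesRequest, MatchableResource};
use flyteidl::service::admin_service_client::AdminServiceClient;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = AdminServiceClient::connect("http://localhost:8089").await?;
    let request = ListMatchableAttributesRequest {
        // prost stores proto enums as i32 on the generated struct.
        resource_type: MatchableResource::TaskResource as i32,
        org: String::new(),
    };
    let response = client.list_matchable_attributes(request).await?.into_inner();
    for c in response.configurations {
        println!("{}/{}/{} -> {:?}", c.project, c.domain, c.workflow, c.attributes);
    }
    Ok(())
}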
-
-// Represents a request structure to retrieve a list of node execution entities.
-// See :ref:`ref_flyteidl.admin.NodeExecution` for more details
-message NodeExecutionListRequest {
-  // Indicates the workflow execution to filter by.
-  // +required
-  core.WorkflowExecutionIdentifier workflow_execution_id = 1;
-
-  // Indicates the number of resources to be returned.
-  // +required
-  uint32 limit = 2;
-
-  // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
-  // in a query.
-  // +optional
-  string token = 3;
-
-  // Indicates a list of filters passed as string.
-  // More info on constructing filters :
-  // +optional
-  string filters = 4;
-
-  // Sort ordering.
-  // +optional
-  Sort sort_by = 5;
-
-  // Unique identifier of the parent node in the execution
-  // +optional
-  string unique_parent_id = 6;
-}
-
-// Represents a request structure to retrieve a list of node execution entities launched by a specific task.
-// This can arise when a task yields a subworkflow.
-message NodeExecutionForTaskListRequest {
-  // Indicates the node execution to filter by.
-  // +required
-  core.TaskExecutionIdentifier task_execution_id = 1;
-
-  // Indicates the number of resources to be returned.
-  // +required
-  uint32 limit = 2;
-
-  // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
-  // in a query.
-  // +optional
-  string token = 3;
-
-  // Indicates a list of filters passed as string.
-  // More info on constructing filters :
-  // +optional
-  string filters = 4;
-
-  // Sort ordering.
-  // +optional
-  Sort sort_by = 5;
-}
-
-// Encapsulates all details for a single node execution entity.
-// A node represents a component in the overall workflow graph. A node can launch a task, multiple tasks, an entire nested
-// sub-workflow, or even a separate child-workflow execution.
-// The same task can be called repeatedly in a single workflow but each node is unique.
-message NodeExecution {
-
-  // Uniquely identifies an individual node execution.
-  core.NodeExecutionIdentifier id = 1;
-
-  // Path to remote data store where input blob is stored.
-  string input_uri = 2;
-
-  // Computed results associated with this node execution.
-  NodeExecutionClosure closure = 3;
-
-  // Metadata for Node Execution
-  NodeExecutionMetaData metadata = 4;
-}
-
-// Represents additional attributes related to a Node Execution
-message NodeExecutionMetaData {
-  // Node executions are grouped depending on retries of the parent.
-  // Retry group is unique within the context of a parent node.
-  string retry_group = 1;
-
-  // Boolean flag indicating if the node has child nodes under it.
-  // This can be true when a node contains a dynamic workflow which then produces
-  // child nodes.
-  bool is_parent_node = 2;
-
-  // Node id of the node in the original workflow
-  // This maps to value of WorkflowTemplate.nodes[X].id
-  string spec_node_id = 3;
-
-  // Boolean flag indicating if the node contains a dynamic workflow which then produces child nodes.
-  // This is to distinguish between subworkflows and dynamic workflows which can both have is_parent_node as true.
-  bool is_dynamic = 4;
-
-  // Boolean flag indicating if the node is an array node. This is intended to uniquely identify
-  // array nodes from other nodes which can have is_parent_node as true.
-  bool is_array = 5;
-}
-
-// Request structure to retrieve a list of node execution entities.
-// See :ref:`ref_flyteidl.admin.NodeExecution` for more details
-message NodeExecutionList {
-  repeated NodeExecution node_executions = 1;
-
-  // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
-  // in a query. If there are no more results, this value will be empty.
-  string token = 2;
-}
-
-// Container for node execution details and results.
-message NodeExecutionClosure { - // Only a node in a terminal state will have a non-empty output_result. - oneof output_result { - // Links to a remotely stored, serialized core.LiteralMap of node execution outputs. - // DEPRECATED. Use GetNodeExecutionData to fetch output data instead. - string output_uri = 1 [deprecated = true]; - - // Error information for the Node - core.ExecutionError error = 2; - - // Raw output data produced by this node execution. - // DEPRECATED. Use GetNodeExecutionData to fetch output data instead. - core.LiteralMap output_data = 10 [deprecated = true]; - } - - // The last recorded phase for this node execution. - core.NodeExecution.Phase phase = 3; - - // Time at which the node execution began running. - google.protobuf.Timestamp started_at = 4; - - // The amount of time the node execution spent running. - google.protobuf.Duration duration = 5; - - // Time at which the node execution was created. - google.protobuf.Timestamp created_at = 6; - - // Time at which the node execution was last updated. - google.protobuf.Timestamp updated_at = 7; - - // Store metadata for what the node launched. - // for ex: if this is a workflow node, we store information for the launched workflow. - oneof target_metadata { - WorkflowNodeMetadata workflow_node_metadata = 8; - TaskNodeMetadata task_node_metadata = 9; - } - - // String location uniquely identifying where the deck HTML file is. - // NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar) - string deck_uri = 11; - - // dynamic_job_spec_uri is the location of the DynamicJobSpec proto message for a DynamicWorkflow. This is required - // to correctly recover partially completed executions where the subworkflow has already been compiled. - string dynamic_job_spec_uri = 12; -} - -// Metadata for a WorkflowNode -message WorkflowNodeMetadata { - // The identifier for a workflow execution launched by a node. - core.WorkflowExecutionIdentifier executionId = 1; -} - -// Metadata for the case in which the node is a TaskNode -message TaskNodeMetadata { - // Captures the status of caching for this execution. - core.CatalogCacheStatus cache_status = 1; - // This structure carries the catalog artifact information - core.CatalogMetadata catalog_key = 2; - // The latest checkpoint location - string checkpoint_uri = 4; -} - -// For dynamic workflow nodes we capture information about the dynamic workflow definition that gets generated. -message DynamicWorkflowNodeMetadata { - // id represents the unique identifier of the workflow. - core.Identifier id = 1; - - // Represents the compiled representation of the embedded dynamic workflow. - core.CompiledWorkflowClosure compiled_workflow = 2; - - // dynamic_job_spec_uri is the location of the DynamicJobSpec proto message for this DynamicWorkflow. This is - // required to correctly recover partially completed executions where the subworkflow has already been compiled. - string dynamic_job_spec_uri = 3; -} - -// Request structure to fetch inputs and output for a node execution. -// By default, these are not returned in :ref:`ref_flyteidl.admin.NodeExecutionGetRequest` -message NodeExecutionGetDataRequest { - // The identifier of the node execution for which to fetch inputs and outputs. - core.NodeExecutionIdentifier id = 1; -} - -// Response structure for NodeExecutionGetDataRequest which contains inputs and outputs for a node execution. 
-message NodeExecutionGetDataResponse { - // Signed url to fetch a core.LiteralMap of node execution inputs. - // Deprecated: Please use full_inputs instead. - UrlBlob inputs = 1 [deprecated = true]; - - // Signed url to fetch a core.LiteralMap of node execution outputs. - // Deprecated: Please use full_outputs instead. - UrlBlob outputs = 2 [deprecated = true]; - - // Full_inputs will only be populated if they are under a configured size threshold. - core.LiteralMap full_inputs = 3; - - // Full_outputs will only be populated if they are under a configured size threshold. - core.LiteralMap full_outputs = 4; - - // Optional Workflow closure for a dynamically generated workflow, in the case this node yields a dynamic workflow we return its structure here. - DynamicWorkflowNodeMetadata dynamic_workflow = 16; - - FlyteURLs flyte_urls = 17; - -} - -message GetDynamicNodeWorkflowRequest { - core.NodeExecutionIdentifier id = 1; -} - -message DynamicNodeWorkflowResponse { - core.CompiledWorkflowClosure compiled_workflow = 1; -} diff --git a/flyrs/protos/flyteidl/admin/notification.proto b/flyrs/protos/flyteidl/admin/notification.proto deleted file mode 100644 index 9ef54c9794..0000000000 --- a/flyrs/protos/flyteidl/admin/notification.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package flyteidl.admin; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; - -// Represents the Email object that is sent to a publisher/subscriber -// to forward the notification. -// Note: This is internal to Admin and doesn't need to be exposed to other components. -message EmailMessage { - // The list of email addresses to receive an email with the content populated in the other fields. - // Currently, each email recipient will receive its own email. - // This populates the TO field. - repeated string recipients_email = 1; - - // The email of the sender. - // This populates the FROM field. - string sender_email = 2; - - // The content of the subject line. - // This populates the SUBJECT field. - string subject_line = 3; - - // The content of the email body. - // This populates the BODY field. - string body = 4; -} diff --git a/flyrs/protos/flyteidl/admin/project.proto b/flyrs/protos/flyteidl/admin/project.proto deleted file mode 100644 index 62e61b032d..0000000000 --- a/flyrs/protos/flyteidl/admin/project.proto +++ /dev/null @@ -1,110 +0,0 @@ -syntax = "proto3"; - -package flyteidl.admin; -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; - - -import "flyteidl/admin/common.proto"; - -// Namespace within a project commonly used to differentiate between different service instances. -// e.g. "production", "development", etc. -message Domain { - // Globally unique domain name. - string id = 1; - - // Display name. - string name = 2; -} - - -// Top-level namespace used to classify different entities like workflows and executions. -message Project { - // Globally unique project name. - string id = 1; - - // Display name. - string name = 2; - - repeated Domain domains = 3; - - string description = 4; - - // Leverage Labels from flyteidl.admin.common.proto to - // tag projects with ownership information. - Labels labels = 5; - - // The state of the project is used to control its visibility in the UI and validity. - enum ProjectState { - // By default, all projects are considered active. - ACTIVE = 0; - - // Archived projects are no longer visible in the UI and no longer valid. 
- ARCHIVED = 1; - - // System generated projects that aren't explicitly created or managed by a user. - SYSTEM_GENERATED = 2; - } - ProjectState state = 6; - - // Optional, org key applied to the resource. - string org = 7; -} - -// Represents a list of projects. -// See :ref:`ref_flyteidl.admin.Project` for more details -message Projects { - repeated Project projects = 1; - - // In the case of multiple pages of results, the server-provided token can be used to fetch the next page - // in a query. If there are no more results, this value will be empty. - string token = 2; -} - -// Request to retrieve a list of projects matching specified filters. -// See :ref:`ref_flyteidl.admin.Project` for more details -message ProjectListRequest { - // Indicates the number of projects to be returned. - // +required - uint32 limit = 1; - - // In the case of multiple pages of results, this server-provided token can be used to fetch the next page - // in a query. - // +optional - string token = 2; - - // Indicates a list of filters passed as string. - // More info on constructing filters : - // +optional - string filters = 3; - - // Sort ordering. - // +optional - Sort sort_by = 4; - - // Optional, org filter applied to list project requests. - string org = 5; -} - -// Adds a new user-project within the Flyte deployment. -// See :ref:`ref_flyteidl.admin.Project` for more details -message ProjectRegisterRequest { - // +required - Project project = 1; -} - -// Purposefully empty, may be updated in the future. -message ProjectRegisterResponse { -} - -// Purposefully empty, may be updated in the future. -message ProjectUpdateResponse { -} - -message ProjectGetRequest { - // Indicates a unique project. - // +required - string id = 1; - - // Optional, org key applied to the resource. - string org = 2; -} diff --git a/flyrs/protos/flyteidl/admin/project_attributes.proto b/flyrs/protos/flyteidl/admin/project_attributes.proto deleted file mode 100644 index 2656ab25f5..0000000000 --- a/flyrs/protos/flyteidl/admin/project_attributes.proto +++ /dev/null @@ -1,69 +0,0 @@ -syntax = "proto3"; - -package flyteidl.admin; -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; - -import "flyteidl/admin/matchable_resource.proto"; - -// Defines a set of custom matching attributes at the project level. -// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -message ProjectAttributes { - // Unique project id for which this set of attributes will be applied. - string project = 1; - - MatchingAttributes matching_attributes = 2; - - // Optional, org key applied to the project. - string org = 3; -} - -// Sets custom attributes for a project -// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -message ProjectAttributesUpdateRequest { - // +required - ProjectAttributes attributes = 1; -} - -// Purposefully empty, may be populated in the future. -message ProjectAttributesUpdateResponse { -} - -// Request to get an individual project level attribute override. -// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -message ProjectAttributesGetRequest { - // Unique project id which this set of attributes references. - // +required - string project = 1; - - // Which type of matchable attributes to return. - // +required - MatchableResource resource_type = 2; - - // Optional, org key applied to the project. 
- string org = 3; -} - -// Response to get an individual project level attribute override. -// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -message ProjectAttributesGetResponse { - ProjectAttributes attributes = 1; -} - -// Request to delete a set matchable project level attribute override. -// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -message ProjectAttributesDeleteRequest { - // Unique project id which this set of attributes references. - // +required - string project = 1; - - // Which type of matchable attributes to delete. - // +required - MatchableResource resource_type = 2; - - // Optional, org key applied to the project. - string org = 3; -} - -// Purposefully empty, may be populated in the future. -message ProjectAttributesDeleteResponse { -} diff --git a/flyrs/protos/flyteidl/admin/project_domain_attributes.proto b/flyrs/protos/flyteidl/admin/project_domain_attributes.proto deleted file mode 100644 index b493ae1178..0000000000 --- a/flyrs/protos/flyteidl/admin/project_domain_attributes.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto3"; - -package flyteidl.admin; -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; - -import "flyteidl/admin/matchable_resource.proto"; - -// Defines a set of custom matching attributes which defines resource defaults for a project and domain. -// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -message ProjectDomainAttributes { - // Unique project id for which this set of attributes will be applied. - string project = 1; - - // Unique domain id for which this set of attributes will be applied. - string domain = 2; - - MatchingAttributes matching_attributes = 3; - - // Optional, org key applied to the attributes. - string org = 4; -} - -// Sets custom attributes for a project-domain combination. -// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -message ProjectDomainAttributesUpdateRequest { - // +required - ProjectDomainAttributes attributes = 1; -} - -// Purposefully empty, may be populated in the future. -message ProjectDomainAttributesUpdateResponse { -} - -// Request to get an individual project domain attribute override. -// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -message ProjectDomainAttributesGetRequest { - // Unique project id which this set of attributes references. - // +required - string project = 1; - - // Unique domain id which this set of attributes references. - // +required - string domain = 2; - - // Which type of matchable attributes to return. - // +required - MatchableResource resource_type = 3; - - // Optional, org key applied to the attributes. - string org = 4; -} - -// Response to get an individual project domain attribute override. -// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -message ProjectDomainAttributesGetResponse { - ProjectDomainAttributes attributes = 1; -} - -// Request to delete a set matchable project domain attribute override. -// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -message ProjectDomainAttributesDeleteRequest { - // Unique project id which this set of attributes references. 
- // +required - string project = 1; - - // Unique domain id which this set of attributes references. - // +required - string domain = 2; - - // Which type of matchable attributes to delete. - // +required - MatchableResource resource_type = 3; - - // Optional, org key applied to the attributes. - string org = 4; -} - -// Purposefully empty, may be populated in the future. -message ProjectDomainAttributesDeleteResponse { -} diff --git a/flyrs/protos/flyteidl/admin/schedule.proto b/flyrs/protos/flyteidl/admin/schedule.proto deleted file mode 100644 index 6bcbd90140..0000000000 --- a/flyrs/protos/flyteidl/admin/schedule.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package flyteidl.admin; -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; - -// Represents a frequency at which to run a schedule. -enum FixedRateUnit { - MINUTE = 0; - HOUR = 1; - DAY = 2; -} - -// Option for schedules run at a certain frequency e.g. every 2 minutes. -message FixedRate { - uint32 value = 1; - FixedRateUnit unit = 2; -} - -// Options for schedules to run according to a cron expression. -message CronSchedule { - // Standard/default cron implementation as described by https://en.wikipedia.org/wiki/Cron#CRON_expression; - // Also supports nonstandard predefined scheduling definitions - // as described by https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions - // except @reboot - string schedule = 1; - // ISO 8601 duration as described by https://en.wikipedia.org/wiki/ISO_8601#Durations - string offset = 2; -} - -// Defines complete set of information required to trigger an execution on a schedule. -message Schedule { - - oneof ScheduleExpression { - // Uses AWS syntax: Minutes Hours Day-of-month Month Day-of-week Year - // e.g. for a schedule that runs every 15 minutes: 0/15 * * * ? * - string cron_expression = 1 [deprecated=true]; - FixedRate rate = 2; - CronSchedule cron_schedule = 4; - } - - // Name of the input variable that the kickoff time will be supplied to when the workflow is kicked off. - string kickoff_time_input_arg = 3; -} diff --git a/flyrs/protos/flyteidl/admin/signal.proto b/flyrs/protos/flyteidl/admin/signal.proto deleted file mode 100644 index aad8437bc2..0000000000 --- a/flyrs/protos/flyteidl/admin/signal.proto +++ /dev/null @@ -1,86 +0,0 @@ -syntax = "proto3"; - -package flyteidl.admin; -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin"; - -import "flyteidl/admin/common.proto"; -import "flyteidl/core/identifier.proto"; -import "flyteidl/core/literals.proto"; -import "flyteidl/core/types.proto"; - -// SignalGetOrCreateRequest represents a request structure to retrieve or create a signal. -// See :ref:`ref_flyteidl.admin.Signal` for more details -message SignalGetOrCreateRequest { - // A unique identifier for the requested signal. - core.SignalIdentifier id = 1; - - // A type denoting the required value type for this signal. - core.LiteralType type = 2; -} - -// SignalListRequest represents a request structure to retrieve a collection of signals. -// See :ref:`ref_flyteidl.admin.Signal` for more details -message SignalListRequest { - // Indicates the workflow execution to filter by. - // +required - core.WorkflowExecutionIdentifier workflow_execution_id = 1; - - // Indicates the number of resources to be returned. - // +required - uint32 limit = 2; - - // In the case of multiple pages of results, the, server-provided token can be used to fetch the next page - // in a query. 
-  // +optional
-  string token = 3;
-
-  // Indicates a list of filters passed as string.
-  // +optional
-  string filters = 4;
-
-  // Sort ordering.
-  // +optional
-  Sort sort_by = 5;
-}
-
-// SignalList represents collection of signals along with the token of the last result.
-// See :ref:`ref_flyteidl.admin.Signal` for more details
-message SignalList {
-  // A list of signals matching the input filters.
-  repeated Signal signals = 1;
-
-  // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
-  // in a query. If there are no more results, this value will be empty.
-  string token = 2;
-}
-
-// SignalSetRequest represents a request structure to set the value on a signal. Setting a signal
-// effectively satisfies the signal condition within a Flyte workflow.
-// See :ref:`ref_flyteidl.admin.Signal` for more details
-message SignalSetRequest {
-  // A unique identifier for the requested signal.
-  core.SignalIdentifier id = 1;
-
-  // The value of this signal, must match the defining signal type.
-  core.Literal value = 2;
-}
-
-// SignalSetResponse represents a response structure if signal setting succeeds.
-message SignalSetResponse {
-  // Purposefully empty, may be populated in the future.
-}
-
-// Signal encapsulates a unique identifier, associated metadata, and a value for a single Flyte
-// signal. Signals may exist either without a set value (representing a signal request) or with a
-// populated value (indicating the signal has been given).
-message Signal {
-  // A unique identifier for the requested signal.
-  core.SignalIdentifier id = 1;
-
-  // A type denoting the required value type for this signal.
-  core.LiteralType type = 2;
-
-  // The value of the signal. This is only available if the signal has been "set" and must match
-  // the defined type.
-  core.Literal value = 3;
-}
diff --git a/flyrs/protos/flyteidl/admin/task.proto b/flyrs/protos/flyteidl/admin/task.proto
deleted file mode 100644
index 6185d6fbba..0000000000
--- a/flyrs/protos/flyteidl/admin/task.proto
+++ /dev/null
@@ -1,71 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.admin;
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin";
-
-import "flyteidl/core/identifier.proto";
-import "flyteidl/core/tasks.proto";
-import "flyteidl/core/compiler.proto";
-import "flyteidl/admin/description_entity.proto";
-import "google/protobuf/timestamp.proto";
-
-// Represents a request structure to create a revision of a task.
-// See :ref:`ref_flyteidl.admin.Task` for more details
-message TaskCreateRequest {
-  // id represents the unique identifier of the task.
-  // +required
-  core.Identifier id = 1;
-
-  // Represents the specification for task.
-  // +required
-  TaskSpec spec = 2;
-}
-
-// Represents a response structure if task creation succeeds.
-message TaskCreateResponse {
-  // Purposefully empty, may be populated in the future.
-}
-
-// Flyte workflows are composed of many ordered tasks; that is, small, reusable, self-contained logical blocks
-// arranged to process workflow inputs and produce a deterministic set of outputs.
-// Tasks can come in many varieties tuned for specialized behavior.
-message Task {
-  // id represents the unique identifier of the task.
-  core.Identifier id = 1;
-
-  // closure encapsulates all the fields that map to a compiled version of the task.
-  TaskClosure closure = 2;
-
-  // One-liner overview of the entity.
-  string short_description = 3;
-}
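To exercise the signal messages above, here is a sketch of a SignalGetOrCreateRequest asking for a boolean approval gate. Note that prost would emit the proto `type` field as `r#type`, since `type` is a Rust keyword; all generated names are assumptions.

use flyteidl::admin::SignalGetOrCreateRequest;
use flyteidl::core::{literal_type, LiteralType, SignalIdentifier, SimpleType, WorkflowExecutionIdentifier};

// Requests (or fetches, if it already exists) a boolean signal scoped
// to a single workflow execution; the signal id is illustrative.
fn approval_signal(execution_id: WorkflowExecutionIdentifier) -> SignalGetOrCreateRequest {
    SignalGetOrCreateRequest {
        id: Some(SignalIdentifier {
            signal_id: "approve-deploy".into(),
            execution_id: Some(execution_id),
        }),
        r#type: Some(LiteralType {
            r#type: Some(literal_type::Type::Simple(SimpleType::Boolean as i32)),
            ..Default::default()
        }),
    }
}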
-// See :ref:`ref_flyteidl.admin.Task` for more details
-message TaskList {
- // A list of tasks returned based on the request.
- repeated Task tasks = 1;
-
- // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
- // in a query. If there are no more results, this value will be empty.
- string token = 2;
-}
-
-// Represents a structure that encapsulates the user-configured specification of the task.
-message TaskSpec {
- // Template of the task that encapsulates all the metadata of the task.
- core.TaskTemplate template = 1;
-
- // Represents the specification for description entity.
- DescriptionEntity description = 2;
-}
-
-// Compute task attributes which include values derived from the TaskSpec, as well as plugin-specific data
-// and task metadata.
-message TaskClosure {
- // Represents the compiled representation of the task from the specification provided.
- core.CompiledTask compiled_task = 1;
-
- // Time at which the task was created.
- google.protobuf.Timestamp created_at = 2;
-}
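TaskList above follows the same limit/token pagination contract as the other list endpoints in this API: requests carry a limit and an optional token, and the response token is empty once the last page has been served. A minimal sketch of the resulting client loop, with a hypothetical fetch_page standing in for a generated call such as ListTasks:

    // Sketch of the token-based pagination contract described above. The
    // `fetch_page` function is a hypothetical stand-in for a generated
    // gRPC client call.
    struct Page {
        items: Vec<String>, // stand-in for the repeated Task field
        token: String,      // empty when there are no more results
    }

    fn fetch_page(limit: u32, token: &str) -> Page {
        // A real client would send a list request carrying `limit` and `token`.
        let _ = (limit, token);
        Page { items: Vec::new(), token: String::new() }
    }

    fn main() {
        let mut all = Vec::new();
        let mut token = String::new();
        loop {
            let page = fetch_page(100, &token);
            all.extend(page.items);
            if page.token.is_empty() {
                break; // an empty token marks the final page
            }
            token = page.token;
        }
        println!("fetched {} items", all.len());
    }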
diff --git a/flyrs/protos/flyteidl/admin/task_execution.proto b/flyrs/protos/flyteidl/admin/task_execution.proto
deleted file mode 100644
index 54d2ff1e61..0000000000
--- a/flyrs/protos/flyteidl/admin/task_execution.proto
+++ /dev/null
@@ -1,168 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.admin;
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin";
-
-import "flyteidl/admin/common.proto";
-import "flyteidl/core/execution.proto";
-import "flyteidl/core/identifier.proto";
-import "flyteidl/core/literals.proto";
-import "flyteidl/event/event.proto";
-import "google/protobuf/timestamp.proto";
-import "google/protobuf/duration.proto";
-import "google/protobuf/struct.proto";
-
-// A message used to fetch a single task execution entity.
-// See :ref:`ref_flyteidl.admin.TaskExecution` for more details
-message TaskExecutionGetRequest {
- // Unique identifier for the task execution.
- // +required
- core.TaskExecutionIdentifier id = 1;
-}
-
-// Represents a request structure to retrieve a list of task execution entities yielded by a specific node execution.
-// See :ref:`ref_flyteidl.admin.TaskExecution` for more details
-message TaskExecutionListRequest {
- // Indicates the node execution to filter by.
- // +required
- core.NodeExecutionIdentifier node_execution_id = 1;
-
- // Indicates the number of resources to be returned.
- // +required
- uint32 limit = 2;
-
- // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
- // in a query.
- // +optional
- string token = 3;
-
- // Indicates a list of filters passed as string.
- // More info on constructing filters:
- // +optional
- string filters = 4;
-
- // Sort ordering for returned list.
- // +optional
- Sort sort_by = 5;
-}
-
-// Encapsulates all details for a single task execution entity.
-// A task execution represents an instantiated task, including all inputs and additional
-// metadata as well as computed results including state, outputs, and duration-based attributes.
-message TaskExecution {
- // Unique identifier for the task execution.
- core.TaskExecutionIdentifier id = 1;
-
- // Path to remote data store where input blob is stored.
- string input_uri = 2;
-
- // Task execution details and results.
- TaskExecutionClosure closure = 3;
-
- // Whether this task spawned nodes.
- bool is_parent = 4;
-}
-
-// Response structure for a query to list task execution entities.
-// See :ref:`ref_flyteidl.admin.TaskExecution` for more details
-message TaskExecutionList {
- repeated TaskExecution task_executions = 1;
-
- // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
- // in a query. If there are no more results, this value will be empty.
- string token = 2;
-}
-
-// Container for task execution details and results.
-message TaskExecutionClosure {
- oneof output_result {
- // Path to remote data store where output blob is stored if the execution succeeded (and produced outputs).
- // DEPRECATED. Use GetTaskExecutionData to fetch output data instead.
- string output_uri = 1 [deprecated = true];
-
- // Error information for the task execution. Populated if the execution failed.
- core.ExecutionError error = 2;
-
- // Raw output data produced by this task execution.
- // DEPRECATED. Use GetTaskExecutionData to fetch output data instead.
- core.LiteralMap output_data = 12 [deprecated = true];
- }
-
- // The last recorded phase for this task execution.
- core.TaskExecution.Phase phase = 3;
-
- // Detailed log information output by the task execution.
- repeated core.TaskLog logs = 4;
-
- // Time at which the task execution began running.
- google.protobuf.Timestamp started_at = 5;
-
- // The amount of time the task execution spent running.
- google.protobuf.Duration duration = 6;
-
- // Time at which the task execution was created.
- google.protobuf.Timestamp created_at = 7;
-
- // Time at which the task execution was last updated.
- google.protobuf.Timestamp updated_at = 8;
-
- // Custom data specific to the task plugin.
- google.protobuf.Struct custom_info = 9;
-
- // If there is an explanation for the most recent phase transition, the reason will capture it.
- string reason = 10;
-
- // A predefined yet extensible Task type identifier.
- string task_type = 11;
-
- // Metadata around how a task was executed.
- event.TaskExecutionMetadata metadata = 16;
-
- // The event version is used to indicate versioned changes in how data is maintained using this
- // proto message. For example, event_version > 0 means that map task logs use the
- // TaskExecutionMetadata ExternalResourceInfo fields for each subtask rather than the TaskLog
- // in this message.
- int32 event_version = 17;
-
- // A time-series of the phase transition or update explanations. This, when compared to storing a singular reason
- // as previously done, is much more valuable in visualizing and understanding historical evaluations.
- repeated Reason reasons = 18;
-}
-
-// Reason is a single message annotated with a timestamp to indicate the instant the reason occurred.
-message Reason {
- // occurred_at is the timestamp indicating the instant that this reason happened.
- google.protobuf.Timestamp occurred_at = 1;
-
- // message is the explanation for the most recent phase transition or status update.
- string message = 2;
-}
-
-// Request structure to fetch inputs and output for a task execution.
-// By default, this data is not returned inline in :ref:`ref_flyteidl.admin.TaskExecutionGetRequest`
-message TaskExecutionGetDataRequest {
- // The identifier of the task execution for which to fetch inputs and outputs.
- // +required
- core.TaskExecutionIdentifier id = 1;
-}
-
-// Response structure for TaskExecutionGetDataRequest which contains inputs and outputs for a task execution.
-message TaskExecutionGetDataResponse {
- // Signed url to fetch a core.LiteralMap of task execution inputs.
- // Deprecated: Please use full_inputs instead.
- UrlBlob inputs = 1 [deprecated = true];
-
- // Signed url to fetch a core.LiteralMap of task execution outputs.
- // Deprecated: Please use full_outputs instead.
- UrlBlob outputs = 2 [deprecated = true];
-
- // Full_inputs will only be populated if they are under a configured size threshold.
- core.LiteralMap full_inputs = 3;
-
- // Full_outputs will only be populated if they are under a configured size threshold.
- core.LiteralMap full_outputs = 4;
-
- // Flyte tiny url to fetch a core.LiteralMap of the task execution's IO.
- // Deck will be empty for task executions.
- FlyteURLs flyte_urls = 5;
-}
diff --git a/flyrs/protos/flyteidl/admin/version.proto b/flyrs/protos/flyteidl/admin/version.proto
deleted file mode 100644
index e0e38bda1f..0000000000
--- a/flyrs/protos/flyteidl/admin/version.proto
+++ /dev/null
@@ -1,27 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.admin;
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin";
-
-// Response for the GetVersion API
-message GetVersionResponse {
- // The control plane version information. FlyteAdmin and related components
- // form the control plane of Flyte
- Version control_plane_version = 1;
-}
-
-// Provides Version information for a component
-message Version {
- // Specifies the Git SHA of the build
- string Build = 1;
-
- // Version for the build; should follow semver
- string Version = 2;
-
- // Build timestamp
- string BuildTime = 3;
-}
-
-// Empty request for GetVersion
-message GetVersionRequest {
-}
diff --git a/flyrs/protos/flyteidl/admin/workflow.proto b/flyrs/protos/flyteidl/admin/workflow.proto
deleted file mode 100644
index b090f30ea8..0000000000
--- a/flyrs/protos/flyteidl/admin/workflow.proto
+++ /dev/null
@@ -1,92 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.admin;
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin";
-
-import "flyteidl/core/compiler.proto";
-import "flyteidl/core/identifier.proto";
-import "flyteidl/core/workflow.proto";
-import "flyteidl/admin/description_entity.proto";
-import "google/protobuf/timestamp.proto";
-
-// Represents a request structure to create a revision of a workflow.
-// See :ref:`ref_flyteidl.admin.Workflow` for more details
-message WorkflowCreateRequest {
- // id represents the unique identifier of the workflow.
- // +required
- core.Identifier id = 1;
-
- // Represents the specification for workflow.
- // +required
- WorkflowSpec spec = 2;
-}
-
-message WorkflowCreateResponse {
- // Purposefully empty, may be populated in the future.
-}
-
-// Represents the workflow structure stored in the Admin
-// A workflow is created by ordering tasks and associating outputs to inputs
-// in order to produce a directed-acyclic execution graph.
-message Workflow {
- // id represents the unique identifier of the workflow.
- core.Identifier id = 1;
-
- // closure encapsulates all the fields that map to a compiled version of the workflow.
- WorkflowClosure closure = 2;
-
- // One-liner overview of the entity.
- string short_description = 3;
-}
-
-// Represents a list of workflows returned from the admin.
-// See :ref:`ref_flyteidl.admin.Workflow` for more details
-message WorkflowList {
- // A list of workflows returned based on the request.
- repeated Workflow workflows = 1;
-
- // In the case of multiple pages of results, the server-provided token can be used to fetch the next page
- // in a query. If there are no more results, this value will be empty.
- string token = 2;
-}
-
-// Represents a structure that encapsulates the specification of the workflow.
-message WorkflowSpec {
- // Template of the workflow that encapsulates all the metadata of the workflow.
- core.WorkflowTemplate template = 1;
-
- // Workflows that are embedded into other workflows need to be passed alongside the parent workflow to the
- // propeller compiler (since the compiler doesn't have any knowledge of other workflows - i.e., it doesn't reach out
- // to Admin to see other registered workflows). In fact, subworkflows do not even need to be registered.
- repeated core.WorkflowTemplate sub_workflows = 2;
-
- // Represents the specification for description entity.
- DescriptionEntity description = 3;
-}
-
-// A container holding the compiled workflow produced from the WorkflowSpec and additional metadata.
-message WorkflowClosure {
- // Represents the compiled representation of the workflow from the specification provided.
- core.CompiledWorkflowClosure compiled_workflow = 1;
-
- // Time at which the workflow was created.
- google.protobuf.Timestamp created_at = 2;
-}
-
-// The workflow id is already used and the structure is different
-message WorkflowErrorExistsDifferentStructure {
- core.Identifier id = 1;
-}
-
-// The workflow id is already used with an identical structure
-message WorkflowErrorExistsIdenticalStructure {
- core.Identifier id = 1;
-}
-
-// When a CreateWorkflowRequest fails due to a matching id
-message CreateWorkflowFailureReason {
- oneof reason {
- WorkflowErrorExistsDifferentStructure exists_different_structure = 1;
- WorkflowErrorExistsIdenticalStructure exists_identical_structure = 2;
- }
-}
diff --git a/flyrs/protos/flyteidl/admin/workflow_attributes.proto b/flyrs/protos/flyteidl/admin/workflow_attributes.proto
deleted file mode 100644
index 9767f00df7..0000000000
--- a/flyrs/protos/flyteidl/admin/workflow_attributes.proto
+++ /dev/null
@@ -1,89 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.admin;
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/admin";
-
-import "flyteidl/admin/matchable_resource.proto";
-
-// Defines a set of custom matching attributes which define resource defaults for a project, domain and workflow.
-// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
-message WorkflowAttributes {
- // Unique project id for which this set of attributes will be applied.
- string project = 1;
-
- // Unique domain id for which this set of attributes will be applied.
- string domain = 2;
-
- // Workflow name for which this set of attributes will be applied.
- string workflow = 3;
-
- MatchingAttributes matching_attributes = 4;
-
- // Optional, org key applied to the attributes.
- string org = 5;
-}
-
-// Sets custom attributes for a project, domain and workflow combination.
-// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
-message WorkflowAttributesUpdateRequest {
- WorkflowAttributes attributes = 1;
-}
-
-// Purposefully empty, may be populated in the future.
-message WorkflowAttributesUpdateResponse {
-}
-
-// Request to get an individual workflow attribute override.
-// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
-message WorkflowAttributesGetRequest {
- // Unique project id which this set of attributes references.
- // +required
- string project = 1;
-
- // Unique domain id which this set of attributes references.
- // +required
- string domain = 2;
-
- // Workflow name which this set of attributes references.
- // +required
- string workflow = 3;
-
- // Which type of matchable attributes to return.
- // +required
- MatchableResource resource_type = 4;
-
- // Optional, org key applied to the attributes.
- string org = 5;
-}
-
-// Response to get an individual workflow attribute override.
-message WorkflowAttributesGetResponse {
- WorkflowAttributes attributes = 1;
-}
-
-// Request to delete a matchable workflow attribute override.
-// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
-message WorkflowAttributesDeleteRequest {
- // Unique project id which this set of attributes references.
- // +required
- string project = 1;
-
- // Unique domain id which this set of attributes references.
- // +required
- string domain = 2;
-
- // Workflow name which this set of attributes references.
- // +required
- string workflow = 3;
-
- // Which type of matchable attributes to delete.
- // +required
- MatchableResource resource_type = 4;
-
- // Optional, org key applied to the attributes.
- string org = 5;
-}
-
-// Purposefully empty, may be populated in the future.
-message WorkflowAttributesDeleteResponse {
-}
diff --git a/flyrs/protos/flyteidl/cacheservice/cacheservice.proto b/flyrs/protos/flyteidl/cacheservice/cacheservice.proto
deleted file mode 100644
index f7f82f4921..0000000000
--- a/flyrs/protos/flyteidl/cacheservice/cacheservice.proto
+++ /dev/null
@@ -1,143 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.cacheservice;
-
-import "flyteidl/core/literals.proto";
-import "flyteidl/core/types.proto";
-import "flyteidl/core/identifier.proto";
-import "flyteidl/core/interface.proto";
-import "google/protobuf/duration.proto";
-import "google/protobuf/timestamp.proto";
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/cacheservice";
-
-/*
- * CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs.
- */
-service CacheService {
- // Retrieves cached data by key.
- rpc Get (GetCacheRequest) returns (GetCacheResponse);
-
- // Stores or updates cached data by key.
- rpc Put (PutCacheRequest) returns (PutCacheResponse);
-
- // Deletes cached data by key.
- rpc Delete (DeleteCacheRequest) returns (DeleteCacheResponse);
-
- // Get or extend a reservation for a cache key
- rpc GetOrExtendReservation (GetOrExtendReservationRequest) returns (GetOrExtendReservationResponse);
-
- // Release the reservation for a cache key
- rpc ReleaseReservation (ReleaseReservationRequest) returns (ReleaseReservationResponse);
-}
-
-/*
- * Additional metadata as key-value pairs
- */
-message KeyMapMetadata {
- map<string, string> values = 1; // Additional metadata as key-value pairs
-}
-
-/*
- * Metadata for cached outputs, including the source identifier and timestamps.
- */
-message Metadata {
- core.Identifier source_identifier = 1; // Source task or workflow identifier
- KeyMapMetadata key_map = 2; // Additional metadata as key-value pairs
- google.protobuf.Timestamp created_at = 3; // Creation timestamp
- google.protobuf.Timestamp last_updated_at = 4; // Last update timestamp
-}
-
-/*
- * Represents cached output, either as literals or a URI, with associated metadata.
- */
-message CachedOutput {
- oneof output {
- flyteidl.core.LiteralMap output_literals = 1; // Output literals
- string output_uri = 2; // URI to output data
- }
- Metadata metadata = 3; // Associated metadata
-}
-
-/*
- * Request to retrieve cached data by key.
- */
-message GetCacheRequest {
- string key = 1; // Cache key
-}
-
-/*
- * Response with cached data for a given key.
- */
-message GetCacheResponse {
- CachedOutput output = 1; // Cached output
-}
-
-/*
- * Request to store/update cached data by key.
- */
-message PutCacheRequest {
- string key = 1; // Cache key
- CachedOutput output = 2; // Output to cache
- bool overwrite = 3; // Overwrite flag
-}
-
-/*
- * Response message of cache store/update operation.
- */
-message PutCacheResponse {
- // Empty, success indicated by no errors
-}
-
-/*
- * Request to delete cached data by key.
- */
-message DeleteCacheRequest {
- string key = 1; // Cache key
-}
-
-/*
- * Response message of cache deletion operation.
- */
-message DeleteCacheResponse {
- // Empty, success indicated by no errors
-}
-
-// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata.
-message Reservation {
- string key = 1; // The unique ID for the reservation - same as the cache key
- string owner_id = 2; // The unique ID of the owner for the reservation
- google.protobuf.Duration heartbeat_interval = 3; // Requested reservation extension heartbeat interval
- google.protobuf.Timestamp expires_at = 4; // Expiration timestamp of this reservation
-}
-
-/*
- * Request to get or extend a reservation for a cache key
- */
-message GetOrExtendReservationRequest {
- string key = 1; // The unique ID for the reservation - same as the cache key
- string owner_id = 2; // The unique ID of the owner for the reservation
- google.protobuf.Duration heartbeat_interval = 3; // Requested reservation extension heartbeat interval
-}
-
-/*
- * Response to get or extend a reservation for a cache key
- */
-message GetOrExtendReservationResponse {
- Reservation reservation = 1; // The reservation that was created or extended
-}
-
-/*
- * Request to release the reservation for a cache key
- */
-message ReleaseReservationRequest {
- string key = 1; // The unique ID for the reservation - same as the cache key
- string owner_id = 2; // The unique ID of the owner for the reservation
-}
-
-/*
- * Response message of release reservation operation.
- */
-message ReleaseReservationResponse {
- // Empty, success indicated by no errors
-}
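The reservation RPCs above imply a small coordination protocol: a writer acquires a reservation on a cache key, extends it within the granted heartbeat interval while computing the output, and releases it when done so other candidate writers can proceed. A minimal sketch of that loop, with hypothetical stand-ins for the two generated client calls:

    use std::time::Duration;

    // Hypothetical stand-ins for CacheService.GetOrExtendReservation and
    // CacheService.ReleaseReservation; a real client would issue gRPC calls.
    fn get_or_extend(key: &str, owner: &str, heartbeat: Duration) -> Duration {
        let _ = (key, owner);
        heartbeat // pretend the server granted the requested interval
    }

    fn release(key: &str, owner: &str) {
        let _ = (key, owner);
    }

    fn main() {
        let (key, owner) = ("cache-key-123", "worker-a");

        // Acquire (or extend) the reservation before starting the work.
        let granted = get_or_extend(key, owner, Duration::from_secs(10));

        // ... compute the cached output here, calling get_or_extend again at
        // least once per `granted` interval so the reservation does not
        // expire mid-computation ...
        let _ = granted;

        // Release the reservation so other writers are unblocked.
        release(key, owner);
    }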
diff --git a/flyrs/protos/flyteidl/core/artifact_id.proto b/flyrs/protos/flyteidl/core/artifact_id.proto
deleted file mode 100644
index 022bc20cff..0000000000
--- a/flyrs/protos/flyteidl/core/artifact_id.proto
+++ /dev/null
@@ -1,112 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-import "google/protobuf/timestamp.proto";
-import "flyteidl/core/identifier.proto";
-
-
-message ArtifactKey {
- // Project, domain, and suffix need to be unique across a given artifact store.
- string project = 1;
- string domain = 2;
- string name = 3;
- string org = 4;
-}
-
-// Only valid for triggers
-message ArtifactBindingData {
- reserved 1 to 4;
- // These two fields are only relevant in the partition value case
- oneof partition_data {
- string partition_key = 5;
- bool bind_to_time_partition = 6;
- }
-
- // This is only relevant in the time partition case
- TimeTransform time_transform = 7;
-}
-
-enum Granularity {
- UNSET = 0;
- MINUTE = 1;
- HOUR = 2;
- DAY = 3; // default
- MONTH = 4;
-}
-
-enum Operator {
- MINUS = 0;
- PLUS = 1;
-}
-
-message TimeTransform {
- string transform = 1;
- Operator op = 2;
-}
-
-message InputBindingData {
- string var = 1;
-}
-
-message RuntimeBinding {}
-
-message LabelValue {
- oneof value {
- // The string static value is for use in the Partitions object
- string static_value = 1;
-
- // The time value is for use in the TimePartition case
- google.protobuf.Timestamp time_value = 2;
- ArtifactBindingData triggered_binding = 3;
- InputBindingData input_binding = 4;
- RuntimeBinding runtime_binding = 5;
- }
-}
-
-message Partitions {
- map<string, LabelValue> value = 1;
-}
-
-message TimePartition {
- LabelValue value = 1;
- Granularity granularity = 2;
-}
-
-message ArtifactID {
- ArtifactKey artifact_key = 1;
-
- string version = 2;
-
- // Think of a partition as a tag on an Artifact, except it's a key-value pair.
- // Different partitions naturally have different versions (execution ids).
- Partitions partitions = 3;
-
- // There is no such thing as an empty time partition - if it's not set, then there is no time partition.
- TimePartition time_partition = 4;
-}
-
-message ArtifactTag {
- ArtifactKey artifact_key = 1;
-
- LabelValue value = 2;
-}
-
-// Uniqueness constraints for Artifacts
-// - project, domain, name, version, partitions
-// Option 2 (tags are standalone, point to an individual artifact id):
-// - project, domain, name, alias (points to one partition if partitioned)
-// - project, domain, name, partition key, partition value
-message ArtifactQuery {
- oneof identifier {
- ArtifactID artifact_id = 1;
- ArtifactTag artifact_tag = 2;
- string uri = 3;
-
- // This is used in the trigger case, where a user specifies a value for an input that is one of the triggering
- // artifacts, or a partition value derived from a triggering artifact.
- ArtifactBindingData binding = 4;
- }
-}
diff --git a/flyrs/protos/flyteidl/core/catalog.proto b/flyrs/protos/flyteidl/core/catalog.proto
deleted file mode 100644
index 4d98c28d7e..0000000000
--- a/flyrs/protos/flyteidl/core/catalog.proto
+++ /dev/null
@@ -1,63 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-import "flyteidl/core/identifier.proto";
-
-// Indicates the status of CatalogCaching. The reason why this is not embedded in TaskNodeMetadata is that we may use it for other types of nodes as well in the future
-enum CatalogCacheStatus {
- // Used to indicate that caching was disabled
- CACHE_DISABLED = 0;
- // Used to indicate that the cache lookup resulted in no matches
- CACHE_MISS = 1;
- // Used to indicate that the associated artifact was a result of a previous execution
- CACHE_HIT = 2;
- // Used to indicate that the resultant artifact was added to the cache
- CACHE_POPULATED = 3;
- // Used to indicate that cache lookup failed because of an error
- CACHE_LOOKUP_FAILURE = 4;
- // Used to indicate that cache put failed because of an error
- CACHE_PUT_FAILURE = 5;
- // Used to indicate the cache lookup was skipped
- CACHE_SKIPPED = 6;
- // Used to indicate that the cache was evicted
- CACHE_EVICTED = 7;
-};
-
-message CatalogArtifactTag {
- // Artifact ID is a generated name
- string artifact_id = 1;
- // Flyte computes the tag automatically, as the hash of the values
- string name = 2;
-};
-
-// Catalog artifact information with specific metadata
-message CatalogMetadata {
- // Dataset ID in the catalog
- Identifier dataset_id = 1;
- // Artifact tag in the catalog
- CatalogArtifactTag artifact_tag = 2;
- // Optional: Source Execution identifier, if this dataset was generated by another execution in Flyte. This is a one-of field and will depend on the caching context
- oneof source_execution {
- // Today we only support TaskExecutionIdentifier as a source, as catalog caching only works for task executions
- TaskExecutionIdentifier source_task_execution = 3;
- }
-};
-
-message CatalogReservation {
- // Indicates the status of a catalog reservation operation.
- enum Status {
- // Used to indicate that reservations are disabled
- RESERVATION_DISABLED = 0;
- // Used to indicate that a reservation was successfully acquired or extended
- RESERVATION_ACQUIRED = 1;
- // Used to indicate that an active reservation currently exists
- RESERVATION_EXISTS = 2;
- // Used to indicate that the reservation has been successfully released
- RESERVATION_RELEASED = 3;
- // Used to indicate that a reservation operation resulted in failure
- RESERVATION_FAILURE = 4;
- }
-}
diff --git a/flyrs/protos/flyteidl/core/compiler.proto b/flyrs/protos/flyteidl/core/compiler.proto
deleted file mode 100644
index 620ee26f2d..0000000000
--- a/flyrs/protos/flyteidl/core/compiler.proto
+++ /dev/null
@@ -1,64 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-import "flyteidl/core/identifier.proto";
-import "flyteidl/core/interface.proto";
-import "flyteidl/core/workflow.proto";
-import "flyteidl/core/tasks.proto";
-
-// Adjacency list for the workflow. This is created as part of the compilation process. Every process after the compilation
-// step uses this created ConnectionSet
-message ConnectionSet {
- message IdList {
- repeated string ids = 1;
- }
-
- // A list of all the node ids that are downstream from a given node id
- map<string, IdList> downstream = 7;
-
- // A list of all the node ids that are upstream of this node id
- map<string, IdList> upstream = 8;
-}
-
-// Output of the compilation step. This object represents one workflow. We store more metadata at this layer
-message CompiledWorkflow {
- // Completely contained Workflow Template
- WorkflowTemplate template = 1;
- // For internal use only! This field is used by the system and must not be filled in. Any values set will be ignored.
- ConnectionSet connections = 2;
-}
-
-// Output of the compilation step. This object represents one LaunchPlan. We store more metadata at this layer
-message CompiledLaunchPlan {
- // Completely contained LaunchPlan Template
- LaunchPlanTemplate template = 1;
-}
-
-// Output of the compilation step. This object represents one Task. We store more metadata at this layer
-message CompiledTask {
- // Completely contained TaskTemplate
- TaskTemplate template = 1;
-}
-
-// A Compiled Workflow Closure contains all the information required to start a new execution, or to visualize a workflow
-// and its details. The CompiledWorkflowClosure should always contain a primary workflow, that is, the main workflow that
-// will begin the execution. All subworkflows are denormalized. WorkflowNodes refer to the workflow identifiers of
-// compiled subworkflows.
-message CompiledWorkflowClosure {
- //+required
- CompiledWorkflow primary = 1;
- // Guaranteed that there will exist one and only one workflow with a given id, i.e., every sub workflow has a
- // unique identifier. Also every enclosed subworkflow is used either by a primary workflow or by a subworkflow
- // as an inlined workflow
- //+optional
- repeated CompiledWorkflow sub_workflows = 2;
- // Guaranteed that there will exist one and only one task with a given id, i.e., every task has a unique id
- //+required (at least 1)
- repeated CompiledTask tasks = 3;
- // A collection of launch plans that are compiled. Guaranteed that there will exist one and only one launch plan
- // with a given id, i.e., every launch plan has a unique id.
- repeated CompiledLaunchPlan launch_plans = 4;
-}
diff --git a/flyrs/protos/flyteidl/core/condition.proto b/flyrs/protos/flyteidl/core/condition.proto
deleted file mode 100644
index 84c7fb0314..0000000000
--- a/flyrs/protos/flyteidl/core/condition.proto
+++ /dev/null
@@ -1,63 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-import "flyteidl/core/literals.proto";
-
-// Defines a 2-level tree where the root is a comparison operator and Operands are primitives or known variables.
-// Each expression results in a boolean result.
-message ComparisonExpression {
- // Binary Operator for each expression
- enum Operator {
- EQ = 0;
- NEQ = 1;
- // Greater Than
- GT = 2;
- GTE = 3;
- // Less Than
- LT = 4;
- LTE = 5;
- }
-
- Operator operator = 1;
- Operand left_value = 2;
- Operand right_value = 3;
-}
-
-// Defines an operand to a comparison expression.
-message Operand {
- oneof val {
- // Can be a constant
- core.Primitive primitive = 1 [deprecated = true];
- // Or one of this node's input variables
- string var = 2;
- // Replaces the primitive field
- core.Scalar scalar = 3;
- }
-}
-
-// Defines a boolean expression tree. It can be a simple or a conjunction expression.
-// Multiple expressions can be combined using a conjunction or a disjunction to result in a final boolean result.
-message BooleanExpression {
- oneof expr {
- ConjunctionExpression conjunction = 1;
- ComparisonExpression comparison = 2;
- }
-}
-
-// Defines a conjunction expression of two boolean expressions.
-message ConjunctionExpression {
- // Nested conditions. They can be conjoined using AND / OR
- // Order of evaluation is not important as the operators are commutative
- enum LogicalOperator {
- // Conjunction
- AND = 0;
- OR = 1;
- }
-
- LogicalOperator operator = 1;
- BooleanExpression left_expression = 2;
- BooleanExpression right_expression = 3;
-}
diff --git a/flyrs/protos/flyteidl/core/dynamic_job.proto b/flyrs/protos/flyteidl/core/dynamic_job.proto
deleted file mode 100644
index 1665f5fa29..0000000000
--- a/flyrs/protos/flyteidl/core/dynamic_job.proto
+++ /dev/null
@@ -1,32 +0,0 @@
-syntax = "proto3";
-
-import "flyteidl/core/tasks.proto";
-import "flyteidl/core/workflow.proto";
-import "flyteidl/core/literals.proto";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-// Describes a set of tasks to execute and how the final outputs are produced.
-message DynamicJobSpec {
- // A collection of nodes to execute.
- repeated Node nodes = 1;
-
- // An absolute number of successful completions of nodes required to mark this job as succeeded. As soon as this
- // criterion is met, the dynamic job will be marked as successful and outputs will be computed. If this number
- // becomes impossible to reach (e.g. number of currently running tasks + number of already succeeded tasks <
- // min_successes) the task will be aborted immediately and marked as failed. The default value of this field, if not
- // specified, is the count of the nodes repeated field.
- int64 min_successes = 2;
-
- // Describes how to bind the final output of the dynamic job from the outputs of executed nodes. The referenced ids
- // in bindings should have the generated id for the subtask.
- repeated Binding outputs = 3;
-
- // [Optional] A complete list of task specs referenced in nodes.
- repeated TaskTemplate tasks = 4;
-
- // [Optional] A complete list of workflow specs referenced in nodes.
- repeated WorkflowTemplate subworkflows = 5;
-}
diff --git a/flyrs/protos/flyteidl/core/errors.proto b/flyrs/protos/flyteidl/core/errors.proto
deleted file mode 100644
index 4d25389349..0000000000
--- a/flyrs/protos/flyteidl/core/errors.proto
+++ /dev/null
@@ -1,35 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-import "flyteidl/core/execution.proto";
-
-// Error message to propagate detailed errors from container executions to the execution
-// engine.
-message ContainerError {
- // A simplified code for errors, so that we can provide a glossary of all possible errors.
- string code = 1;
- // A detailed error message.
- string message = 2;
-
- // Defines a generic error type that dictates the behavior of the retry strategy.
- enum Kind {
- NON_RECOVERABLE = 0;
- RECOVERABLE = 1;
- }
-
- // An abstract error kind for this error. Defaults to NON_RECOVERABLE if not specified.
- Kind kind = 3;
-
- // Defines the origin of the error (system, user, unknown).
- ExecutionError.ErrorKind origin = 4;
-}
-
-// Defines the errors.pb file format the container can produce to communicate
-// failure reasons to the execution engine.
-message ErrorDocument {
- // The error raised during execution.
- ContainerError error = 1;
-}
diff --git a/flyrs/protos/flyteidl/core/execution.proto b/flyrs/protos/flyteidl/core/execution.proto
deleted file mode 100644
index d2eabdc577..0000000000
--- a/flyrs/protos/flyteidl/core/execution.proto
+++ /dev/null
@@ -1,116 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-import "google/protobuf/duration.proto";
-
-// Indicates various phases of Workflow Execution
-message WorkflowExecution {
- enum Phase {
- UNDEFINED = 0;
- QUEUED = 1;
- RUNNING = 2;
- SUCCEEDING = 3;
- SUCCEEDED = 4;
- FAILING = 5;
- FAILED = 6;
- ABORTED = 7;
- TIMED_OUT = 8;
- ABORTING = 9;
- }
-}
-
-// Indicates various phases of Node Execution that only include the time spent to run the nodes/workflows
-message NodeExecution {
- enum Phase {
- UNDEFINED = 0;
- QUEUED = 1;
- RUNNING = 2;
- SUCCEEDED = 3;
- FAILING = 4;
- FAILED = 5;
- ABORTED = 6;
- SKIPPED = 7;
- TIMED_OUT = 8;
- DYNAMIC_RUNNING = 9;
- RECOVERED = 10;
- }
-}
-
-// Phases that task plugins can go through. Not all phases may be applicable to a specific plugin task,
-// but this is the cumulative list that customers may want to know about for their task.
-message TaskExecution {
- enum Phase {
- UNDEFINED = 0;
- QUEUED = 1;
- RUNNING = 2;
- SUCCEEDED = 3;
- ABORTED = 4;
- FAILED = 5;
- // To indicate cases where the task is initializing, like: ErrImagePull, ContainerCreating, PodInitializing
- INITIALIZING = 6;
- // To address cases where the underlying resource is not available: backoff error, resource quota exceeded
- WAITING_FOR_RESOURCES = 7;
- }
-}
-
-
-// Represents the error message from the execution.
-message ExecutionError {
- // Error code indicates a grouping of a type of error.
- // More Info:
- string code = 1;
- // Detailed description of the error - including stack trace.
- string message = 2;
- // Full error contents accessible via a URI
- string error_uri = 3;
- // Error type: System or User
- enum ErrorKind {
- UNKNOWN = 0;
- USER = 1;
- SYSTEM = 2;
- }
- ErrorKind kind = 4;
-}
-
-// Log information for the task that is specific to a log sink
-// When our log story is fleshed out, we may have more metadata here like log link expiry
-message TaskLog {
-
- enum MessageFormat {
- UNKNOWN = 0;
- CSV = 1;
- JSON = 2;
- }
-
- string uri = 1;
- string name = 2;
- MessageFormat message_format = 3;
- google.protobuf.Duration ttl = 4;
-}
-
-// Represents customized execution run-time attributes.
-message QualityOfServiceSpec {
- // Indicates how much queueing delay an execution can tolerate.
- google.protobuf.Duration queueing_budget = 1;
-
- // Add future, user-configurable options here
-}
-
-// Indicates the priority of an execution.
-message QualityOfService {
- enum Tier {
- // Default: no quality of service specified.
- UNDEFINED = 0;
- HIGH = 1;
- MEDIUM = 2;
- LOW = 3;
- }
-
- oneof designation {
- Tier tier = 1;
- QualityOfServiceSpec spec = 2;
- }
-}
diff --git a/flyrs/protos/flyteidl/core/identifier.proto b/flyrs/protos/flyteidl/core/identifier.proto
deleted file mode 100644
index 48744f7894..0000000000
--- a/flyrs/protos/flyteidl/core/identifier.proto
+++ /dev/null
@@ -1,80 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-// Indicates a resource type within Flyte.
-enum ResourceType {
- UNSPECIFIED = 0;
- TASK = 1;
- WORKFLOW = 2;
- LAUNCH_PLAN = 3;
- // A dataset represents an entity modeled in Flyte DataCatalog. A Dataset is also a versioned entity and can be a compilation of multiple individual objects.
- // Eventually all Catalog objects should be modeled similarly to Flyte Objects. The Dataset entities make it possible for the UI and CLI to act on the objects
- // in a similar manner to other Flyte objects
- DATASET = 4;
-}
-
-// Encapsulation of fields that uniquely identify a Flyte resource.
-message Identifier {
- // Identifies the specific type of resource that this identifier corresponds to.
- core.ResourceType resource_type = 1;
-
- // Name of the project the resource belongs to.
- string project = 2;
-
- // Name of the domain the resource belongs to.
- // A domain can be considered as a subset within a specific project.
- string domain = 3;
-
- // User provided value for the resource.
- string name = 4;
-
- // Specific version of the resource.
- string version = 5;
-
- // Optional, org key applied to the resource.
- string org = 6;
-}
-
-// Encapsulation of fields that uniquely identify a Flyte workflow execution
-message WorkflowExecutionIdentifier {
- // Name of the project the resource belongs to.
- string project = 1;
-
- // Name of the domain the resource belongs to.
- // A domain can be considered as a subset within a specific project.
- string domain = 2;
-
- // User or system provided value for the resource.
- string name = 4;
-
- // Optional, org key applied to the resource.
- string org = 5;
-}
-
-// Encapsulation of fields that identify a Flyte node execution entity.
-message NodeExecutionIdentifier {
- string node_id = 1;
-
- WorkflowExecutionIdentifier execution_id = 2;
-}
-
-// Encapsulation of fields that identify a Flyte task execution entity.
-message TaskExecutionIdentifier {
- core.Identifier task_id = 1;
-
- core.NodeExecutionIdentifier node_execution_id = 2;
-
- uint32 retry_attempt = 3;
-}
-
-// Encapsulation of fields that uniquely identify a signal.
-message SignalIdentifier {
- // Unique identifier for a signal.
- string signal_id = 1;
-
- // Identifies the Flyte workflow execution this signal belongs to.
- WorkflowExecutionIdentifier execution_id = 2;
-}
diff --git a/flyrs/protos/flyteidl/core/interface.proto b/flyrs/protos/flyteidl/core/interface.proto
deleted file mode 100644
index ec7673d9c4..0000000000
--- a/flyrs/protos/flyteidl/core/interface.proto
+++ /dev/null
@@ -1,64 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-import "flyteidl/core/types.proto";
-import "flyteidl/core/literals.proto";
-import "flyteidl/core/artifact_id.proto";
-
-// Defines a strongly typed variable.
-message Variable {
- // Variable literal type.
- LiteralType type = 1;
-
- //+optional string describing input variable
- string description = 2;
-
- //+optional This object allows the user to specify how Artifacts are created.
- // name, tag, partitions can be specified. The other fields (version and project/domain) are ignored.
- core.ArtifactID artifact_partial_id = 3;
-
- core.ArtifactTag artifact_tag = 4;
-}
-
-// A map of Variables
-message VariableMap {
- // Defines a map of variable names to variables.
- map<string, Variable> variables = 1;
-}
-
-// Defines strongly typed inputs and outputs.
-message TypedInterface {
- VariableMap inputs = 1;
- VariableMap outputs = 2;
-}
-
-// A parameter is used as input to a launch plan and has
-// the special ability to have a default value or mark itself as required.
-message Parameter {
- //+required Variable. Defines the type of the variable backing this parameter.
- Variable var = 1;
-
- //+optional
- oneof behavior {
- // Defines a default value that has to match the variable type defined.
- Literal default = 2;
-
- //+optional, whether this value is required to be filled in.
- bool required = 3;
-
- // This is essentially an execution-time search that should result in exactly one Artifact with a Type that
- // matches the type of the variable.
- core.ArtifactQuery artifact_query = 4;
-
- core.ArtifactID artifact_id = 5;
- }
-}
-
-// A map of Parameters.
-message ParameterMap {
- // Defines a map of parameter names to parameters.
- map<string, Parameter> parameters = 1;
-}
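Parameter above makes defaulted, required, and artifact-bound inputs mutually exclusive through the behavior oneof. A small sketch of consuming that shape; the types are hypothetical mirrors of the generated ones, with simple stand-ins for the Literal and ArtifactQuery payloads:

    // Hypothetical mirror of flyteidl.core.Parameter's `behavior` oneof,
    // illustrating that default / required / artifact binding are exclusive.
    #[allow(dead_code)]
    enum Behavior {
        Default(i64),          // stand-in for a core.Literal default value
        Required(bool),
        ArtifactQuery(String), // stand-in for core.ArtifactQuery
    }

    struct Parameter {
        name: &'static str,
        behavior: Behavior,
    }

    fn main() {
        let params = [
            Parameter { name: "batch_size", behavior: Behavior::Default(32) },
            Parameter { name: "input_path", behavior: Behavior::Required(true) },
        ];
        for p in params {
            match p.behavior {
                Behavior::Default(v) => println!("{}: defaults to {v}", p.name),
                Behavior::Required(_) => println!("{}: must be supplied", p.name),
                Behavior::ArtifactQuery(q) => println!("{}: resolved via {q}", p.name),
            }
        }
    }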
diff --git a/flyrs/protos/flyteidl/core/literals.proto b/flyrs/protos/flyteidl/core/literals.proto
deleted file mode 100644
index f886873ffb..0000000000
--- a/flyrs/protos/flyteidl/core/literals.proto
+++ /dev/null
@@ -1,183 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-import "google/protobuf/timestamp.proto";
-import "google/protobuf/duration.proto";
-import "google/protobuf/struct.proto";
-import "flyteidl/core/types.proto";
-
-// Primitive Types
-message Primitive {
- // Defines one of simple primitive types. These types will get translated into different programming languages as
- // described in https://developers.google.com/protocol-buffers/docs/proto#scalar.
- oneof value {
- int64 integer = 1;
- double float_value = 2;
- string string_value = 3;
- bool boolean = 4;
- google.protobuf.Timestamp datetime = 5;
- google.protobuf.Duration duration = 6;
- }
-}
-
-// Used to denote a nil/null/None assignment to a scalar value. The underlying LiteralType for Void is intentionally
-// undefined since it can be assigned to a scalar of any LiteralType.
-message Void {
-}
-
-// Refers to an offloaded set of files. It encapsulates the type of the store and a unique uri for where the data is.
-// There are no restrictions on how the uri is formatted since it will depend on how to interact with the store.
-message Blob {
- BlobMetadata metadata = 1;
- string uri = 3;
-}
-
-message BlobMetadata {
- BlobType type = 1;
-}
-
-// A simple byte array with a tag to help different parts of the system communicate about what is in the byte array.
-// It's strongly advisable that consumers of this type define a unique tag and validate the tag before parsing the data.
-message Binary {
- bytes value = 1;
- string tag = 2;
-}
-
-// A strongly typed schema that defines the interface of data retrieved from the underlying storage medium.
-message Schema {
- string uri = 1;
- SchemaType type = 3;
-}
-
-// The runtime representation of a tagged union value. See `UnionType` for more details.
-message Union {
- Literal value = 1;
- LiteralType type = 2;
-}
-
-message StructuredDatasetMetadata {
- // Bundle the type information along with the literal.
- // This is here because StructuredDatasets can often be more defined at run time than at compile time.
- // That is, at compile time you might only declare a task to return a pandas dataframe or a StructuredDataset,
- // without any column information, but at run time, you might have that column information.
- // flytekit python will copy this type information into the literal from the type information, if not provided by
- // the various plugins (encoders).
- // Since this field is run time generated, it's not used for any type checking.
- StructuredDatasetType structured_dataset_type = 1;
-}
-
-message StructuredDataset {
- // String location uniquely identifying where the data is.
- // Should start with the storage location (e.g. s3://, gs://, bq://, etc.)
- string uri = 1;
-
- StructuredDatasetMetadata metadata = 2;
-}
-
-message Scalar {
- oneof value {
- Primitive primitive = 1;
- Blob blob = 2;
- Binary binary = 3;
- Schema schema = 4;
- Void none_type = 5;
- Error error = 6;
- google.protobuf.Struct generic = 7;
- StructuredDataset structured_dataset = 8;
- Union union = 9;
- }
-}
-
-// A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives.
-message Literal {
- oneof value {
- // A simple value.
- Scalar scalar = 1;
-
- // A collection of literals to allow nesting.
- LiteralCollection collection = 2;
-
- // A map of strings to literals.
- LiteralMap map = 3;
- }
-
- // A hash representing this literal.
- // This is used for caching purposes. For more details refer to RFC 1893
- // (https://github.com/flyteorg/flyte/blob/master/rfc/system/1893-caching-of-offloaded-objects.md)
- string hash = 4;
-
- // Additional metadata for literals.
- map<string, string> metadata = 5;
-}
-
-// A collection of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field.
-message LiteralCollection {
- repeated Literal literals = 1;
-}
-
-// A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field.
-message LiteralMap {
- map<string, Literal> literals = 1;
-}
-
-// A collection of BindingData items.
-message BindingDataCollection {
- repeated BindingData bindings = 1;
-}
-
-// A map of BindingData items.
-message BindingDataMap {
- map<string, BindingData> bindings = 1;
-}
-
-message UnionInfo {
- LiteralType targetType = 1;
-}
-
-// Specifies either a simple value or a reference to another output.
-message BindingData {
- oneof value {
- // A simple scalar value.
- Scalar scalar = 1;
-
- // A collection of binding data. This allows nesting of binding data to any number
- // of levels.
- BindingDataCollection collection = 2;
-
- // References an output promised by another node.
- OutputReference promise = 3;
-
- // A map of bindings. The key is always a string.
- BindingDataMap map = 4;
- }
-
- UnionInfo union = 5;
-}
-
-// An input/output binding of a variable to either a static value or a node output.
-message Binding {
- // Variable name must match an input/output variable of the node.
- string var = 1;
-
- // Data to use to bind this variable.
- BindingData binding = 2;
-}
-
-// A generic key value pair.
-message KeyValuePair {
- //required.
- string key = 1;
-
- //+optional.
- string value = 2;
-}
-
-// Retry strategy associated with an executable unit.
-message RetryStrategy {
- // Number of retries. Retries will be consumed when the job fails with a recoverable error.
- // The number of retries must be less than or equal to 10.
- uint32 retries = 5;
-}
diff --git a/flyrs/protos/flyteidl/core/metrics.proto b/flyrs/protos/flyteidl/core/metrics.proto
deleted file mode 100644
index 120cd0f625..0000000000
--- a/flyrs/protos/flyteidl/core/metrics.proto
+++ /dev/null
@@ -1,50 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-import "flyteidl/core/identifier.proto";
-import "google/protobuf/timestamp.proto";
-import "google/protobuf/struct.proto";
-
-// Span represents a duration trace of Flyte execution. The id field denotes a Flyte execution entity or an operation
-// which uniquely identifies the Span. The spans attribute allows this Span to be further broken down into more
-// precise definitions.
-message Span {
- // start_time defines the instant this span began.
- google.protobuf.Timestamp start_time = 1;
-
- // end_time defines the instant this span completed.
- google.protobuf.Timestamp end_time = 2;
-
- oneof id {
- // workflow_id is the id of the workflow execution this Span represents.
- flyteidl.core.WorkflowExecutionIdentifier workflow_id = 3;
-
- // node_id is the id of the node execution this Span represents.
- flyteidl.core.NodeExecutionIdentifier node_id = 4;
-
- // task_id is the id of the task execution this Span represents.
- flyteidl.core.TaskExecutionIdentifier task_id = 5;
-
- // operation_id is the id of a unique operation that this Span represents.
- string operation_id = 6;
- }
-
- // spans defines a collection of Spans that break down this execution.
- repeated Span spans = 7;
-}
-
-// ExecutionMetricResult is a collection of metrics that are collected during the execution of a Flyte task.
-message ExecutionMetricResult {
- // The metric this data represents. e.g. EXECUTION_METRIC_USED_CPU_AVG or EXECUTION_METRIC_USED_MEMORY_BYTES_AVG.
- string metric = 1;
-
- // The result data in prometheus range query result format
- // https://prometheus.io/docs/prometheus/latest/querying/api/#expression-query-result-formats.
- // This may include multiple time series, differentiated by their metric labels.
- // Start time is the greater of (execution attempt start, 48h ago)
- // End time is the lesser of (execution attempt end, now)
- google.protobuf.Struct data = 2;
-}
diff --git a/flyrs/protos/flyteidl/core/security.proto b/flyrs/protos/flyteidl/core/security.proto
deleted file mode 100644
index 3aba017476..0000000000
--- a/flyrs/protos/flyteidl/core/security.proto
+++ /dev/null
@@ -1,130 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-// Secret encapsulates information about the secret a task needs to proceed. An environment variable
-// FLYTE_SECRETS_ENV_PREFIX will be passed to indicate the prefix of the environment variables that will be present if
-// secrets are passed through environment variables.
-// FLYTE_SECRETS_DEFAULT_DIR will be passed to indicate the prefix of the path where secrets will be mounted if secrets
-// are passed through file mounts.
-message Secret {
- enum MountType {
- // Default case, indicates the client can tolerate either mounting option.
- ANY = 0;
-
- // ENV_VAR indicates the secret needs to be mounted as an environment variable.
- ENV_VAR = 1;
-
- // FILE indicates the secret needs to be mounted as a file.
- FILE = 2;
- }
-
- // The name of the secret group in which to find the key referenced below. For K8s secrets, this should be the name of
- // the v1/secret object. For Confidant, this should be the Credential name. For Vault, this should be the secret name.
- // For AWS Secret Manager, this should be the name of the secret.
- // +required
- string group = 1;
-
- // The group version to fetch. This is not supported in all secret management systems. It'll be ignored for the ones
- // that do not support it.
- // +optional
- string group_version = 2;
-
- // The name of the secret to mount. This has to match an existing secret in the system. It's up to the implementation
- // of the secret management system to require case sensitivity. For K8s secrets, Confidant and Vault, this should
- // match one of the keys inside the secret. For AWS Secret Manager, it's ignored.
- // +optional
- string key = 3;
-
- // mount_requirement is optional. Indicates where the secret has to be mounted. If provided, the execution will fail
- // if the underlying key management system cannot satisfy that requirement. If not provided, the default location
- // will depend on the key management system.
- // +optional
- MountType mount_requirement = 4;
-}
-
-// OAuth2Client encapsulates OAuth2 Client Credentials to be used when making calls on behalf of that task.
-message OAuth2Client {
- // client_id is the public id for the client to use. The system will not perform any pre-auth validation that the
- // secret requested matches the client_id indicated here.
- // +required
- string client_id = 1;
-
- // client_secret is a reference to the secret used to authenticate the OAuth2 client.
- // +required
- Secret client_secret = 2;
-}
-
-// Identity encapsulates the various security identities a task can run as. It's up to the underlying plugin to pick the
-// right identity for the execution environment.
-message Identity {
- // iam_role references the fully qualified name of the Identity & Access Management role to impersonate.
- string iam_role = 1;
-
- // k8s_service_account references a kubernetes service account to impersonate.
- string k8s_service_account = 2;
-
- // oauth2_client references an oauth2 client. Backend plugins can use this information to impersonate the client when
- // making external calls.
- OAuth2Client oauth2_client = 3;
-
- // execution_identity references the subject who makes the execution
- string execution_identity = 4;
-}
-
-// OAuth2TokenRequest encapsulates information needed to request an OAuth2 token.
-// FLYTE_TOKENS_ENV_PREFIX will be passed to indicate the prefix of the environment variables that will be present if
-// tokens are passed through environment variables.
-// FLYTE_TOKENS_PATH_PREFIX will be passed to indicate the prefix of the path where secrets will be mounted if tokens
-// are passed through file mounts.
-message OAuth2TokenRequest {
- // Type of the token requested.
- enum Type {
- // CLIENT_CREDENTIALS indicates a 2-legged OAuth token requested using client credentials.
- CLIENT_CREDENTIALS = 0;
- }
-
- // name indicates a unique id for the token request within this task's token requests. It'll be used as a suffix for
- // environment variables and as a filename for mounting tokens as files.
- // +required
- string name = 1;
-
- // type indicates the type of the request to make. Defaults to CLIENT_CREDENTIALS.
- // +required
- Type type = 2;
-
- // client references the client_id/secret to use to request the OAuth2 token.
- // +required
- OAuth2Client client = 3;
-
- // idp_discovery_endpoint references the discovery endpoint used to retrieve the token endpoint and other related
- // information.
- // +optional
- string idp_discovery_endpoint = 4;
-
- // token_endpoint references the token issuance endpoint. If idp_discovery_endpoint is not provided, this parameter is
- // mandatory.
- // +optional
- string token_endpoint = 5;
-}
-
-// SecurityContext holds security attributes that apply to tasks.
-message SecurityContext {
- // run_as encapsulates the identity a pod should run as. If the task fills in multiple fields here, it'll be up to the
- // backend plugin to choose the appropriate identity for the execution engine the task will run on.
- Identity run_as = 1;
-
- // secrets indicate the list of secrets the task needs in order to proceed. Secrets will be mounted/passed to the
- // pod as it starts. If the plugin responsible for kicking off the task will not run it on a Flyte cluster (e.g. AWS
- // Batch), it's the responsibility of the plugin to fetch the secret (which means propeller identity will need access
- // to the secret) and to pass it to the remote execution engine.
- repeated Secret secrets = 2;
-
- // tokens indicate the list of token requests the task needs in order to proceed. Tokens will be mounted/passed to the
- // pod as it starts. If the plugin responsible for kicking off the task will not run it on a Flyte cluster (e.g. AWS
- // Batch), it's the responsibility of the plugin to fetch the secret (which means propeller identity will need access
- // to the secret) and to pass it to the remote execution engine.
- repeated OAuth2TokenRequest tokens = 3;
-}
diff --git a/flyrs/protos/flyteidl/core/tasks.proto b/flyrs/protos/flyteidl/core/tasks.proto
deleted file mode 100644
index 20a1fa0cbf..0000000000
--- a/flyrs/protos/flyteidl/core/tasks.proto
+++ /dev/null
@@ -1,351 +0,0 @@
-syntax = "proto3";
-
-import "flyteidl/core/identifier.proto";
-import "flyteidl/core/interface.proto";
-import "flyteidl/core/literals.proto";
-import "flyteidl/core/security.proto";
-import "google/protobuf/duration.proto";
-import "google/protobuf/struct.proto";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-// A customizable interface to convey resources requested for a container. This can be interpreted differently for different
-// container engines.
-message Resources {
- // Known resource names.
- enum ResourceName {
- UNKNOWN = 0;
- CPU = 1;
- GPU = 2;
- MEMORY = 3;
- STORAGE = 4;
- // For Kubernetes-based deployments, pods use ephemeral local storage for scratch space, caching, and for logs.
- EPHEMERAL_STORAGE = 5;
- }
-
- // Encapsulates a resource name and value.
- message ResourceEntry {
- // Resource name.
- ResourceName name = 1;
-
- // Value must be a valid k8s quantity. See
- // https://github.com/kubernetes/apimachinery/blob/master/pkg/api/resource/quantity.go#L30-L80
- string value = 2;
- }
-
- // The desired set of resources requested. ResourceNames must be unique within the list.
- repeated ResourceEntry requests = 1;
-
- // Defines a set of bounds (e.g. min/max) within which the task can reliably run. ResourceNames must be unique
- // within the list.
- repeated ResourceEntry limits = 2;
-}
-
-// Metadata associated with the GPU accelerator to allocate to a task. Contains
-// information about device type, and for multi-instance GPUs, the partition size to
-// use.
-message GPUAccelerator {
- // This can be any arbitrary string, and should be informed by the labels or taints
- // associated with the nodes in question. Default cloud provider labels typically
- // use the following values: `nvidia-tesla-t4`, `nvidia-tesla-a100`, etc.
- string device = 1;
- oneof partition_size_value {
- bool unpartitioned = 2;
- // Like `device`, this can be any arbitrary string, and should be informed by
- // the labels or taints associated with the nodes in question. Default cloud
- // provider labels typically use the following values: `1g.5gb`, `2g.10gb`, etc.
- string partition_size = 3;
- }
-}
-
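Resources above is a pair of request/limit lists whose values must parse as Kubernetes quantities (e.g. "500m" of CPU, "1Gi" of memory). A minimal sketch of populating that shape, again with hypothetical mirror types rather than the generated ones:

    // Hypothetical mirror of flyteidl.core.Resources; values follow the
    // Kubernetes quantity grammar referenced above (e.g. "500m", "2Gi").
    #[derive(Debug)]
    struct ResourceEntry {
        name: &'static str,
        value: &'static str,
    }

    #[derive(Debug)]
    struct Resources {
        requests: Vec<ResourceEntry>,
        limits: Vec<ResourceEntry>,
    }

    fn main() {
        let res = Resources {
            requests: vec![
                ResourceEntry { name: "CPU", value: "500m" },
                ResourceEntry { name: "MEMORY", value: "1Gi" },
            ],
            limits: vec![
                ResourceEntry { name: "CPU", value: "1" },
                ResourceEntry { name: "MEMORY", value: "2Gi" },
            ],
        };
        println!("{res:#?}");
    }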
-// Encapsulates all non-standard resources, not captured by v1.ResourceRequirements, to
-// allocate to a task.
-message ExtendedResources {
-    // GPU accelerator to select for task. Contains information about device type, and
-    // for multi-instance GPUs, the partition size to use.
-    GPUAccelerator gpu_accelerator = 1;
-}
-
-// Runtime information. This is loosely defined to allow for extensibility.
-message RuntimeMetadata {
-    enum RuntimeType {
-        OTHER = 0;
-        FLYTE_SDK = 1;
-    }
-
-    // Type of runtime.
-    RuntimeType type = 1;
-
-    // Version of the runtime. All versions should be backward compatible. However, certain cases call for version
-    // checks to ensure tighter validation or setting expectations.
-    string version = 2;
-
-    //+optional It can be used to provide extra information about the runtime (e.g. python, golang... etc.).
-    string flavor = 3;
-}
-
-// Task Metadata
-message TaskMetadata {
-    // Indicates whether the system should attempt to look up this task's output to avoid duplication of work.
-    bool discoverable = 1;
-
-    // Runtime information about the task.
-    RuntimeMetadata runtime = 2;
-
-    // The overall timeout of a task including user-triggered retries.
-    google.protobuf.Duration timeout = 4;
-
-    // Number of retries per task.
-    RetryStrategy retries = 5;
-
-    // Indicates a logical version to apply to this task for the purpose of discovery.
-    string discovery_version = 6;
-
-    // If set, this indicates that this task is deprecated. This will enable owners of tasks to notify consumers
-    // of the ending of support for a given task.
-    string deprecated_error_message = 7;
-
-    // For interruptible we will populate it at the node level but require it be part of TaskMetadata
-    // for a user to set the value.
-    // We are using oneof instead of bool because otherwise we would be unable to distinguish between value being
-    // set by the user or defaulting to false.
-    // The logic of handling precedence will be done as part of flytepropeller.
-
-    // Identify whether task is interruptible
-    oneof interruptible_value {
-        bool interruptible = 8;
-    };
-
-    // Indicates whether the system should attempt to execute discoverable instances in serial to avoid duplicate work
-    bool cache_serializable = 9;
-
-    // Indicates whether the task will generate a Deck URI when it finishes executing.
-    bool generates_deck = 10;
-
-    // Arbitrary tags that allow users and the platform to store small but arbitrary labels
-    map<string, string> tags = 11;
-
-    // pod_template_name is the unique name of a PodTemplate k8s resource to be used as the base configuration if this
-    // task creates a k8s Pod. If this value is set, the specified PodTemplate will be used instead of, but applied
-    // identically as, the default PodTemplate configured in FlytePropeller.
-    string pod_template_name = 12;
-
-    // cache_ignore_input_vars is the input variables that should not be included when calculating hash for cache.
-    repeated string cache_ignore_input_vars = 13;
-}
-
-// A Task structure that uniquely identifies a task in the system
-// Tasks are registered as a first step in the system.
-message TaskTemplate {
-    // Auto generated taskId by the system. Task Id uniquely identifies this task globally.
-    Identifier id = 1;
-
-    // A predefined yet extensible Task type identifier. This can be used to customize any of the components. If no
-    // extensions are provided in the system, Flyte will resolve this task to its TaskCategory and default to the
-    // implementation registered for the TaskCategory.
-    string type = 2;
-
-    // Extra metadata about the task.
-    TaskMetadata metadata = 3;
-
-    // A strongly typed interface for the task. This enables others to use this task within a workflow and guarantees
-    // compile-time validation of the workflow to avoid costly runtime failures.
-    TypedInterface interface = 4;
-
-    // Custom data about the task. This is extensible to allow various plugins in the system.
-    google.protobuf.Struct custom = 5;
-
-    // Known target types that the system will guarantee plugins for. Custom SDK plugins are allowed to set these if needed.
-    // If no corresponding execution-layer plugins are found, the system will default to handling these using built-in
-    // handlers.
-    oneof target {
-        Container container = 6;
-        K8sPod k8s_pod = 17;
-        Sql sql = 18;
-    }
-
-    // This can be used to customize task handling at execution time for the same task type.
-    int32 task_type_version = 7;
-
-    // security_context encapsulates security attributes requested to run this task.
-    SecurityContext security_context = 8;
-
-    // Encapsulates all non-standard resources, not captured by
-    // v1.ResourceRequirements, to allocate to a task.
-    ExtendedResources extended_resources = 9;
-
-    // Metadata about the custom defined for this task. This is extensible to allow various plugins in the system
-    // to use as required.
-    // reserve the field numbers 1 through 15 for very frequently occurring message elements
-    map<string, string> config = 16;
-}
-
-// ----------------- First class Plugins
-
-// Defines port properties for a container.
-message ContainerPort {
-    // Number of port to expose on the pod's IP address.
-    // This must be a valid port number, 0 < x < 65536.
-    uint32 container_port = 1;
-}
-
-message Container {
-    // Container image url. Eg: docker/redis:latest
-    string image = 1;
-
-    // Command to be executed, if not provided, the default entrypoint in the container image will be used.
-    repeated string command = 2;
-
-    // These will default to Flyte given paths. If provided, the system will not append known paths. If the task still
-    // needs flyte's inputs and outputs path, add $(FLYTE_INPUT_FILE), $(FLYTE_OUTPUT_FILE) wherever makes sense and the
-    // system will populate these before executing the container.
-    repeated string args = 3;
-
-    // Container resources requirement as specified by the container engine.
-    Resources resources = 4;
-
-    // Environment variables will be set as the container is starting up.
-    repeated KeyValuePair env = 5;
-
-    // Allows extra configs to be available for the container.
-    // TODO: elaborate on how configs will become available.
-    // Deprecated, please use TaskTemplate.config instead.
-    repeated KeyValuePair config = 6 [deprecated = true];
-
-    // Ports to open in the container. This feature is not supported by all execution engines. (e.g. supported on K8s but
-    // not supported on AWS Batch)
-    // Only K8s
-    repeated ContainerPort ports = 7;
-
-    // BETA: Optional configuration for DataLoading. If not specified, then default values are used.
-    // This makes it possible to run a completely portable container, that uses inputs and outputs
-    // only from the local file-system and without having any reference to flyteidl. This is supported only on K8s at the moment.
-    // If data loading is enabled, then data will be mounted in accompanying directories specified in the DataLoadingConfig. If the directories
-    // are not specified, inputs will be mounted onto and outputs will be uploaded from a pre-determined file-system path. Refer to the documentation
-    // to understand the default paths.
-    // Only K8s
-    DataLoadingConfig data_config = 9;
-
-    // Architecture-type the container image supports.
-    enum Architecture {
-        UNKNOWN = 0;
-        AMD64 = 1;
-        ARM64 = 2;
-        ARM_V6 = 3;
-        ARM_V7 = 4;
-    }
-    Architecture architecture = 10;
-}
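A sketch of populating the Container message above from the assumed flyteidl Python bindings; "my-entrypoint" is a hypothetical command, and the args use the documented placeholders that the platform substitutes before the container starts.

from flyteidl.core import literals_pb2, tasks_pb2

container = tasks_pb2.Container(
    image="docker/redis:latest",
    args=["my-entrypoint", "--inputs", "$(FLYTE_INPUT_FILE)", "--outputs", "$(FLYTE_OUTPUT_FILE)"],
    resources=tasks_pb2.Resources(
        requests=[tasks_pb2.Resources.ResourceEntry(name=tasks_pb2.Resources.CPU, value="250m")],
    ),
    env=[literals_pb2.KeyValuePair(key="LOG_LEVEL", value="debug")],
    architecture=tasks_pb2.Container.AMD64,
)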
-// Strategy to use when dealing with Blob, Schema, or multipart blob data (large datasets)
-message IOStrategy {
-    // Mode to use for downloading
-    enum DownloadMode {
-        // All data will be downloaded before the main container is executed
-        DOWNLOAD_EAGER = 0;
-        // Data will be downloaded as a stream and an End-Of-Stream marker will be written to indicate all data has been downloaded. Refer to protocol for details
-        DOWNLOAD_STREAM = 1;
-        // Large objects (offloaded) will not be downloaded
-        DO_NOT_DOWNLOAD = 2;
-    }
-    // Mode to use for uploading
-    enum UploadMode {
-        // All data will be uploaded after the main container exits
-        UPLOAD_ON_EXIT = 0;
-        // Data will be uploaded as it appears. Refer to protocol specification for details
-        UPLOAD_EAGER = 1;
-        // Data will not be uploaded, only references will be written
-        DO_NOT_UPLOAD = 2;
-    }
-    // Mode to use to manage downloads
-    DownloadMode download_mode = 1;
-    // Mode to use to manage uploads
-    UploadMode upload_mode = 2;
-}
-
-// This configuration allows executing raw containers in Flyte using the Flyte CoPilot system.
-// Flyte CoPilot eliminates the need for flytekit or an SDK inside the container. Any inputs required by the user's container are side-loaded in the input_path
-// Any outputs generated by the user container - within output_path are automatically uploaded.
-message DataLoadingConfig {
-    // LiteralMapFormat decides the encoding format in which the input metadata should be made available to the containers.
-    // If the user has access to the protocol buffer definitions, it is recommended to use the PROTO format.
-    // JSON and YAML do not need any protobuf definitions to read it
-    // All remote references in core.LiteralMap are replaced with local filesystem references (the data is downloaded to local filesystem)
-    enum LiteralMapFormat {
-        // JSON / YAML for the metadata (which contains inlined primitive values). The representation is inline with the standard json specification as specified - https://www.json.org/json-en.html
-        JSON = 0;
-        YAML = 1;
-        // Proto is a serialized binary of `core.LiteralMap` defined in flyteidl/core
-        PROTO = 2;
-    }
-    // Flag enables DataLoading Config. If this is not set, data loading will not be used!
-    bool enabled = 1;
-    // File system path (start at root). This folder will contain all the inputs exploded to a separate file.
-    // Example, if the input interface needs (x: int, y: blob, z: multipart_blob) and the input path is '/var/flyte/inputs', then the file system will look like
-    // /var/flyte/inputs/inputs.<metadata format dependent -> .pb .json .yaml> -> Format as defined previously. The Blob and Multipart blob will reference local filesystem instead of remote locations
-    // /var/flyte/inputs/x -> X is a file that contains the value of x (integer) in string format
-    // /var/flyte/inputs/y -> Y is a file in Binary format
-    // /var/flyte/inputs/z/... -> Note Z itself is a directory
-    // More information about the protocol - refer to docs #TODO reference docs here
-    string input_path = 2;
-    // File system path (start at root). This folder should contain all the outputs for the task as individual files and/or an error text file
-    string output_path = 3;
-    // In the inputs folder, there will be an additional summary/metadata file that contains references to all files or inlined primitive values.
-    // This format decides the actual encoding for the data. Refer to the encoding to understand the specifics of the contents and the encoding
-    LiteralMapFormat format = 4;
-    IOStrategy io_strategy = 5;
-}
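A minimal sketch of the DataLoadingConfig just defined, using the assumed flyteidl Python bindings: inputs are downloaded eagerly, outputs uploaded when the container exits, and the inputs summary is encoded as JSON.

from flyteidl.core import tasks_pb2

data_config = tasks_pb2.DataLoadingConfig(
    enabled=True,
    input_path="/var/flyte/inputs",
    output_path="/var/flyte/outputs",
    format=tasks_pb2.DataLoadingConfig.JSON,
    io_strategy=tasks_pb2.IOStrategy(
        download_mode=tasks_pb2.IOStrategy.DOWNLOAD_EAGER,
        upload_mode=tasks_pb2.IOStrategy.UPLOAD_ON_EXIT,
    ),
)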
-// Defines a pod spec and additional pod metadata that is created when a task is executed.
-message K8sPod {
-    // Contains additional metadata for building a kubernetes pod.
-    K8sObjectMetadata metadata = 1;
-
-    // Defines the primary pod spec created when a task is executed.
-    // This should be a JSON-marshalled pod spec, which can be defined in
-    // - go, using: https://github.com/kubernetes/api/blob/release-1.21/core/v1/types.go#L2936
-    // - python: using https://github.com/kubernetes-client/python/blob/release-19.0/kubernetes/client/models/v1_pod_spec.py
-    google.protobuf.Struct pod_spec = 2;
-
-    // BETA: Optional configuration for DataLoading. If not specified, then default values are used.
-    // This makes it possible to run a completely portable container, that uses inputs and outputs
-    // only from the local file-system and without having any reference to flytekit. This is supported only on K8s at the moment.
-    // If data loading is enabled, then data will be mounted in accompanying directories specified in the DataLoadingConfig. If the directories
-    // are not specified, inputs will be mounted onto and outputs will be uploaded from a pre-determined file-system path. Refer to the documentation
-    // to understand the default paths.
-    // Only K8s
-    DataLoadingConfig data_config = 3;
-}
-
-// Metadata for building a kubernetes object when a task is executed.
-message K8sObjectMetadata {
-    // Optional labels to add to the pod definition.
-    map<string, string> labels = 1;
-
-    // Optional annotations to add to the pod definition.
-    map<string, string> annotations = 2;
-}
-
-// Sql represents a generic sql workload with a statement and dialect.
-message Sql {
-    // The actual query to run, the query can have templated parameters.
-    // We use Flyte's Golang templating format for Query templating.
-    // For example,
-    // insert overwrite directory '{{ .rawOutputDataPrefix }}' stored as parquet
-    // select *
-    // from my_table
-    // where ds = '{{ .Inputs.ds }}'
-    string statement = 1;
-    // The dialect of the SQL statement. This is used to validate and parse SQL statements at compilation time to avoid
-    // expensive runtime operations. If set to an unsupported dialect, no validation will be done on the statement.
-    // We support the following dialects: ansi, hive.
-    enum Dialect {
-        UNDEFINED = 0;
-        ANSI = 1;
-        HIVE = 2;
-        OTHER = 3;
-    }
-    Dialect dialect = 2;
-}
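For completeness, the Sql message above in the assumed Python bindings; the statement carries the Go-template placeholders shown in the comment, which are resolved at execution time.

from flyteidl.core import tasks_pb2

sql = tasks_pb2.Sql(
    statement="insert overwrite directory '{{ .rawOutputDataPrefix }}' stored as parquet "
              "select * from my_table where ds = '{{ .Inputs.ds }}'",
    dialect=tasks_pb2.Sql.HIVE,
)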
diff --git a/flyrs/protos/flyteidl/core/types.proto b/flyrs/protos/flyteidl/core/types.proto
deleted file mode 100644
index 2c36ff32ec..0000000000
--- a/flyrs/protos/flyteidl/core/types.proto
+++ /dev/null
@@ -1,208 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-import "google/protobuf/struct.proto";
-
-// Define a set of simple types.
-enum SimpleType {
-    NONE = 0;
-    INTEGER = 1;
-    FLOAT = 2;
-    STRING = 3;
-    BOOLEAN = 4;
-    DATETIME = 5;
-    DURATION = 6;
-    BINARY = 7;
-    ERROR = 8;
-    STRUCT = 9;
-}
-
-// Defines schema columns and types to strongly type-validate schemas interoperability.
-message SchemaType {
-    message SchemaColumn {
-        // A unique name -within the schema type- for the column
-        string name = 1;
-
-        enum SchemaColumnType {
-            INTEGER = 0;
-            FLOAT = 1;
-            STRING = 2;
-            BOOLEAN = 3;
-            DATETIME = 4;
-            DURATION = 5;
-        }
-
-        // The column type. This allows a limited set of types currently.
-        SchemaColumnType type = 2;
-    }
-
-    // A list of ordered columns this schema comprises.
-    repeated SchemaColumn columns = 3;
-}
-
-message StructuredDatasetType {
-    message DatasetColumn {
-        // A unique name within the schema type for the column.
-        string name = 1;
-
-        // The column type.
-        LiteralType literal_type = 2;
-    }
-
-    // A list of ordered columns this schema comprises.
-    repeated DatasetColumn columns = 1;
-
-    // This is the storage format, the format of the bits at rest
-    // parquet, feather, csv, etc.
-    // For two types to be compatible, the format will need to be an exact match.
-    string format = 2;
-
-    // This is a string representing the type that the bytes in external_schema_bytes are formatted in.
-    // This is an optional field that will not be used for type checking.
-    string external_schema_type = 3;
-
-    // The serialized bytes of a third-party schema library like Arrow.
-    // This is an optional field that will not be used for type checking.
-    bytes external_schema_bytes = 4;
-}
-
-// Defines type behavior for blob objects
-message BlobType {
-    enum BlobDimensionality {
-        SINGLE = 0;
-        MULTIPART = 1;
-    }
-
-    // Format can be a free form string understood by SDK/UI etc like
-    // csv, parquet etc
-    string format = 1;
-    BlobDimensionality dimensionality = 2;
-}
-
-// Enables declaring enum types, with predefined string values
-// For len(values) > 0, the first value in the ordered list is regarded as the default value. If you wish
-// to provide no default, make the first value undefined.
-message EnumType {
-    // Predefined set of enum values.
-    repeated string values = 1;
-}
-
-// Defines a tagged union type, also known as a variant (and formally as the sum type).
-//
-// A sum type S is defined by a sequence of types (A, B, C, ...), each tagged by a string tag
-// A value of type S is constructed from a value of any of the variant types. The specific choice of type is recorded by
-// storing the variant's tag with the literal value and can be examined at runtime.
-//
-// Type S is typically written as
-// S := Apple A | Banana B | Cantaloupe C | ...
-//
-// Notably, a nullable (optional) type is a sum type between some type X and the singleton type representing a null-value:
-// Optional X := X | Null
-//
-// See also: https://en.wikipedia.org/wiki/Tagged_union
-message UnionType {
-    // Predefined set of variants in union.
-    repeated LiteralType variants = 1;
-}
-
-// Hints to improve type matching
-// e.g. allows distinguishing output from custom type transformers
-// even if the underlying IDL serialization matches.
-message TypeStructure {
-    // Must exactly match for types to be castable
-    string tag = 1;
-    // dataclass_type only exists for dataclasses.
-    // This is used to resolve the type of the fields of dataclass
-    // The key is the field name, and the value is the literal type of the field
-    // e.g. For dataclass Foo, with fields a, and a is a string
-    // Foo.a will be resolved as a literal type of string from dataclass_type
-    map<string, LiteralType> dataclass_type = 2;
-}
-
-// TypeAnnotation encapsulates registration time information about a type. This can be used for various control-plane operations. TypeAnnotation will not be available at runtime when a task runs.
-message TypeAnnotation {
-    // An arbitrary JSON payload to describe a type.
-    google.protobuf.Struct annotations = 1;
-}
-
-// Defines a strong type to allow type checking between interfaces.
-message LiteralType {
-    oneof type {
-        // A simple type that can be compared one-to-one with another.
-        SimpleType simple = 1;
-
-        // A complex type that requires matching of inner fields.
-        SchemaType schema = 2;
-
-        // Defines the type of the value of a collection. Only homogeneous collections are allowed.
-        LiteralType collection_type = 3;
-
-        // Defines the type of the value of a map type. The type of the key is always a string.
-        LiteralType map_value_type = 4;
-
-        // A blob might have specialized implementation details depending on associated metadata.
-        BlobType blob = 5;
-
-        // Defines an enum with pre-defined string values.
-        EnumType enum_type = 7;
-
-        // Generalized schema support
-        StructuredDatasetType structured_dataset_type = 8;
-
-        // Defines a union type with pre-defined LiteralTypes.
-        UnionType union_type = 10;
-    }
-
-    // This field contains type metadata that is descriptive of the type, but is NOT considered in type-checking. This might be used by
-    // consumers to identify special behavior or display extended information for the type.
-    google.protobuf.Struct metadata = 6;
-
-    // This field contains arbitrary data that might have special semantic
-    // meaning for the client but does not affect internal flyte behavior.
-    TypeAnnotation annotation = 9;
-
-    // Hints to improve type matching.
-    TypeStructure structure = 11;
-}
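To make the union-type comment concrete, here is an optional string expressed as the sum type STRING | NONE, sketched with the assumed flyteidl Python bindings; the "str"/"none" tags are illustrative.

from flyteidl.core import types_pb2

optional_string = types_pb2.LiteralType(
    union_type=types_pb2.UnionType(
        variants=[
            types_pb2.LiteralType(simple=types_pb2.SimpleType.STRING,
                                  structure=types_pb2.TypeStructure(tag="str")),
            types_pb2.LiteralType(simple=types_pb2.SimpleType.NONE,
                                  structure=types_pb2.TypeStructure(tag="none")),
        ]
    )
)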
-// A reference to an output produced by a node. The type can be retrieved -and validated- from
-// the underlying interface of the node.
-message OutputReference {
-    // Node id must exist at the graph layer.
-    string node_id = 1;
-
-    // Variable name must refer to an output variable for the node.
-    string var = 2;
-
-    repeated PromiseAttribute attr_path = 3;
-}
-
-// PromiseAttribute stores the attribute path of a promise, which will be resolved at runtime.
-// The attribute path is a list of strings and integers.
-// In the following example,
-// ```
-// @workflow
-// def wf():
-//     o = t1()
-//     t2(o.a["b"][0])
-// ```
-// the output reference that t2 binds to carries the PromiseAttribute path ["a", "b", 0]
-
-message PromiseAttribute {
-    oneof value {
-        string string_value = 1;
-        int32 int_value = 2;
-    }
-}
-
-// Represents an error thrown from a node.
-message Error {
-    // The node id that threw the error.
-    string failed_node_id = 1;
-
-    // Error message thrown.
-    string message = 2;
-}
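The workflow example in the PromiseAttribute comment maps directly onto an OutputReference; a sketch with the assumed Python bindings, where "n0" is a hypothetical node id.

from flyteidl.core import types_pb2

# The reference that t2 binds to above: o.a["b"][0]
ref = types_pb2.OutputReference(
    node_id="n0",
    var="o",
    attr_path=[
        types_pb2.PromiseAttribute(string_value="a"),
        types_pb2.PromiseAttribute(string_value="b"),
        types_pb2.PromiseAttribute(int_value=0),
    ],
)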
diff --git a/flyrs/protos/flyteidl/core/workflow.proto b/flyrs/protos/flyteidl/core/workflow.proto
deleted file mode 100644
index 4701526d72..0000000000
--- a/flyrs/protos/flyteidl/core/workflow.proto
+++ /dev/null
@@ -1,315 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-import "flyteidl/core/condition.proto";
-import "flyteidl/core/execution.proto";
-import "flyteidl/core/identifier.proto";
-import "flyteidl/core/interface.proto";
-import "flyteidl/core/literals.proto";
-import "flyteidl/core/tasks.proto";
-import "flyteidl/core/types.proto";
-import "flyteidl/core/security.proto";
-import "google/protobuf/duration.proto";
-
-// Defines a condition and the execution unit that should be executed if the condition is satisfied.
-message IfBlock {
-    core.BooleanExpression condition = 1;
-    Node then_node = 2;
-}
-
-// Defines a series of if/else blocks. The first branch whose condition evaluates to true is the one to execute.
-// If no conditions were satisfied, the else_node or the error will execute.
-message IfElseBlock {
-    //+required. First condition to evaluate.
-    IfBlock case = 1;
-
-    //+optional. Additional branches to evaluate.
-    repeated IfBlock other = 2;
-
-    //+required.
-    oneof default {
-        // The node to execute in case none of the branches were taken.
-        Node else_node = 3;
-
-        // An error to throw in case none of the branches were taken.
-        Error error = 4;
-    }
-}
-
-// BranchNode is a special node that alters the flow of the workflow graph. It allows the control flow to branch at
-// runtime based on a series of conditions that get evaluated on various parameters (e.g. inputs, primitives).
-message BranchNode {
-    //+required
-    IfElseBlock if_else = 1;
-}
-
-// Refers to the task that the Node is to execute.
-message TaskNode {
-    oneof reference {
-        // A globally unique identifier for the task.
-        Identifier reference_id = 1;
-    }
-
-    // Optional overrides applied at task execution time.
-    TaskNodeOverrides overrides = 2;
-}
-
-// Refers to the workflow the node is to execute.
-message WorkflowNode {
-    oneof reference {
-        // A globally unique identifier for the launch plan.
-        Identifier launchplan_ref = 1;
-
-        // Reference to a subworkflow, that should be defined with the compiler context
-        Identifier sub_workflow_ref = 2;
-    }
-}
-
-// ApproveCondition represents a dependency on an external approval. During execution, this will manifest as a boolean
-// signal with the provided signal_id.
-message ApproveCondition {
-    // A unique identifier for the requested boolean signal.
-    string signal_id = 1;
-}
-
-// SignalCondition represents a dependency on a signal.
-message SignalCondition {
-    // A unique identifier for the requested signal.
-    string signal_id = 1;
-
-    // A type denoting the required value type for this signal.
-    LiteralType type = 2;
-
-    // The variable name for the signal value in this node's outputs.
-    string output_variable_name = 3;
-}
-
-// SleepCondition represents a dependency on waiting for the specified duration.
-message SleepCondition {
-    // The overall duration for this sleep.
-    google.protobuf.Duration duration = 1;
-}
-
-// GateNode refers to the condition that is required for the gate to successfully complete.
-message GateNode {
-    oneof condition {
-        // ApproveCondition represents a dependency on an external approval provided by a boolean signal.
-        ApproveCondition approve = 1;
-
-        // SignalCondition represents a dependency on a signal.
-        SignalCondition signal = 2;
-
-        // SleepCondition represents a dependency on waiting for the specified duration.
-        SleepCondition sleep = 3;
-    }
-}
-
-// ArrayNode is a Flyte node type that simplifies the execution of a sub-node over a list of input
-// values. An ArrayNode can be executed with configurable parallelism (separate from the parent
-// workflow) and can be configured to succeed when a certain number of sub-nodes succeed.
-message ArrayNode {
-    // node is the sub-node that will be executed for each element in the array.
-    Node node = 1;
-
-    // parallelism defines the minimum number of instances to bring up concurrently at any given
-    // point. Note that this is an optimistic restriction and that, due to network partitioning or
-    // other failures, the actual number of currently running instances might be more. This has to
-    // be a positive number if assigned. Default value is size.
-    uint32 parallelism = 2;
-
-    oneof success_criteria {
-        // min_successes is an absolute number of the minimum number of successful completions of
-        // sub-nodes. As soon as this criteria is met, the ArrayNode will be marked as successful
-        // and outputs will be computed. This has to be a non-negative number if assigned.
-        // Default value is size (if specified).
-        uint32 min_successes = 3;
-
-        // If the array job size is not known beforehand, the min_success_ratio can instead be used
-        // to determine when an ArrayNode can be marked successful.
-        float min_success_ratio = 4;
-    }
-}
-
-// Defines extra information about the Node.
-message NodeMetadata {
-    // A friendly name for the Node
-    string name = 1;
-
-    // The overall timeout of a task.
-    google.protobuf.Duration timeout = 4;
-
-    // Number of retries per task.
-    RetryStrategy retries = 5;
-
-    // Identify whether node is interruptible
-    oneof interruptible_value {
-        bool interruptible = 6;
-    };
-
-    // Identify whether a node should have its outputs cached.
-    oneof cacheable_value {
-        bool cacheable = 7;
-    }
-
-    // The version of the cache to use.
-    oneof cache_version_value {
-        string cache_version = 8;
-    }
-
-    // Identify whether caching operations involving this node should be serialized.
-    oneof cache_serializable_value {
-        bool cache_serializable = 9;
-    }
-}
-
-// Links a variable to an alias.
-message Alias {
-    // Must match one of the output variable names on a node.
-    string var = 1;
-
-    // A workflow-level unique alias that downstream nodes can refer to in their input.
-    string alias = 2;
-}
-
-// A Workflow graph Node. One unit of execution in the graph. Each node can be linked to a Task, a Workflow or a branch
-// node.
-message Node {
-    // A workflow-level unique identifier that identifies this node in the workflow. 'inputs' and 'outputs' are reserved
-    // node ids that cannot be used by other nodes.
-    string id = 1;
-
-    // Extra metadata about the node.
-    NodeMetadata metadata = 2;
-
-    // Specifies how to bind the underlying interface's inputs. All required inputs specified in the underlying interface
-    // must be fulfilled.
-    repeated Binding inputs = 3;
-
-    //+optional Specifies execution dependency for this node ensuring it will only get scheduled to run after all its
-    // upstream nodes have completed. This node will have an implicit dependency on any node that appears in inputs
-    // field.
-    repeated string upstream_node_ids = 4;
-
-    //+optional. A node can define aliases for a subset of its outputs. This is particularly useful if different nodes
-    // need to conform to the same interface (e.g. all branches in a branch node). Downstream nodes must refer to this
-    // node's outputs using the alias if one is specified.
-    repeated Alias output_aliases = 5;
-
-    // Information about the target to execute in this node.
-    oneof target {
-        // Information about the Task to execute in this node.
-        TaskNode task_node = 6;
-
-        // Information about the Workflow to execute in this node.
-        WorkflowNode workflow_node = 7;
-
-        // Information about the branch node to evaluate in this node.
-        BranchNode branch_node = 8;
-
-        // Information about the condition to evaluate in this node.
-        GateNode gate_node = 9;
-
-        // Information about the sub-node executions for each value in this node's list of
-        // input values.
-        ArrayNode array_node = 10;
-    }
-}
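A sketch of a task-backed Node fanned out with ArrayNode, using the assumed Python bindings; the ids, project, and version strings are hypothetical.

from flyteidl.core import identifier_pb2, workflow_pb2

# A node that runs a registered task, gated on an upstream node having completed.
node = workflow_pb2.Node(
    id="n1",
    task_node=workflow_pb2.TaskNode(
        reference_id=identifier_pb2.Identifier(
            resource_type=identifier_pb2.ResourceType.TASK,
            project="flytesnacks", domain="development",
            name="my.task", version="v1",
        )
    ),
    upstream_node_ids=["n0"],
)

# Fan the node out over a list input: at most 10 at a time, 90% must succeed.
array = workflow_pb2.ArrayNode(node=node, parallelism=10, min_success_ratio=0.9)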
-// This is workflow layer metadata. These settings are only applicable to the workflow as a whole, and do not
-// percolate down to child entities (like tasks) launched by the workflow.
-message WorkflowMetadata {
-    // Indicates the runtime priority of workflow executions.
-    QualityOfService quality_of_service = 1;
-
-    // Failure Handling Strategy
-    enum OnFailurePolicy {
-        // FAIL_IMMEDIATELY instructs the system to fail as soon as a node fails in the workflow. It'll automatically
-        // abort all currently running nodes and clean up resources before finally marking the workflow execution as
-        // failed.
-        FAIL_IMMEDIATELY = 0;
-
-        // FAIL_AFTER_EXECUTABLE_NODES_COMPLETE instructs the system to make as much progress as it can. The system will
-        // not alter the dependencies of the execution graph, so any node that depends on the failed node will not be run.
-        // Other nodes will be executed to completion before resources are cleaned up and the workflow
-        // execution is marked as failed.
-        FAIL_AFTER_EXECUTABLE_NODES_COMPLETE = 1;
-    }
-
-    // Defines how the system should behave when a failure is detected in the workflow execution.
-    OnFailurePolicy on_failure = 2;
-
-    // Arbitrary tags that allow users and the platform to store small but arbitrary labels
-    map<string, string> tags = 3;
-}
-
-// The difference between these settings and the WorkflowMetadata ones is that these are meant to be passed down to
-// a workflow's underlying entities (like tasks). For instance, 'interruptible' has no meaning at the workflow layer, it
-// is only relevant when a task executes. The settings here are the defaults that are passed to all nodes
-// unless explicitly overridden at the node layer.
-// If you are adding a setting that applies to both the Workflow itself, and everything underneath it, it should be
-// added to both this object and the WorkflowMetadata object above.
-message WorkflowMetadataDefaults {
-    // Whether child nodes of the workflow are interruptible.
-    bool interruptible = 1;
-}
-
-// Flyte Workflow Structure that encapsulates task, branch and subworkflow nodes to form a statically analyzable,
-// directed acyclic graph.
-message WorkflowTemplate {
-    // A globally unique identifier for the workflow.
-    Identifier id = 1;
-
-    // Extra metadata about the workflow.
-    WorkflowMetadata metadata = 2;
-
-    // Defines a strongly typed interface for the Workflow. This can include some optional parameters.
-    TypedInterface interface = 3;
-
-    // A list of nodes. In addition, 'globals' is a special reserved node id that can be used to consume workflow inputs.
-    repeated Node nodes = 4;
-
-    // A list of output bindings that specify how to construct workflow outputs. Bindings can pull node outputs or
-    // specify literals. All workflow outputs specified in the interface field must be bound in order for the workflow
-    // to be validated. A workflow has an implicit dependency on all of its nodes to execute successfully in order to
-    // bind final outputs.
-    // Most of these outputs will be Bindings with a BindingData of type OutputReference. That is, your workflow can
-    // just have an output of some constant (`Output(5)`), but usually, the workflow will be pulling
-    // outputs from the output of a task.
-    repeated Binding outputs = 5;
-
-    //+optional A catch-all node. This node is executed whenever the execution engine determines the workflow has failed.
-    // The interface of this node must match the Workflow interface with an additional input named 'error' of type
-    // pb.lyft.flyte.core.Error.
-    Node failure_node = 6;
-
-    // workflow defaults
-    WorkflowMetadataDefaults metadata_defaults = 7;
-}
-
-// Optional task node overrides that will be applied at task execution time.
-message TaskNodeOverrides {
-    // A customizable interface to convey resources requested for a task container.
-    Resources resources = 1;
-
-    // Overrides for all non-standard resources, not captured by
-    // v1.ResourceRequirements, to allocate to a task.
-    ExtendedResources extended_resources = 2;
-
-    // Override for the image used by task pods.
-    string container_image = 3;
-}
-
-// A structure that uniquely identifies a launch plan in the system.
-message LaunchPlanTemplate {
-    // A globally unique identifier for the launch plan.
-    Identifier id = 1;
-
-    // The input and output interface for the launch plan
-    TypedInterface interface = 2;
-
-    // A collection of input literals that are fixed for the launch plan
-    LiteralMap fixed_inputs = 3;
-}
diff --git a/flyrs/protos/flyteidl/core/workflow_closure.proto b/flyrs/protos/flyteidl/core/workflow_closure.proto
deleted file mode 100644
index c8ee990036..0000000000
--- a/flyrs/protos/flyteidl/core/workflow_closure.proto
+++ /dev/null
@@ -1,18 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.core;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/core";
-
-import "flyteidl/core/workflow.proto";
-import "flyteidl/core/tasks.proto";
-
-// Defines an enclosed package of workflow and tasks it references.
-message WorkflowClosure {
-    //required. Workflow template.
-    WorkflowTemplate workflow = 1;
-
-    //optional. A collection of tasks referenced by the workflow. Only needed if the workflow
-    // references tasks.
-    repeated TaskTemplate tasks = 2;
-}
diff --git a/flyrs/protos/flyteidl/datacatalog/datacatalog.proto b/flyrs/protos/flyteidl/datacatalog/datacatalog.proto
deleted file mode 100644
index e296603113..0000000000
--- a/flyrs/protos/flyteidl/datacatalog/datacatalog.proto
+++ /dev/null
@@ -1,420 +0,0 @@
-syntax = "proto3";
-
-package datacatalog;
-
-import "flyteidl/core/literals.proto";
-import "google/protobuf/duration.proto";
-import "google/protobuf/timestamp.proto";
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/datacatalog";
-
-/*
- * Data Catalog service definition
- * Data Catalog is a service for indexing parameterized, strongly-typed data artifacts across revisions.
- * Artifacts are associated with a Dataset, and can be tagged for retrieval.
- */
-service DataCatalog {
-    // Create a new Dataset. Datasets are unique based on the DatasetID. Datasets are logical groupings of artifacts.
-    // Each dataset can have one or more artifacts
-    rpc CreateDataset (CreateDatasetRequest) returns (CreateDatasetResponse);
-
-    // Get a Dataset by the DatasetID. This returns the Dataset with the associated metadata.
-    rpc GetDataset (GetDatasetRequest) returns (GetDatasetResponse);
-
-    // Create an artifact and the artifact data associated with it. An artifact can be a hive partition or arbitrary
-    // files or data values
-    rpc CreateArtifact (CreateArtifactRequest) returns (CreateArtifactResponse);
-
-    // Retrieve an artifact by an identifying handle. This returns an artifact along with the artifact data.
-    rpc GetArtifact (GetArtifactRequest) returns (GetArtifactResponse);
-
-    // Associate a tag with an artifact. Tags are unique within a Dataset.
-    rpc AddTag (AddTagRequest) returns (AddTagResponse);
-
-    // Return a paginated list of artifacts
-    rpc ListArtifacts (ListArtifactsRequest) returns (ListArtifactsResponse);
-
-    // Return a paginated list of datasets
-    rpc ListDatasets (ListDatasetsRequest) returns (ListDatasetsResponse);
-
-    // Updates an existing artifact, overwriting the stored artifact data in the underlying blob storage.
-    rpc UpdateArtifact (UpdateArtifactRequest) returns (UpdateArtifactResponse);
-
-    // Attempts to get or extend a reservation for the corresponding artifact. If one already exists
-    // (ie. another entity owns the reservation) then that reservation is retrieved.
-    // Once you acquire a reservation, you need to periodically extend the reservation with an
-    // identical call. If the reservation is not extended before the defined expiration, it may be
-    // acquired by another task.
-    // Note: We may have multiple concurrent tasks with the same signature and the same input that
-    // try to populate the same artifact at the same time. Thus with reservation, only one task can
-    // run at a time, until the reservation expires.
-    // Note: If task A does not extend the reservation in time and the reservation expires, another
-    // task B may take over the reservation, resulting in two tasks A and B running in parallel. So
-    // a third task C may get the Artifact from A or B, whichever writes last.
-    rpc GetOrExtendReservation (GetOrExtendReservationRequest) returns (GetOrExtendReservationResponse);
-
-    // Release the reservation when the task holding the spot fails so that the other tasks
-    // can grab the spot.
-    rpc ReleaseReservation (ReleaseReservationRequest) returns (ReleaseReservationResponse);
-}
-
-/*
- * Request message for creating a Dataset.
- */
-message CreateDatasetRequest {
-    Dataset dataset = 1;
-}
-
-/*
- * Response message for creating a Dataset
- */
-message CreateDatasetResponse {
-
-}
-
-/*
- * Request message for retrieving a Dataset. The Dataset is retrieved by its unique identifier
- * which is a combination of several fields.
- */
-message GetDatasetRequest {
-    DatasetID dataset = 1;
-}
-
-/*
- * Response message for retrieving a Dataset. The response will include the metadata for the
- * Dataset.
- */
-message GetDatasetResponse {
-    Dataset dataset = 1;
-}
-
-/*
- * Request message for retrieving an Artifact. Retrieve an artifact based on a query handle that
- * can be one of artifact_id or tag. The result returned will include the artifact data and metadata
- * associated with the artifact.
- */
-message GetArtifactRequest {
-    DatasetID dataset = 1;
-
-    oneof query_handle {
-        string artifact_id = 2;
-        string tag_name = 3;
-    }
-}
-
-/*
- * Response message for retrieving an Artifact. The result returned will include the artifact data
- * and metadata associated with the artifact.
- */
-message GetArtifactResponse {
-    Artifact artifact = 1;
-}
-
-/*
- * Request message for creating an Artifact and its associated artifact Data.
- */
-message CreateArtifactRequest {
-    Artifact artifact = 1;
-}
-
-/*
- * Response message for creating an Artifact.
- */
-message CreateArtifactResponse {
-
-}
-
-/*
- * Request message for tagging an Artifact.
- */
-message AddTagRequest {
-    Tag tag = 1;
-}
-
-/*
- * Response message for tagging an Artifact.
- */
-message AddTagResponse {
-
-}
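The reservation protocol described in the service definition above amounts to a heartbeat loop. A sketch of the client side, assuming the protoc-generated gRPC stubs (datacatalog_pb2_grpc and the DataCatalogStub class) ship with the flyteidl Python package; the endpoint and ids are hypothetical.

import grpc
from google.protobuf import duration_pb2
from flyteidl.datacatalog import datacatalog_pb2, datacatalog_pb2_grpc

channel = grpc.insecure_channel("localhost:8089")  # hypothetical endpoint
stub = datacatalog_pb2_grpc.DataCatalogStub(channel)

request = datacatalog_pb2.GetOrExtendReservationRequest(
    reservation_id=datacatalog_pb2.ReservationID(
        dataset_id=datacatalog_pb2.DatasetID(
            project="flytesnacks", domain="development", name="my-dataset", version="abc123",
        ),
        tag_name="main",
    ),
    owner_id="task-a",
    heartbeat_interval=duration_pb2.Duration(seconds=30),
)

# Re-issue the identical call on a timer; if the reservation lapses, another
# owner may acquire it, exactly as the comments above warn.
reservation = stub.GetOrExtendReservation(request).reservation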
-// List the artifacts that belong to the Dataset, optionally filtered using a filter expression.
-message ListArtifactsRequest {
-    // Use a datasetID for which you want to retrieve the artifacts
-    DatasetID dataset = 1;
-
-    // Apply the filter expression to this query
-    FilterExpression filter = 2;
-    // Pagination options to get a page of artifacts
-    PaginationOptions pagination = 3;
-}
-
-// Response to list artifacts
-message ListArtifactsResponse {
-    // The list of artifacts
-    repeated Artifact artifacts = 1;
-    // Token to use to request the next page, pass this into the next request's PaginationOptions
-    string next_token = 2;
-}
-
-// List the datasets for the given query
-message ListDatasetsRequest {
-    // Apply the filter expression to this query
-    FilterExpression filter = 1;
-    // Pagination options to get a page of datasets
-    PaginationOptions pagination = 2;
-}
-
-// List the datasets response with token for next pagination
-message ListDatasetsResponse {
-    // The list of datasets
-    repeated Dataset datasets = 1;
-    // Token to use to request the next page, pass this into the next request's PaginationOptions
-    string next_token = 2;
-}
-
-/*
- * Request message for updating an Artifact and overwriting its associated ArtifactData.
- */
-message UpdateArtifactRequest {
-    // ID of dataset the artifact is associated with
-    DatasetID dataset = 1;
-
-    // Either ID of artifact or name of tag to retrieve existing artifact from
-    oneof query_handle {
-        string artifact_id = 2;
-        string tag_name = 3;
-    }
-
-    // List of data to overwrite stored artifact data with. Must contain ALL data for updated Artifact as any missing
-    // ArtifactData entries will be removed from the underlying blob storage and database.
-    repeated ArtifactData data = 4;
-
-    // Update execution metadata (including execution domain, name, node, project data) when overwriting cache
-    Metadata metadata = 5;
-}
-
-/*
- * Response message for updating an Artifact.
- */
-message UpdateArtifactResponse {
-    // The unique ID of the artifact updated
-    string artifact_id = 1;
-}
-
-/*
- * ReservationID message that is composed of several string fields.
- */
-message ReservationID {
-    // The unique ID for the reserved dataset
-    DatasetID dataset_id = 1;
-
-    // The specific artifact tag for the reservation
-    string tag_name = 2;
-}
-
-// Try to acquire or extend an artifact reservation. If an active reservation exists, retrieve that instance.
-message GetOrExtendReservationRequest {
-    // The unique ID for the reservation
-    ReservationID reservation_id = 1;
-
-    // The unique ID of the owner for the reservation
-    string owner_id = 2;
-
-    // Requested reservation extension heartbeat interval
-    google.protobuf.Duration heartbeat_interval = 3;
-}
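The next_token comments above imply a fetch-until-empty loop. A sketch of paging through ListArtifacts with a tag filter, under the same assumed module paths as the previous sketch.

from flyteidl.datacatalog import datacatalog_pb2, datacatalog_pb2_grpc
import grpc

stub = datacatalog_pb2_grpc.DataCatalogStub(grpc.insecure_channel("localhost:8089"))

request = datacatalog_pb2.ListArtifactsRequest(
    dataset=datacatalog_pb2.DatasetID(project="flytesnacks", domain="development",
                                      name="my-dataset", version="abc123"),
    filter=datacatalog_pb2.FilterExpression(filters=[
        datacatalog_pb2.SinglePropertyFilter(
            tag_filter=datacatalog_pb2.TagPropertyFilter(tag_name="main"),
            operator=datacatalog_pb2.SinglePropertyFilter.EQUALS,
        ),
    ]),
    pagination=datacatalog_pb2.PaginationOptions(limit=50),
)

artifacts = []
while True:
    response = stub.ListArtifacts(request)
    artifacts.extend(response.artifacts)
    if not response.next_token:
        break
    # Feed the token back into the next request's PaginationOptions.
    request.pagination.token = response.next_token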
-// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata.
-message Reservation {
-    // The unique ID for the reservation
-    ReservationID reservation_id = 1;
-
-    // The unique ID of the owner for the reservation
-    string owner_id = 2;
-
-    // Recommended heartbeat interval to extend reservation
-    google.protobuf.Duration heartbeat_interval = 3;
-
-    // Expiration timestamp of this reservation
-    google.protobuf.Timestamp expires_at = 4;
-
-    // Free-form metadata associated with the artifact
-    Metadata metadata = 6;
-}
-
-// Response including either a newly minted reservation or the existing reservation
-message GetOrExtendReservationResponse {
-    // The reservation to be acquired or extended
-    Reservation reservation = 1;
-}
-
-// Request to release reservation
-message ReleaseReservationRequest {
-    // The unique ID for the reservation
-    ReservationID reservation_id = 1;
-
-    // The unique ID of the owner for the reservation
-    string owner_id = 2;
-}
-
-// Response to release reservation
-message ReleaseReservationResponse {
-
-}
-
-/*
- * Dataset message. It is uniquely identified by DatasetID.
- */
-message Dataset {
-    DatasetID id = 1;
-    Metadata metadata = 2;
-    repeated string partitionKeys = 3;
-}
-
-/*
- * An artifact could have multiple partitions and each partition can have an arbitrary string key/value pair
- */
-message Partition {
-    string key = 1;
-    string value = 2;
-}
-
-/*
- * DatasetID message that is composed of several string fields.
- */
-message DatasetID {
-    string project = 1;  // The name of the project
-    string name = 2;     // The name of the dataset
-    string domain = 3;   // The domain (eg. environment)
-    string version = 4;  // Version of the data schema
-    string UUID = 5;     // UUID for the dataset (if set the above fields are optional)
-
-    // Optional, org key applied to the resource.
-    string org = 6;
-}
-
-/*
- * Artifact message. It is composed of several string fields.
- */
-message Artifact {
-    string id = 1;                     // The unique ID of the artifact
-    DatasetID dataset = 2;             // The Dataset that the artifact belongs to
-    repeated ArtifactData data = 3;    // A list of data that is associated with the artifact
-    Metadata metadata = 4;             // Free-form metadata associated with the artifact
-    repeated Partition partitions = 5;
-    repeated Tag tags = 6;
-    google.protobuf.Timestamp created_at = 7;  // creation timestamp of artifact, autogenerated by service
-}
-
-/*
- * ArtifactData that belongs to an artifact
- */
-message ArtifactData {
-    string name = 1;
-    flyteidl.core.Literal value = 2;
-}
-
-/*
- * Tag message that is unique to a Dataset. It is associated to a single artifact and
- * can be retrieved by name later.
- */
-message Tag {
-    string name = 1;         // Name of tag
-    string artifact_id = 2;  // The tagged artifact
-    DatasetID dataset = 3;   // The Dataset that this tag belongs to
-}
-
-/*
- * Metadata representation for artifacts and datasets
- */
-message Metadata {
-    map<string, string> key_map = 1;  // key map is a dictionary of key/val strings that represent metadata
-}
-
-// Filter expression that is composed of a combination of single filters
-message FilterExpression {
-    repeated SinglePropertyFilter filters = 1;
-}
-
-// A single property to filter on.
-message SinglePropertyFilter {
-    oneof property_filter {
-        TagPropertyFilter tag_filter = 1;
-        PartitionPropertyFilter partition_filter = 2;
-        ArtifactPropertyFilter artifact_filter = 3;
-        DatasetPropertyFilter dataset_filter = 4;
-    }
-
-    // as use-cases come up we can add more operators, ex: gte, like, not eq etc.
-    enum ComparisonOperator {
-        EQUALS = 0;
-    }
-
-    ComparisonOperator operator = 10;  // field 10 in case we add more entities to query
-    // Next field number: 11
-}
-
-// Artifact properties we can filter by
-message ArtifactPropertyFilter {
-    // oneof because we can add more properties in the future
-    oneof property {
-        string artifact_id = 1;
-    }
-}
-
-// Tag properties we can filter by
-message TagPropertyFilter {
-    oneof property {
-        string tag_name = 1;
-    }
-}
-
-// Partition properties we can filter by
-message PartitionPropertyFilter {
-    oneof property {
-        KeyValuePair key_val = 1;
-    }
-}
-
-message KeyValuePair {
-    string key = 1;
-    string value = 2;
-}
-
-// Dataset properties we can filter by
-message DatasetPropertyFilter {
-    oneof property {
-        string project = 1;
-        string name = 2;
-        string domain = 3;
-        string version = 4;
-        // Optional, org key applied to the dataset.
-        string org = 5;
-    }
-}
-
-// Pagination options for making list requests
-message PaginationOptions {
-
-    // the max number of results to return
-    uint32 limit = 1;
-
-    // the token to pass to fetch the next page
-    string token = 2;
-
-    // the property that we want to sort the results by
-    SortKey sortKey = 3;
-
-    // the sort order of the results
-    SortOrder sortOrder = 4;
-
-    enum SortOrder {
-        DESCENDING = 0;
-        ASCENDING = 1;
-    }
-
-    enum SortKey {
-        CREATION_TIME = 0;
-    }
-}
diff --git a/flyrs/protos/flyteidl/event/cloudevents.proto b/flyrs/protos/flyteidl/event/cloudevents.proto
deleted file mode 100644
index d02c5ff516..0000000000
--- a/flyrs/protos/flyteidl/event/cloudevents.proto
+++ /dev/null
@@ -1,73 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.event;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/event";
-
-import "flyteidl/event/event.proto";
-import "flyteidl/core/literals.proto";
-import "flyteidl/core/interface.proto";
-import "flyteidl/core/artifact_id.proto";
-import "flyteidl/core/identifier.proto";
-import "google/protobuf/timestamp.proto";
-
-// This is the cloud event parallel to the raw WorkflowExecutionEvent message. It's filled in with additional
-// information that downstream consumers may find useful.
-message CloudEventWorkflowExecution {
-    event.WorkflowExecutionEvent raw_event = 1;
-
-    core.TypedInterface output_interface = 2;
-
-    // The following are ExecutionMetadata fields
-    // We can't have the ExecutionMetadata object directly because of import cycle
-    repeated core.ArtifactID artifact_ids = 3;
-    core.WorkflowExecutionIdentifier reference_execution = 4;
-    string principal = 5;
-
-    // The ID of the LP that generated the execution that generated the Artifact.
-    // Here for provenance information.
-    // Launch plan IDs are easier to get than workflow IDs so we'll use these for now.
-    core.Identifier launch_plan_id = 6;
-}
-
-message CloudEventNodeExecution {
-    event.NodeExecutionEvent raw_event = 1;
-
-    // The relevant task execution if applicable
-    core.TaskExecutionIdentifier task_exec_id = 2;
-
-    // The typed interface for the task that produced the event.
-    core.TypedInterface output_interface = 3;
-
-    // The following are ExecutionMetadata fields
-    // We can't have the ExecutionMetadata object directly because of import cycle
-    repeated core.ArtifactID artifact_ids = 4;
-    string principal = 5;
-
-    // The ID of the LP that generated the execution that generated the Artifact.
-    // Here for provenance information.
-    // Launch plan IDs are easier to get than workflow IDs so we'll use these for now.
-    core.Identifier launch_plan_id = 6;
-}
-
-message CloudEventTaskExecution {
-    event.TaskExecutionEvent raw_event = 1;
-}
-
-// This event is to be sent by Admin after it creates an execution.
-message CloudEventExecutionStart {
-    // The execution created.
-    core.WorkflowExecutionIdentifier execution_id = 1;
-    // The launch plan used.
-    core.Identifier launch_plan_id = 2;
-
-    core.Identifier workflow_id = 3;
-
-    // Artifact inputs to the workflow execution for which we have the full Artifact ID. These are likely the result of artifact queries that are run.
-    repeated core.ArtifactID artifact_ids = 4;
-
-    // Artifact inputs to the workflow execution for which we only have the tracking bit that's installed into the Literal's metadata by the Artifact service.
-    repeated string artifact_trackers = 5;
-
-    string principal = 6;
-}
diff --git a/flyrs/protos/flyteidl/event/event.proto b/flyrs/protos/flyteidl/event/event.proto
deleted file mode 100644
index 641a3e4dae..0000000000
--- a/flyrs/protos/flyteidl/event/event.proto
+++ /dev/null
@@ -1,315 +0,0 @@
-syntax = "proto3";
-
-package flyteidl.event;
-
-option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/event";
-
-import "flyteidl/core/literals.proto";
-import "flyteidl/core/compiler.proto";
-import "flyteidl/core/execution.proto";
-import "flyteidl/core/identifier.proto";
-import "flyteidl/core/catalog.proto";
-import "google/protobuf/timestamp.proto";
-import "google/protobuf/struct.proto";
-
-
-message WorkflowExecutionEvent {
-    // Workflow execution id
-    core.WorkflowExecutionIdentifier execution_id = 1;
-
-    // the id of the originator (Propeller) of the event
-    string producer_id = 2;
-
-    core.WorkflowExecution.Phase phase = 3;
-
-    // This timestamp represents when the original event occurred, it is generated
-    // by the executor of the workflow.
-    google.protobuf.Timestamp occurred_at = 4;
-
-    oneof output_result {
-        // URL to the output of the execution, it encodes all the information
-        // including Cloud source provider. ie., s3://...
-        string output_uri = 5;
-
-        // Error information for the execution
-        core.ExecutionError error = 6;
-
-        // Raw output data produced by this workflow execution.
-        core.LiteralMap output_data = 7;
-    }
-}
-
-message NodeExecutionEvent {
-    // Unique identifier for this node execution
-    core.NodeExecutionIdentifier id = 1;
-
-    // the id of the originator (Propeller) of the event
-    string producer_id = 2;
-
-    core.NodeExecution.Phase phase = 3;
-
-    // This timestamp represents when the original event occurred, it is generated
-    // by the executor of the node.
-    google.protobuf.Timestamp occurred_at = 4;
-
-    oneof input_value {
-        string input_uri = 5;
-
-        // Raw input data consumed by this node execution.
-        core.LiteralMap input_data = 20;
-    }
-
-    oneof output_result {
-        // URL to the output of the execution, it encodes all the information
-        // including Cloud source provider. ie., s3://...
-        string output_uri = 6;
-
-        // Error information for the execution
-        core.ExecutionError error = 7;
-
-        // Raw output data produced by this node execution.
-        core.LiteralMap output_data = 15;
-    }
-
-    // Additional metadata to do with this event's node target based
-    // on the node type
-    oneof target_metadata {
-        WorkflowNodeMetadata workflow_node_metadata = 8;
-        TaskNodeMetadata task_node_metadata = 14;
-    }
-
-    // [To be deprecated] Specifies which task (if any) launched this node.
-    ParentTaskExecutionMetadata parent_task_metadata = 9;
-
-    // Specifies the parent node of the current node execution. Node executions at level zero will not have a parent node.
-    ParentNodeExecutionMetadata parent_node_metadata = 10;
-
-    // Retry group to indicate grouping of nodes by retries
-    string retry_group = 11;
-
-    // Identifier of the node in the original workflow/graph
-    // This maps to value of WorkflowTemplate.nodes[X].id
-    string spec_node_id = 12;
-
-    // Friendly readable name for the node
-    string node_name = 13;
-
-    int32 event_version = 16;
-
-    // Whether this node launched a subworkflow.
-    bool is_parent = 17;
-
-    // Whether this node yielded a dynamic workflow.
-    bool is_dynamic = 18;
-
-    // String location uniquely identifying where the deck HTML file is
-    // NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar)
-    string deck_uri = 19;
-
-    // This timestamp represents the instant when the event was reported by the executing framework. For example,
-    // when first processing a node the `occurred_at` timestamp should be the instant propeller makes progress, so when
-    // literal inputs are initially copied. The event however will not be sent until after the copy completes.
-    // Extracting both of these timestamps facilitates a more accurate portrayal of the evaluation time-series.
-    google.protobuf.Timestamp reported_at = 21;
-
-    // Indicates if this node is an ArrayNode.
-    bool is_array = 22;
-}
-
-// For Workflow Nodes we need to send information about the workflow that's launched
-message WorkflowNodeMetadata {
-    core.WorkflowExecutionIdentifier execution_id = 1;
-}
-
-message TaskNodeMetadata {
-    // Captures the status of caching for this execution.
-    core.CatalogCacheStatus cache_status = 1;
-    // This structure carries the catalog artifact information
-    core.CatalogMetadata catalog_key = 2;
-    // Captures the status of cache reservations for this execution.
-    core.CatalogReservation.Status reservation_status = 3;
-    // The latest checkpoint location
-    string checkpoint_uri = 4;
-
-    // In the case this task launched a dynamic workflow we capture its structure here.
-    DynamicWorkflowNodeMetadata dynamic_workflow = 16;
-}
-
-// For dynamic workflow nodes we send information about the dynamic workflow definition that gets generated.
-message DynamicWorkflowNodeMetadata {
-    // id represents the unique identifier of the workflow.
-    core.Identifier id = 1;
-
-    // Represents the compiled representation of the embedded dynamic workflow.
-    core.CompiledWorkflowClosure compiled_workflow = 2;
-
-    // dynamic_job_spec_uri is the location of the DynamicJobSpec proto message for this DynamicWorkflow. This is
-    // required to correctly recover partially completed executions where the workflow has already been compiled.
-    string dynamic_job_spec_uri = 3;
-}
-
-message ParentTaskExecutionMetadata {
-    core.TaskExecutionIdentifier id = 1;
-}
-
-message ParentNodeExecutionMetadata {
-    // Unique identifier of the parent node id within the execution
-    // This is value of core.NodeExecutionIdentifier.node_id of the parent node
-    string node_id = 1;
-}
-
-message EventReason {
-    // An explanation for this event
-    string reason = 1;
-
-    // The time this reason occurred
-    google.protobuf.Timestamp occurred_at = 2;
-}
-
-// Plugin specific execution event information. For tasks like Python, Hive, Spark, DynamicJob.
-message TaskExecutionEvent {
-    // ID of the task. In combination with the retryAttempt this will indicate
-    // the task execution uniquely for a given parent node execution.
-    core.Identifier task_id = 1;
-
-    // A task execution is always kicked off by a node execution, the event consumer
-    // will use the parent_id to relate the task to its parent node execution
-    core.NodeExecutionIdentifier parent_node_execution_id = 2;
-
-    // retry attempt number for this task, ie., 2 for the second attempt
-    uint32 retry_attempt = 3;
-
-    // Phase associated with the event
-    core.TaskExecution.Phase phase = 4;
-
-    // id of the process that sent this event, mainly for trace debugging
-    string producer_id = 5;
-
-    // log information for the task execution
-    repeated core.TaskLog logs = 6;
-
-    // This timestamp represents when the original event occurred, it is generated
-    // by the executor of the task.
-    google.protobuf.Timestamp occurred_at = 7;
-
-    oneof input_value {
-        // URI of the input file, it encodes all the information
-        // including Cloud source provider. ie., s3://...
-        string input_uri = 8;
-
-        // Raw input data consumed by this task execution.
-        core.LiteralMap input_data = 19;
-    }
-
-    oneof output_result {
-        // URI to the output of the execution, it will be in a format that encodes all the information
-        // including Cloud source provider. ie., s3://...
-        string output_uri = 9;
-
-        // Error information for the execution
-        core.ExecutionError error = 10;
-
-        // Raw output data produced by this task execution.
-        core.LiteralMap output_data = 17;
-    }
-
-    // Custom data that the task plugin sends back. This is extensible to allow various plugins in the system.
-    google.protobuf.Struct custom_info = 11;
-
-    // Some phases, like RUNNING, can send multiple events with changed metadata (new logs, additional custom_info, etc)
-    // that should be recorded regardless of the lack of phase change.
-    // The version field should be incremented when metadata changes across the duration of an individual phase.
-    uint32 phase_version = 12;
-
-    // An optional explanation for the phase transition.
-    // Deprecated: Use reasons instead.
-    string reason = 13 [deprecated = true];
-
-    // An optional list of explanations for the phase transition.
-    repeated EventReason reasons = 21;
-
-    // A predefined yet extensible Task type identifier. If the task definition is already registered in flyte admin
-    // this type will be identical, but not all task executions necessarily use pre-registered definitions and this
-    // type is useful to render the task in the UI, filter task executions, etc.
-    string task_type = 14;
-
-    // Metadata around how a task was executed.
-    TaskExecutionMetadata metadata = 16;
-
-    // The event version is used to indicate versioned changes in how data is reported using this
-    // proto message. For example, event_version > 0 means that maps tasks report logs using the
-    // TaskExecutionMetadata ExternalResourceInfo fields for each subtask rather than the TaskLog
-    // in this message.
-    int32 event_version = 18;
-
-    // This timestamp represents the instant when the event was reported by the executing framework. For example, a k8s
-    // pod task may be marked completed at (ie. `occurred_at`) the instant the container running user code completes,
-    // but this event will not be reported until the pod is marked as completed. Extracting both of these timestamps
-    // facilitates a more accurate portrayal of the evaluation time-series.
-    google.protobuf.Timestamp reported_at = 20;
-}
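To illustrate the phase_version semantics above, a sketch of a RUNNING-phase heartbeat event built from the assumed Python bindings; the ids and producer name are hypothetical.

from google.protobuf import timestamp_pb2
from flyteidl.core import execution_pb2, identifier_pb2
from flyteidl.event import event_pb2

occurred = timestamp_pb2.Timestamp()
occurred.GetCurrentTime()

# A RUNNING event for retry attempt 2; phase_version is bumped because the
# metadata (e.g. new logs) changed without a phase transition.
event = event_pb2.TaskExecutionEvent(
    task_id=identifier_pb2.Identifier(
        resource_type=identifier_pb2.ResourceType.TASK,
        project="flytesnacks", domain="development", name="my.task", version="v1",
    ),
    parent_node_execution_id=identifier_pb2.NodeExecutionIdentifier(node_id="n1"),
    retry_attempt=2,
    phase=execution_pb2.TaskExecution.RUNNING,
    phase_version=1,
    producer_id="propeller",
    occurred_at=occurred,
)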
-message ExternalResourceInfo { - - // Identifier for an external resource created by this task execution, for example a Qubole or Presto query ID. - string external_id = 1; - - // A unique index for the external resource with respect to all external resources for this task. Although the - // identifier may change between task reporting events or retries, this will remain the same to enable aggregating - // information from multiple reports. - uint32 index = 2; - - // Retry attempt number for this external resource, i.e., 2 for the second attempt - uint32 retry_attempt = 3; - - // Phase associated with the external resource - core.TaskExecution.Phase phase = 4; - - // Captures the status of caching for this external resource execution. - core.CatalogCacheStatus cache_status = 5; - - // log information for the external resource execution - repeated core.TaskLog logs = 6; -} - - -// This message holds task execution metadata specific to resource allocation used to manage concurrent -// executions for a project namespace. -message ResourcePoolInfo { - // Unique resource ID used to identify this execution when allocating a token. - string allocation_token = 1; - - // Namespace under which this task execution requested an allocation token. - string namespace = 2; -} - -// Holds metadata around how a task was executed. -// As a task transitions across event phases during execution some attributes, such as its generated name, generated external resources, -// and more may grow in size but not necessarily change based on the phase transition that sparked the event update. -// Metadata is a container for these attributes across the task execution lifecycle. -message TaskExecutionMetadata { - - // Unique, generated name for this task execution used by the backend. - string generated_name = 1; - - // Additional data on external resources on other back-ends or platforms (e.g. Hive, Qubole, etc.) launched by this task execution. - repeated ExternalResourceInfo external_resources = 2; - - // Includes additional data on concurrent resource management used during execution. - // This is a repeated field because a plugin can request multiple resource allocations during execution. - repeated ResourcePoolInfo resource_pool_info = 3; - - // The identifier of the plugin used to execute this task. - string plugin_identifier = 4; - - // Includes the broad category of machine used for this specific task execution. - enum InstanceClass { - // The default instance class configured for the flyte application platform. - DEFAULT = 0; - - // The instance class configured for interruptible tasks. - INTERRUPTIBLE = 1; - } - InstanceClass instance_class = 16; -} diff --git a/flyrs/protos/flyteidl/plugins/array_job.proto b/flyrs/protos/flyteidl/plugins/array_job.proto deleted file mode 100644 index e202316ef5..0000000000 --- a/flyrs/protos/flyteidl/plugins/array_job.proto +++ /dev/null @@ -1,30 +0,0 @@ -syntax = "proto3"; - -package flyteidl.plugins; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - -// Describes a job that can process independent pieces of data concurrently. Multiple copies of the runnable component -// will be executed concurrently. -message ArrayJob { - // Defines the maximum number of instances to bring up concurrently at any given point. Note that this is an - // optimistic restriction and that, due to network partitioning or other failures, the actual number of currently - // running instances might be more. This has to be a positive number if assigned.
Default value is size. - int64 parallelism = 1; - - // Defines the number of instances to launch at most. This number should match the size of the input if the job - // requires processing of all input data. This has to be a positive number. - // In the case this is not defined, the back-end will determine the size at run-time by reading the inputs. - int64 size = 2; - - oneof success_criteria { - // An absolute number of the minimum number of successful completions of subtasks. As soon as this criterion is met, - // the array job will be marked as successful and outputs will be computed. This has to be a non-negative number if - // assigned. Default value is size (if specified). - int64 min_successes = 3; - - // If the array job size is not known beforehand, the min_success_ratio can instead be used to determine when an array - // job can be marked successful. - float min_success_ratio = 4; - } -} diff --git a/flyrs/protos/flyteidl/plugins/dask.proto b/flyrs/protos/flyteidl/plugins/dask.proto deleted file mode 100644 index 6c5ecd9daf..0000000000 --- a/flyrs/protos/flyteidl/plugins/dask.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -import "flyteidl/core/tasks.proto"; - -package flyteidl.plugins; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - - -// Custom Proto for Dask Plugin. -message DaskJob { - // Spec for the scheduler pod. - DaskScheduler scheduler = 1; - - // Spec of the default worker group. - DaskWorkerGroup workers = 2; -} - -// Specification for the scheduler pod. -message DaskScheduler { - // Optional image to use. If unset, will use the default image. - string image = 1; - - // Resources assigned to the scheduler pod. - core.Resources resources = 2; -} - -message DaskWorkerGroup { - // Number of workers in the group. - uint32 number_of_workers = 1; - - // Optional image to use for the pods of the worker group. If unset, will use the default image. - string image = 2; - - // Resources assigned to all pods of the worker group. - // As per https://kubernetes.dask.org/en/latest/kubecluster.html?highlight=limit#best-practices - // it is advised to only set limits. If requests are not explicitly set, the plugin will make - // sure to set requests==limits. - // The plugin sets `--memory-limit` as well as `--nthreads` for the workers according to the limit. - core.Resources resources = 3; -} diff --git a/flyrs/protos/flyteidl/plugins/kubeflow/common.proto b/flyrs/protos/flyteidl/plugins/kubeflow/common.proto deleted file mode 100644 index bde59e8b32..0000000000 --- a/flyrs/protos/flyteidl/plugins/kubeflow/common.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; - -package flyteidl.plugins.kubeflow; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - - -enum RestartPolicy { - RESTART_POLICY_NEVER = 0; - RESTART_POLICY_ON_FAILURE = 1; - RESTART_POLICY_ALWAYS = 2; -} - -enum CleanPodPolicy { - CLEANPOD_POLICY_NONE = 0; - CLEANPOD_POLICY_RUNNING = 1; - CLEANPOD_POLICY_ALL = 2; -} - -message RunPolicy { - // Defines the policy to kill pods after the job completes. Defaults to None. - CleanPodPolicy clean_pod_policy = 1; - - // TTL to clean up jobs. Defaults to infinite. - int32 ttl_seconds_after_finished = 2; - - // Specifies the duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be a positive integer. - int32 active_deadline_seconds = 3; - - // Number of retries before marking this job failed.
- int32 backoff_limit = 4; -} diff --git a/flyrs/protos/flyteidl/plugins/kubeflow/mpi.proto b/flyrs/protos/flyteidl/plugins/kubeflow/mpi.proto deleted file mode 100644 index 5da5fb8d6e..0000000000 --- a/flyrs/protos/flyteidl/plugins/kubeflow/mpi.proto +++ /dev/null @@ -1,43 +0,0 @@ -syntax = "proto3"; - -package flyteidl.plugins.kubeflow; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - -import "flyteidl/core/tasks.proto"; -import "flyteidl/plugins/kubeflow/common.proto"; - -// Proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator -message DistributedMPITrainingTask { - // Worker replicas spec - DistributedMPITrainingReplicaSpec worker_replicas = 1; - - // Master replicas spec - DistributedMPITrainingReplicaSpec launcher_replicas = 2; - - // RunPolicy encapsulates various runtime policies of the distributed training - // job, for example how to clean up resources and how long the job can stay - // active. - RunPolicy run_policy = 3; - - // Number of slots per worker - int32 slots = 4; -} - -// Replica specification for distributed MPI training -message DistributedMPITrainingReplicaSpec { - // Number of replicas - int32 replicas = 1; - - // Image used for the replica group - string image = 2; - - // Resources required for the replica group - core.Resources resources = 3; - - // Restart policy determines whether pods will be restarted when they exit - RestartPolicy restart_policy = 4; - - // MPI sometimes requires different command set for different replica groups - repeated string command = 5; -} diff --git a/flyrs/protos/flyteidl/plugins/kubeflow/pytorch.proto b/flyrs/protos/flyteidl/plugins/kubeflow/pytorch.proto deleted file mode 100644 index c6838b2d1b..0000000000 --- a/flyrs/protos/flyteidl/plugins/kubeflow/pytorch.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; - -package flyteidl.plugins.kubeflow; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - -import "flyteidl/core/tasks.proto"; -import "flyteidl/plugins/kubeflow/common.proto"; - -// Custom proto for torch elastic config for distributed training using -// https://github.com/kubeflow/training-operator/blob/master/pkg/apis/kubeflow.org/v1/pytorch_types.go -message ElasticConfig { - string rdzv_backend = 1; - int32 min_replicas = 2; - int32 max_replicas = 3; - int32 nproc_per_node = 4; - int32 max_restarts = 5; -} - -// Proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator -message DistributedPyTorchTrainingTask { - // Worker replicas spec - DistributedPyTorchTrainingReplicaSpec worker_replicas = 1; - - // Master replicas spec, master replicas can only have 1 replica - DistributedPyTorchTrainingReplicaSpec master_replicas = 2; - - // RunPolicy encapsulates various runtime policies of the distributed training - // job, for example how to clean up resources and how long the job can stay - // active. 
- RunPolicy run_policy = 3; - - // config for an elastic pytorch job - ElasticConfig elastic_config = 4; -} - -message DistributedPyTorchTrainingReplicaSpec { - // Number of replicas - int32 replicas = 1; - - // Image used for the replica group - string image = 2; - - // Resources required for the replica group - core.Resources resources = 3; - - // RestartPolicy determines whether pods will be restarted when they exit - RestartPolicy restart_policy = 4; -} diff --git a/flyrs/protos/flyteidl/plugins/kubeflow/tensorflow.proto b/flyrs/protos/flyteidl/plugins/kubeflow/tensorflow.proto deleted file mode 100644 index 789666b989..0000000000 --- a/flyrs/protos/flyteidl/plugins/kubeflow/tensorflow.proto +++ /dev/null @@ -1,42 +0,0 @@ -syntax = "proto3"; - -package flyteidl.plugins.kubeflow; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - -import "flyteidl/core/tasks.proto"; -import "flyteidl/plugins/kubeflow/common.proto"; - -// Proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator -message DistributedTensorflowTrainingTask { - // Worker replicas spec - DistributedTensorflowTrainingReplicaSpec worker_replicas = 1; - - // Parameter server replicas spec - DistributedTensorflowTrainingReplicaSpec ps_replicas = 2; - - // Chief replicas spec - DistributedTensorflowTrainingReplicaSpec chief_replicas = 3; - - // RunPolicy encapsulates various runtime policies of the distributed training - // job, for example how to clean up resources and how long the job can stay - // active. - RunPolicy run_policy = 4; - - // Evaluator replicas spec - DistributedTensorflowTrainingReplicaSpec evaluator_replicas = 5; -} - -message DistributedTensorflowTrainingReplicaSpec { - // Number of replicas - int32 replicas = 1; - - // Image used for the replica group - string image = 2; - - // Resources required for the replica group - core.Resources resources = 3; - - // RestartPolicy determines whether pods will be restarted when they exit - RestartPolicy restart_policy = 4; -} diff --git a/flyrs/protos/flyteidl/plugins/mpi.proto b/flyrs/protos/flyteidl/plugins/mpi.proto deleted file mode 100644 index 69945b7a88..0000000000 --- a/flyrs/protos/flyteidl/plugins/mpi.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; - -package flyteidl.plugins; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - -// MPI operator proposal https://github.com/kubeflow/community/blob/master/proposals/mpi-operator-proposal.md -// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator -message DistributedMPITrainingTask { - // number of workers spawned in the cluster for this job - int32 num_workers = 1; - - // number of launcher replicas spawned in the cluster for this job - // The launcher pod invokes mpirun and communicates with worker pods through MPI. - int32 num_launcher_replicas = 2; - - // number of slots per worker used in hostfile. - // The available slots (GPUs) in each pod.
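An aside for reviewers: the kubeflow task messages above are what ends up in a task template's `custom` field, so it may help to see one composed end to end. A hedged sketch, reusing the assumed `flyteidl` module layout from the earlier example; all values are illustrative:

```rust
// Hedged sketch: composing the kubeflow PyTorch custom proto from above.
use flyteidl::plugins::kubeflow::{
    DistributedPyTorchTrainingReplicaSpec, DistributedPyTorchTrainingTask,
    ElasticConfig, RunPolicy,
};

fn elastic_pytorch_task() -> DistributedPyTorchTrainingTask {
    DistributedPyTorchTrainingTask {
        worker_replicas: Some(DistributedPyTorchTrainingReplicaSpec {
            replicas: 4,
            ..Default::default() // image/resources fall back to platform defaults
        }),
        run_policy: Some(RunPolicy {
            clean_pod_policy: 1, // CLEANPOD_POLICY_RUNNING; prost stores enums as i32
            ttl_seconds_after_finished: 3600,
            active_deadline_seconds: 0, // left unset: no deadline
            backoff_limit: 3,           // retries before the job is marked failed
        }),
        elastic_config: Some(ElasticConfig {
            rdzv_backend: "c10d".to_string(),
            min_replicas: 2,
            max_replicas: 4,
            nproc_per_node: 8,
            max_restarts: 3,
        }),
        ..Default::default() // master_replicas omitted in this sketch
    }
}
```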
- int32 slots = 3; -} diff --git a/flyrs/protos/flyteidl/plugins/presto.proto b/flyrs/protos/flyteidl/plugins/presto.proto deleted file mode 100644 index 5ff3a8a2e0..0000000000 --- a/flyrs/protos/flyteidl/plugins/presto.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; - -package flyteidl.plugins; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - -// This message works with the 'presto' task type in the SDK and is the object that will be in the 'custom' field -// of a Presto task's TaskTemplate -message PrestoQuery { - string routing_group = 1; - string catalog = 2; - string schema = 3; - string statement = 4; -} diff --git a/flyrs/protos/flyteidl/plugins/pytorch.proto b/flyrs/protos/flyteidl/plugins/pytorch.proto deleted file mode 100644 index 51972f81c4..0000000000 --- a/flyrs/protos/flyteidl/plugins/pytorch.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; - -package flyteidl.plugins; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - -// Custom proto for torch elastic config for distributed training using -// https://github.com/kubeflow/training-operator/blob/master/pkg/apis/kubeflow.org/v1/pytorch_types.go -message ElasticConfig { - string rdzv_backend = 1; - int32 min_replicas = 2; - int32 max_replicas = 3; - int32 nproc_per_node = 4; - int32 max_restarts = 5; -} - -// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator -message DistributedPyTorchTrainingTask { - // number of worker replicas spawned in the cluster for this job - int32 workers = 1; - - // config for an elastic pytorch job - // - ElasticConfig elastic_config = 2; -} diff --git a/flyrs/protos/flyteidl/plugins/qubole.proto b/flyrs/protos/flyteidl/plugins/qubole.proto deleted file mode 100644 index b1faada9f3..0000000000 --- a/flyrs/protos/flyteidl/plugins/qubole.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package flyteidl.plugins; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - -// Defines a query to execute on a hive cluster. -message HiveQuery { - string query = 1; - uint32 timeout_sec = 2; - uint32 retryCount = 3; -} - -// Defines a collection of hive queries. -message HiveQueryCollection { - repeated HiveQuery queries = 2; -} - -// This message works with the 'hive' task type in the SDK and is the object that will be in the 'custom' field -// of a hive task's TaskTemplate -message QuboleHiveJob { - string cluster_label = 1; - HiveQueryCollection query_collection = 2 [deprecated=true]; - repeated string tags = 3; - HiveQuery query = 4; -} diff --git a/flyrs/protos/flyteidl/plugins/ray.proto b/flyrs/protos/flyteidl/plugins/ray.proto deleted file mode 100644 index 1afcee8d93..0000000000 --- a/flyrs/protos/flyteidl/plugins/ray.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; - -package flyteidl.plugins; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - -// RayJobSpec defines the desired state of RayJob -message RayJob { - // RayClusterSpec is the cluster template to run the job - RayCluster ray_cluster = 1; - // runtime_env is base64 encoded. - // Ray runtime environments: https://docs.ray.io/en/latest/ray-core/handling-dependencies.html#runtime-environments - string runtime_env = 2; - // shutdown_after_job_finishes specifies whether the RayCluster should be deleted after the RayJob finishes. 
- bool shutdown_after_job_finishes = 3; - // ttl_seconds_after_finished specifies the number of seconds after which the RayCluster will be deleted after the RayJob finishes. - int32 ttl_seconds_after_finished = 4; -} - -// RayCluster defines the desired state of RayCluster -message RayCluster { - // HeadGroupSpec is the spec for the head pod - HeadGroupSpec head_group_spec = 1; - // WorkerGroupSpecs are the specs for the worker pods - repeated WorkerGroupSpec worker_group_spec = 2; - // Whether to enable autoscaling. - bool enable_autoscaling = 3; -} - -// HeadGroupSpec is the spec for the head pod -message HeadGroupSpec { - // Optional. RayStartParams are the params of the start command: address, object-store-memory. - // Refer to https://docs.ray.io/en/latest/ray-core/package-ref.html#ray-start - map<string, string> ray_start_params = 1; -} - -// WorkerGroupSpec is the spec for the worker pods -message WorkerGroupSpec { - // Required. RayCluster can have multiple worker groups, and it distinguishes them by name - string group_name = 1; - // Required. Desired replicas of the worker group. Defaults to 1. - int32 replicas = 2; - // Optional. Min replicas of the worker group. MinReplicas defaults to 1. - int32 min_replicas = 3; - // Optional. Max replicas of the worker group. MaxReplicas defaults to maxInt32 - int32 max_replicas = 4; - // Optional. RayStartParams are the params of the start command: address, object-store-memory. - // Refer to https://docs.ray.io/en/latest/ray-core/package-ref.html#ray-start - map<string, string> ray_start_params = 5; -} diff --git a/flyrs/protos/flyteidl/plugins/spark.proto b/flyrs/protos/flyteidl/plugins/spark.proto deleted file mode 100644 index 666ea311b2..0000000000 --- a/flyrs/protos/flyteidl/plugins/spark.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; - -package flyteidl.plugins; -import "google/protobuf/struct.proto"; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - -message SparkApplication { - enum Type { - PYTHON = 0; - JAVA = 1; - SCALA = 2; - R = 3; - } -} - -// Custom Proto for Spark Plugin. -message SparkJob { - SparkApplication.Type applicationType = 1; - string mainApplicationFile = 2; - string mainClass = 3; - map<string, string> sparkConf = 4; - map<string, string> hadoopConf = 5; - string executorPath = 6; // Executor path for Python jobs. - // Databricks job configuration. - // Config structure can be found here: https://docs.databricks.com/dev-tools/api/2.0/jobs.html#request-structure. - google.protobuf.Struct databricksConf = 7; - // Databricks access token. https://docs.databricks.com/dev-tools/api/latest/authentication.html - // This token can be set in either flytepropeller or flytekit. - string databricksToken = 8; - // Domain name of your deployment. Use the form <account>.cloud.databricks.com. - // This instance name can be set in either flytepropeller or flytekit.
- string databricksInstance = 9; -} diff --git a/flyrs/protos/flyteidl/plugins/tensorflow.proto b/flyrs/protos/flyteidl/plugins/tensorflow.proto deleted file mode 100644 index b5f2d04561..0000000000 --- a/flyrs/protos/flyteidl/plugins/tensorflow.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; - -package flyteidl.plugins; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - -// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator -message DistributedTensorflowTrainingTask { - // number of worker replicas spawned in the cluster for this job - int32 workers = 1; - // PS -> Parameter server - // number of ps replicas spawned in the cluster for this job - int32 ps_replicas = 2; - // number of chief replicas spawned in the cluster for this job - int32 chief_replicas = 3; - // number of evaluator replicas spawned in the cluster for this job - int32 evaluator_replicas = 4; -} diff --git a/flyrs/protos/flyteidl/plugins/waitable.proto b/flyrs/protos/flyteidl/plugins/waitable.proto deleted file mode 100644 index dd2138d535..0000000000 --- a/flyrs/protos/flyteidl/plugins/waitable.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; - -import "flyteidl/core/execution.proto"; -import "flyteidl/core/identifier.proto"; - -package flyteidl.plugins; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/plugins"; - -// Represents an Execution that was launched and could be waited on. -message Waitable { - core.WorkflowExecutionIdentifier wf_exec_id = 1; - core.WorkflowExecution.Phase phase = 2; - string workflow_id = 3; -} diff --git a/flyrs/protos/flyteidl/service/admin.proto b/flyrs/protos/flyteidl/service/admin.proto deleted file mode 100644 index 2004842b0b..0000000000 --- a/flyrs/protos/flyteidl/service/admin.proto +++ /dev/null @@ -1,659 +0,0 @@ -syntax = "proto3"; -package flyteidl.service; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service"; - -import "google/api/annotations.proto"; -import "flyteidl/admin/project.proto"; -import "flyteidl/admin/project_domain_attributes.proto"; -import "flyteidl/admin/project_attributes.proto"; -import "flyteidl/admin/task.proto"; -import "flyteidl/admin/workflow.proto"; -import "flyteidl/admin/workflow_attributes.proto"; -import "flyteidl/admin/launch_plan.proto"; -import "flyteidl/admin/event.proto"; -import "flyteidl/admin/execution.proto"; -import "flyteidl/admin/matchable_resource.proto"; -import "flyteidl/admin/node_execution.proto"; -import "flyteidl/admin/task_execution.proto"; -import "flyteidl/admin/version.proto"; -import "flyteidl/admin/common.proto"; -import "flyteidl/admin/description_entity.proto"; -import "protoc-gen-openapiv2/options/annotations.proto"; - - -// The following defines an RPC service that is also served over HTTP via grpc-gateway. -// Standard response codes for both are defined here: https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/errors.go -service AdminService { - // Create and upload a :ref:`ref_flyteidl.admin.Task` definition - rpc CreateTask (flyteidl.admin.TaskCreateRequest) returns (flyteidl.admin.TaskCreateResponse) { - option (google.api.http) = { - post: "/api/v1/tasks" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Create and register a task definition." - responses: { - key: "400" - value: { - description: "Returned for bad request that may have failed validation." 
- } - } - responses: { - key: "409" - value: { - description: "Returned for a request that references an identical entity that has already been registered." - } - } - }; - } - - // Fetch a :ref:`ref_flyteidl.admin.Task` definition. - rpc GetTask (flyteidl.admin.ObjectGetRequest) returns (flyteidl.admin.Task) { - option (google.api.http) = { - get: "/api/v1/tasks/{id.project}/{id.domain}/{id.name}/{id.version}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve an existing task definition." - }; - } - - // Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of task objects. - rpc ListTaskIds (flyteidl.admin.NamedEntityIdentifierListRequest) returns (flyteidl.admin.NamedEntityIdentifierList) { - option (google.api.http) = { - get: "/api/v1/task_ids/{project}/{domain}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Fetch existing task definition identifiers matching input filters." - }; - } - - // Fetch a list of :ref:`ref_flyteidl.admin.Task` definitions. - rpc ListTasks (flyteidl.admin.ResourceListRequest) returns (flyteidl.admin.TaskList) { - option (google.api.http) = { - get: "/api/v1/tasks/{id.project}/{id.domain}/{id.name}" - additional_bindings { - get: "/api/v1/tasks/{id.project}/{id.domain}", - } - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Fetch existing task definitions matching input filters." - }; - } - - // Create and upload a :ref:`ref_flyteidl.admin.Workflow` definition - rpc CreateWorkflow (flyteidl.admin.WorkflowCreateRequest) returns (flyteidl.admin.WorkflowCreateResponse) { - option (google.api.http) = { - post: "/api/v1/workflows" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Create and register a workflow definition." - responses: { - key: "400" - value: { - description: "Returned for bad request that may have failed validation." - } - } - responses: { - key: "409" - value: { - description: "Returned for a request that references an identical entity that has already been registered." - } - } - }; - } - - // Fetch a :ref:`ref_flyteidl.admin.Workflow` definition. - rpc GetWorkflow (flyteidl.admin.ObjectGetRequest) returns (flyteidl.admin.Workflow) { - option (google.api.http) = { - get: "/api/v1/workflows/{id.project}/{id.domain}/{id.name}/{id.version}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve an existing workflow definition." - }; - } - - // Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of workflow objects. - rpc ListWorkflowIds (flyteidl.admin.NamedEntityIdentifierListRequest) returns (flyteidl.admin.NamedEntityIdentifierList) { - option (google.api.http) = { - get: "/api/v1/workflow_ids/{project}/{domain}" - }; - // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - // description: "Fetch an existing workflow definition identifiers matching input filters." - // }; - } - - // Fetch a list of :ref:`ref_flyteidl.admin.Workflow` definitions. 
- rpc ListWorkflows (flyteidl.admin.ResourceListRequest) returns (flyteidl.admin.WorkflowList) { - option (google.api.http) = { - get: "/api/v1/workflows/{id.project}/{id.domain}/{id.name}" - additional_bindings { - get: "/api/v1/workflows/{id.project}/{id.domain}", - } - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Fetch existing workflow definitions matching input filters." - }; - } - - // Create and upload a :ref:`ref_flyteidl.admin.LaunchPlan` definition - rpc CreateLaunchPlan (flyteidl.admin.LaunchPlanCreateRequest) returns (flyteidl.admin.LaunchPlanCreateResponse) { - option (google.api.http) = { - post: "/api/v1/launch_plans" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Create and register a launch plan definition." - responses: { - key: "400" - value: { - description: "Returned for bad request that may have failed validation." - } - } - responses: { - key: "409" - value: { - description: "Returned for a request that references an identical entity that has already been registered." - } - } - }; - } - - // Fetch a :ref:`ref_flyteidl.admin.LaunchPlan` definition. - rpc GetLaunchPlan (flyteidl.admin.ObjectGetRequest) returns (flyteidl.admin.LaunchPlan) { - option (google.api.http) = { - get: "/api/v1/launch_plans/{id.project}/{id.domain}/{id.name}/{id.version}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve an existing launch plan definition." - }; - } - - // Fetch the active version of a :ref:`ref_flyteidl.admin.LaunchPlan`. - rpc GetActiveLaunchPlan (flyteidl.admin.ActiveLaunchPlanRequest) returns (flyteidl.admin.LaunchPlan) { - option (google.api.http) = { - get: "/api/v1/active_launch_plans/{id.project}/{id.domain}/{id.name}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve the active launch plan version specified by input request filters." - }; - } - - // List active versions of :ref:`ref_flyteidl.admin.LaunchPlan`. - rpc ListActiveLaunchPlans (flyteidl.admin.ActiveLaunchPlanListRequest) returns (flyteidl.admin.LaunchPlanList) { - option (google.api.http) = { - get: "/api/v1/active_launch_plans/{project}/{domain}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Fetch the active launch plan versions specified by input request filters." - }; - } - - // Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of launch plan objects. - rpc ListLaunchPlanIds (flyteidl.admin.NamedEntityIdentifierListRequest) returns (flyteidl.admin.NamedEntityIdentifierList) { - option (google.api.http) = { - get: "/api/v1/launch_plan_ids/{project}/{domain}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Fetch existing launch plan definition identifiers matching input filters." - }; - } - - // Fetch a list of :ref:`ref_flyteidl.admin.LaunchPlan` definitions. - rpc ListLaunchPlans (flyteidl.admin.ResourceListRequest) returns (flyteidl.admin.LaunchPlanList) { - option (google.api.http) = { - get: "/api/v1/launch_plans/{id.project}/{id.domain}/{id.name}" - additional_bindings { - get: "/api/v1/launch_plans/{id.project}/{id.domain}" - } - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Fetch existing launch plan definitions matching input filters." 
- }; - } - - // Updates the status of a registered :ref:`ref_flyteidl.admin.LaunchPlan`. - rpc UpdateLaunchPlan (flyteidl.admin.LaunchPlanUpdateRequest) returns (flyteidl.admin.LaunchPlanUpdateResponse) { - option (google.api.http) = { - put: "/api/v1/launch_plans/{id.project}/{id.domain}/{id.name}/{id.version}" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Update the status of an existing launch plan definition. " - "At most one launch plan version for a given {project, domain, name} can be active at a time. " - "If this call sets a launch plan to active and an existing version is already active, the result of this call will be that the " - "formerly active launch plan will be made inactive and the specified launch plan in this request will be made active. " - "In the event that the formerly active launch plan had a schedule associated with it, this schedule will be disabled. " - "If the referenced launch plan in this request is being set to active and has a schedule associated with it, the schedule will be enabled." - }; - } - - // Triggers the creation of a :ref:`ref_flyteidl.admin.Execution` - rpc CreateExecution (flyteidl.admin.ExecutionCreateRequest) returns (flyteidl.admin.ExecutionCreateResponse) { - option (google.api.http) = { - post: "/api/v1/executions" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Create a workflow execution." - }; - } - - // Triggers the creation of an identical :ref:`ref_flyteidl.admin.Execution` - rpc RelaunchExecution (flyteidl.admin.ExecutionRelaunchRequest) returns (flyteidl.admin.ExecutionCreateResponse) { - option (google.api.http) = { - post: "/api/v1/executions/relaunch" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Relaunch a workflow execution." - }; - } - - // Recreates a previously-run workflow execution that will only start executing from the last known failure point. - // In Recover mode, users cannot change any input parameters or update the version of the execution. - // This is extremely useful to recover from system errors and byzantine faults like loss of the K8s cluster, bugs or instability in the platform, machine failures, - // downstream system failures (downstream services), or simply to recover executions that failed because of retry exhaustion and should complete if tried again. - // See :ref:`ref_flyteidl.admin.ExecutionRecoverRequest` for more details. - rpc RecoverExecution (flyteidl.admin.ExecutionRecoverRequest) returns (flyteidl.admin.ExecutionCreateResponse) { - option (google.api.http) = { - post: "/api/v1/executions/recover" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Recreates a previously-run workflow execution that will only start executing from the last known failure point. " - "In Recover mode, users cannot change any input parameters or update the version of the execution. " - "This is extremely useful to recover from system errors and byzantine faults like loss of the K8s cluster, bugs or instability in the platform, machine failures, " - "downstream system failures (downstream services), or simply to recover executions that failed because of retry exhaustion and should complete if tried again." - }; - } - - // Fetches a :ref:`ref_flyteidl.admin.Execution`.
- rpc GetExecution (flyteidl.admin.WorkflowExecutionGetRequest) returns (flyteidl.admin.Execution) { - option (google.api.http) = { - get: "/api/v1/executions/{id.project}/{id.domain}/{id.name}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve an existing workflow execution." - }; - } - - // Update execution belonging to project domain :ref:`ref_flyteidl.admin.Execution`. - rpc UpdateExecution (flyteidl.admin.ExecutionUpdateRequest) returns (flyteidl.admin.ExecutionUpdateResponse) { - option (google.api.http) = { - put: "/api/v1/executions/{id.project}/{id.domain}/{id.name}" - body: "*" - }; - // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - // description: "Update execution belonging to project domain." - // }; - } - - // Fetches input and output data for a :ref:`ref_flyteidl.admin.Execution`. - rpc GetExecutionData (flyteidl.admin.WorkflowExecutionGetDataRequest) returns (flyteidl.admin.WorkflowExecutionGetDataResponse) { - option (google.api.http) = { - get: "/api/v1/data/executions/{id.project}/{id.domain}/{id.name}" - }; - // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - // description: "Retrieve input and output data from an existing workflow execution." - // }; - }; - - // Fetch a list of :ref:`ref_flyteidl.admin.Execution`. - rpc ListExecutions (flyteidl.admin.ResourceListRequest) returns (flyteidl.admin.ExecutionList) { - option (google.api.http) = { - get: "/api/v1/executions/{id.project}/{id.domain}" - }; - // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - // description: "Fetch existing workflow executions matching input filters." - // }; - } - - // Terminates an in-progress :ref:`ref_flyteidl.admin.Execution`. - rpc TerminateExecution (flyteidl.admin.ExecutionTerminateRequest) returns (flyteidl.admin.ExecutionTerminateResponse) { - option (google.api.http) = { - delete: "/api/v1/executions/{id.project}/{id.domain}/{id.name}" - body: "*" - }; - // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - // description: "Terminate the active workflow execution specified in the request." - // }; - } - - // Fetches a :ref:`ref_flyteidl.admin.NodeExecution`. - rpc GetNodeExecution (flyteidl.admin.NodeExecutionGetRequest) returns (flyteidl.admin.NodeExecution) { - option (google.api.http) = { - get: "/api/v1/node_executions/{id.execution_id.project}/{id.execution_id.domain}/{id.execution_id.name}/{id.node_id}" - }; - // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - // description: "Retrieve an existing node execution." - // }; - } - - // Fetches a :ref:`ref_flyteidl.admin.DynamicNodeWorkflowResponse`. - rpc GetDynamicNodeWorkflow (flyteidl.admin.GetDynamicNodeWorkflowRequest) returns (flyteidl.admin.DynamicNodeWorkflowResponse) { - option (google.api.http) = { - get: "/api/v1/node_executions/{id.execution_id.project}/{id.execution_id.domain}/{id.execution_id.name}/{id.node_id}/dynamic_workflow" - }; - // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - // description: "Retrieve a workflow closure from a dynamic node execution." - // }; - } - - // Fetch a list of :ref:`ref_flyteidl.admin.NodeExecution`. 
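These execution RPCs are exactly what the new Rust client wraps, so a hedged sketch of the call path may be useful while reviewing. Client and module names are assumptions based on the tonic codegen in flyrs (the generated client should live under something like `flyteidl::service::admin_service_client`); the endpoint and execution name are placeholders:

```rust
// Hedged sketch: fetching a workflow execution over the generated tonic client.
use flyteidl::admin::WorkflowExecutionGetRequest;
use flyteidl::core::WorkflowExecutionIdentifier;
use flyteidl::service::admin_service_client::AdminServiceClient;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoint; flyrs would take this from the flytekit platform config.
    let mut client = AdminServiceClient::connect("http://localhost:8089").await?;

    let request = WorkflowExecutionGetRequest {
        id: Some(WorkflowExecutionIdentifier {
            project: "flytesnacks".to_string(),
            domain: "development".to_string(),
            name: "f8abc123".to_string(), // hypothetical execution name
            ..Default::default()
        }),
    };

    // gRPC twin of `GET /api/v1/executions/{id.project}/{id.domain}/{id.name}`.
    let execution = client.get_execution(request).await?.into_inner();
    println!("closure present: {}", execution.closure.is_some());
    Ok(())
}
```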
- rpc ListNodeExecutions (flyteidl.admin.NodeExecutionListRequest) returns (flyteidl.admin.NodeExecutionList) { - option (google.api.http) = { - get: "/api/v1/node_executions/{workflow_execution_id.project}/{workflow_execution_id.domain}/{workflow_execution_id.name}" - }; - // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - // description: "Fetch existing node executions matching input filters." - // }; - } - - // Fetch a list of :ref:`ref_flyteidl.admin.NodeExecution` launched by the reference :ref:`ref_flyteidl.admin.TaskExecution`. - rpc ListNodeExecutionsForTask (flyteidl.admin.NodeExecutionForTaskListRequest) returns (flyteidl.admin.NodeExecutionList) { - option (google.api.http) = { - get: "/api/v1/children/task_executions/{task_execution_id.node_execution_id.execution_id.project}/{task_execution_id.node_execution_id.execution_id.domain}/{task_execution_id.node_execution_id.execution_id.name}/{task_execution_id.node_execution_id.node_id}/{task_execution_id.task_id.project}/{task_execution_id.task_id.domain}/{task_execution_id.task_id.name}/{task_execution_id.task_id.version}/{task_execution_id.retry_attempt}" - }; - // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - // description: "Fetch child node executions launched by the specified task execution." - // }; - } - - // Fetches input and output data for a :ref:`ref_flyteidl.admin.NodeExecution`. - rpc GetNodeExecutionData (flyteidl.admin.NodeExecutionGetDataRequest) returns (flyteidl.admin.NodeExecutionGetDataResponse) { - option (google.api.http) = { - get: "/api/v1/data/node_executions/{id.execution_id.project}/{id.execution_id.domain}/{id.execution_id.name}/{id.node_id}" - }; - // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - // description: "Retrieve input and output data from an existing node execution." - // }; - }; - - // Registers a :ref:`ref_flyteidl.admin.Project` with the Flyte deployment. - rpc RegisterProject (flyteidl.admin.ProjectRegisterRequest) returns (flyteidl.admin.ProjectRegisterResponse) { - option (google.api.http) = { - post: "/api/v1/projects" - body: "*" - }; - // option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - // description: "Register a project." - // }; - } - - // Updates an existing :ref:`ref_flyteidl.admin.Project` - // flyteidl.admin.Project should be passed but the domains property should be empty; - // it will be ignored in the handler as domains cannot be updated via this API. - rpc UpdateProject (flyteidl.admin.Project) returns (flyteidl.admin.ProjectUpdateResponse) { - option (google.api.http) = { - put: "/api/v1/projects/{id}" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Update a project." - }; - } - - // Fetches a :ref:`ref_flyteidl.admin.Project` - rpc GetProject (flyteidl.admin.ProjectGetRequest) returns (flyteidl.admin.Project) { - option (google.api.http) = { - get: "/api/v1/projects/{id}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Fetch a registered project." - }; - } - - // Fetches a list of :ref:`ref_flyteidl.admin.Project` - rpc ListProjects (flyteidl.admin.ProjectListRequest) returns (flyteidl.admin.Projects) { - option (google.api.http) = { - get: "/api/v1/projects" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Fetch registered projects." 
- }; - } - - // Indicates a :ref:`ref_flyteidl.event.WorkflowExecutionEvent` has occurred. - rpc CreateWorkflowEvent (flyteidl.admin.WorkflowExecutionEventRequest) returns (flyteidl.admin.WorkflowExecutionEventResponse) { - option (google.api.http) = { - post: "/api/v1/events/workflows" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Create a workflow execution event recording a phase transition." - }; - } - - // Indicates a :ref:`ref_flyteidl.event.NodeExecutionEvent` has occurred. - rpc CreateNodeEvent (flyteidl.admin.NodeExecutionEventRequest) returns (flyteidl.admin.NodeExecutionEventResponse) { - option (google.api.http) = { - post: "/api/v1/events/nodes" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Create a node execution event recording a phase transition." - }; - } - - // Indicates a :ref:`ref_flyteidl.event.TaskExecutionEvent` has occurred. - rpc CreateTaskEvent (flyteidl.admin.TaskExecutionEventRequest) returns (flyteidl.admin.TaskExecutionEventResponse) { - option (google.api.http) = { - post: "/api/v1/events/tasks" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Create a task execution event recording a phase transition." - }; - } - - // Fetches a :ref:`ref_flyteidl.admin.TaskExecution`. - rpc GetTaskExecution (flyteidl.admin.TaskExecutionGetRequest) returns (flyteidl.admin.TaskExecution) { - option (google.api.http) = { - get: "/api/v1/task_executions/{id.node_execution_id.execution_id.project}/{id.node_execution_id.execution_id.domain}/{id.node_execution_id.execution_id.name}/{id.node_execution_id.node_id}/{id.task_id.project}/{id.task_id.domain}/{id.task_id.name}/{id.task_id.version}/{id.retry_attempt}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve an existing task execution." - }; - } - - // Fetches a list of :ref:`ref_flyteidl.admin.TaskExecution`. - rpc ListTaskExecutions (flyteidl.admin.TaskExecutionListRequest) returns (flyteidl.admin.TaskExecutionList) { - option (google.api.http) = { - get: "/api/v1/task_executions/{node_execution_id.execution_id.project}/{node_execution_id.execution_id.domain}/{node_execution_id.execution_id.name}/{node_execution_id.node_id}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Fetch existing task executions matching input filters." - }; - - } - - // Fetches input and output data for a :ref:`ref_flyteidl.admin.TaskExecution`. - rpc GetTaskExecutionData (flyteidl.admin.TaskExecutionGetDataRequest) returns (flyteidl.admin.TaskExecutionGetDataResponse) { - option (google.api.http) = { - get: "/api/v1/data/task_executions/{id.node_execution_id.execution_id.project}/{id.node_execution_id.execution_id.domain}/{id.node_execution_id.execution_id.name}/{id.node_execution_id.node_id}/{id.task_id.project}/{id.task_id.domain}/{id.task_id.name}/{id.task_id.version}/{id.retry_attempt}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve input and output data from an existing task execution." - }; - } - - // Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. 
- rpc UpdateProjectDomainAttributes (flyteidl.admin.ProjectDomainAttributesUpdateRequest) returns (flyteidl.admin.ProjectDomainAttributesUpdateResponse) { - option (google.api.http) = { - put: "/api/v1/project_domain_attributes/{attributes.project}/{attributes.domain}" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Update the customized resource attributes associated with a project-domain combination" - }; - } - - // Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. - rpc GetProjectDomainAttributes (flyteidl.admin.ProjectDomainAttributesGetRequest) returns (flyteidl.admin.ProjectDomainAttributesGetResponse) { - option (google.api.http) = { - get: "/api/v1/project_domain_attributes/{project}/{domain}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve the customized resource attributes associated with a project-domain combination" - }; - } - - // Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. - rpc DeleteProjectDomainAttributes (flyteidl.admin.ProjectDomainAttributesDeleteRequest) returns (flyteidl.admin.ProjectDomainAttributesDeleteResponse) { - option (google.api.http) = { - delete: "/api/v1/project_domain_attributes/{project}/{domain}" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Delete the customized resource attributes associated with a project-domain combination" - }; - } - - // Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` at the project level - rpc UpdateProjectAttributes (flyteidl.admin.ProjectAttributesUpdateRequest) returns (flyteidl.admin.ProjectAttributesUpdateResponse) { - option (google.api.http) = { - put: "/api/v1/project_attributes/{attributes.project}" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Update the customized resource attributes associated with a project" - }; - } - - // Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. - rpc GetProjectAttributes (flyteidl.admin.ProjectAttributesGetRequest) returns (flyteidl.admin.ProjectAttributesGetResponse) { - option (google.api.http) = { - get: "/api/v1/project_attributes/{project}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve the customized resource attributes associated with a project" - }; - } - - // Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. - rpc DeleteProjectAttributes (flyteidl.admin.ProjectAttributesDeleteRequest) returns (flyteidl.admin.ProjectAttributesDeleteResponse) { - option (google.api.http) = { - delete: "/api/v1/project_attributes/{project}" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Delete the customized resource attributes associated with a project" - }; - } - // Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. 
- rpc UpdateWorkflowAttributes (flyteidl.admin.WorkflowAttributesUpdateRequest) returns (flyteidl.admin.WorkflowAttributesUpdateResponse) { - option (google.api.http) = { - put: "/api/v1/workflow_attributes/{attributes.project}/{attributes.domain}/{attributes.workflow}" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Update the customized resource attributes associated with a project, domain and workflow combination" - }; - } - - // Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. - rpc GetWorkflowAttributes (flyteidl.admin.WorkflowAttributesGetRequest) returns (flyteidl.admin.WorkflowAttributesGetResponse) { - option (google.api.http) = { - get: "/api/v1/workflow_attributes/{project}/{domain}/{workflow}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve the customized resource attributes associated with a project, domain and workflow combination" - }; - } - - // Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. - rpc DeleteWorkflowAttributes (flyteidl.admin.WorkflowAttributesDeleteRequest) returns (flyteidl.admin.WorkflowAttributesDeleteResponse) { - option (google.api.http) = { - delete: "/api/v1/workflow_attributes/{project}/{domain}/{workflow}" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Delete the customized resource attributes associated with a project, domain and workflow combination" - }; - } - - // Lists custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a specific resource type. - rpc ListMatchableAttributes (flyteidl.admin.ListMatchableAttributesRequest) returns (flyteidl.admin.ListMatchableAttributesResponse) { - option (google.api.http) = { - get: "/api/v1/matchable_attributes" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve a list of MatchableAttributesConfiguration objects." - }; - } - - // Returns a list of :ref:`ref_flyteidl.admin.NamedEntity` objects. - rpc ListNamedEntities (flyteidl.admin.NamedEntityListRequest) returns (flyteidl.admin.NamedEntityList) { - option (google.api.http) = { - get: "/api/v1/named_entities/{resource_type}/{project}/{domain}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve a list of NamedEntity objects sharing a common resource type, project, and domain." - }; - } - - // Returns a :ref:`ref_flyteidl.admin.NamedEntity` object. - rpc GetNamedEntity (flyteidl.admin.NamedEntityGetRequest) returns (flyteidl.admin.NamedEntity) { - option (google.api.http) = { - get: "/api/v1/named_entities/{resource_type}/{id.project}/{id.domain}/{id.name}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve a NamedEntity object." - }; - } - - // Updates a :ref:`ref_flyteidl.admin.NamedEntity` object. 
- rpc UpdateNamedEntity (flyteidl.admin.NamedEntityUpdateRequest) returns (flyteidl.admin.NamedEntityUpdateResponse) { - option (google.api.http) = { - put: "/api/v1/named_entities/{resource_type}/{id.project}/{id.domain}/{id.name}" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Update the fields associated with a NamedEntity" - }; - } - - rpc GetVersion (flyteidl.admin.GetVersionRequest) returns (flyteidl.admin.GetVersionResponse) { - option (google.api.http) = { - get: "/api/v1/version" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve the Version (including the Build information) for FlyteAdmin service" - }; - } - - // Fetch a :ref:`ref_flyteidl.admin.DescriptionEntity` object. - rpc GetDescriptionEntity (flyteidl.admin.ObjectGetRequest) returns (flyteidl.admin.DescriptionEntity) { - option (google.api.http) = { - get: "/api/v1/description_entities/{id.resource_type}/{id.project}/{id.domain}/{id.name}/{id.version}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve an existing description entity description." - }; - } - - // Fetch a list of :ref:`ref_flyteidl.admin.DescriptionEntity` definitions. - rpc ListDescriptionEntities (flyteidl.admin.DescriptionEntityListRequest) returns (flyteidl.admin.DescriptionEntityList) { - option (google.api.http) = { - get: "/api/v1/description_entities/{resource_type}/{id.project}/{id.domain}/{id.name}" - additional_bindings { - get: "/api/v1/description_entities/{resource_type}/{id.project}/{id.domain}" - } - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Fetch existing description entity definitions matching input filters." - }; - } - - // Fetches runtime metrics for a :ref:`ref_flyteidl.admin.Execution`. - rpc GetExecutionMetrics (flyteidl.admin.WorkflowExecutionGetMetricsRequest) returns (flyteidl.admin.WorkflowExecutionGetMetricsResponse) { - option (google.api.http) = { - get: "/api/v1/metrics/executions/{id.project}/{id.domain}/{id.name}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve metrics from an existing workflow execution." - }; - }; -} diff --git a/flyrs/protos/flyteidl/service/agent.proto b/flyrs/protos/flyteidl/service/agent.proto deleted file mode 100644 index cd6b93a972..0000000000 --- a/flyrs/protos/flyteidl/service/agent.proto +++ /dev/null @@ -1,79 +0,0 @@ -syntax = "proto3"; -package flyteidl.service; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service"; - -import "google/api/annotations.proto"; -import "flyteidl/admin/agent.proto"; - -// SyncAgentService defines an RPC Service that allows propeller to send the request to the agent server synchronously. -service SyncAgentService { - // ExecuteTaskSync streams the create request and inputs to the agent service and streams the outputs back. - rpc ExecuteTaskSync (stream flyteidl.admin.ExecuteTaskSyncRequest) returns (stream flyteidl.admin.ExecuteTaskSyncResponse){ - option (google.api.http) = { - post: "/api/v1/agent/task/stream" - body: "*" - }; - }; -} - -// AsyncAgentService defines an RPC Service that allows propeller to send the request to the agent server asynchronously. -service AsyncAgentService { - // CreateTask sends a task create request to the agent service. 
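Stepping back to the AdminService that closed above: the way this PR makes that surface usable from flytekit is a PyO3 class that owns a tokio runtime plus the tonic client and shuttles serialized protos across the FFI boundary (see flyrs/src/lib.rs and flytekit/clients/friendly_rs.py). A rough, name-hedged sketch of that pattern, not the exact flyrs API:

```rust
// Hedged sketch of the flyrs wrapping pattern: a blocking PyO3 facade over the
// async tonic client. All names here are illustrative.
use prost::Message;
use pyo3::exceptions::{PyRuntimeError, PyValueError};
use pyo3::prelude::*;
use pyo3::types::PyBytes;

use flyteidl::service::admin_service_client::AdminServiceClient;

#[pyclass]
struct FlyteClient {
    runtime: tokio::runtime::Runtime,
    admin: AdminServiceClient<tonic::transport::Channel>,
}

#[pymethods]
impl FlyteClient {
    #[new]
    fn new(endpoint: String) -> PyResult<Self> {
        let runtime = tokio::runtime::Runtime::new()
            .map_err(|e| PyRuntimeError::new_err(e.to_string()))?;
        let admin = runtime
            .block_on(AdminServiceClient::connect(endpoint))
            .map_err(|e| PyRuntimeError::new_err(e.to_string()))?;
        Ok(Self { runtime, admin })
    }

    /// flytekit hands over a serialized ObjectGetRequest; we return the raw
    /// Task bytes and let the Python protos deserialize them on the other side.
    fn get_task(&mut self, py: Python<'_>, request: Vec<u8>) -> PyResult<PyObject> {
        let req = flyteidl::admin::ObjectGetRequest::decode(request.as_slice())
            .map_err(|e| PyValueError::new_err(e.to_string()))?;
        let task = self
            .runtime
            .block_on(self.admin.get_task(req))
            .map_err(|e| PyRuntimeError::new_err(e.to_string()))?
            .into_inner();
        Ok(PyBytes::new(py, &task.encode_to_vec()).into())
    }
}
```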
- rpc CreateTask (flyteidl.admin.CreateTaskRequest) returns (flyteidl.admin.CreateTaskResponse){ - option (google.api.http) = { - post: "/api/v1/agent/task" - body: "*" - }; - }; - - // Get job status. - rpc GetTask (flyteidl.admin.GetTaskRequest) returns (flyteidl.admin.GetTaskResponse){ - option (google.api.http) = { - get: "/api/v1/agent/task/{task_category.name}/{task_category.version}/{resource_meta}" - }; - }; - - // Delete the task resource. - rpc DeleteTask (flyteidl.admin.DeleteTaskRequest) returns (flyteidl.admin.DeleteTaskResponse){ - option (google.api.http) = { - delete: "/api/v1/agent/task_executions/{task_category.name}/{task_category.version}/{resource_meta}" - }; - }; - - // GetTaskMetrics returns one or more task execution metrics, if available. - // - // Errors include - // * OutOfRange if metrics are not available for the specified task time range - // * various other errors - rpc GetTaskMetrics(flyteidl.admin.GetTaskMetricsRequest) returns (flyteidl.admin.GetTaskMetricsResponse){ - option (google.api.http) = { - get: "/api/v1/agent/task/metrics/{task_category.name}/{task_category.version}/{resource_meta}" - }; - }; - - // GetTaskLogs returns task execution logs, if available. - rpc GetTaskLogs(flyteidl.admin.GetTaskLogsRequest) returns (stream flyteidl.admin.GetTaskLogsResponse){ - option (google.api.http) = { - get: "/api/v1/agent/task/logs/{task_category.name}/{task_category.version}/{resource_meta}" - }; - }; -} - -// AgentMetadataService defines an RPC service that is also served over HTTP via grpc-gateway. -// This service allows propeller or users to get the metadata of agents. -service AgentMetadataService { - // Fetch a :ref:`ref_flyteidl.admin.Agent` definition. - rpc GetAgent (flyteidl.admin.GetAgentRequest) returns (flyteidl.admin.GetAgentResponse){ - option (google.api.http) = { - get: "/api/v1/agent/{name}" - }; - }; - - // Fetch a list of :ref:`ref_flyteidl.admin.Agent` definitions. - rpc ListAgents (flyteidl.admin.ListAgentsRequest) returns (flyteidl.admin.ListAgentsResponse){ - option (google.api.http) = { - get: "/api/v1/agents" - }; - }; -} diff --git a/flyrs/protos/flyteidl/service/auth.proto b/flyrs/protos/flyteidl/service/auth.proto deleted file mode 100644 index a340f05add..0000000000 --- a/flyrs/protos/flyteidl/service/auth.proto +++ /dev/null @@ -1,94 +0,0 @@ -syntax = "proto3"; -package flyteidl.service; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service"; - -import "google/api/annotations.proto"; -import "protoc-gen-openapiv2/options/annotations.proto"; - -message OAuth2MetadataRequest {} - -// OAuth2MetadataResponse defines an RFC-Compliant response for /.well-known/oauth-authorization-server metadata -// as defined in https://tools.ietf.org/html/rfc8414 -message OAuth2MetadataResponse { - // Defines the issuer string in all JWT tokens this server issues. The issuer can be admin itself or an external - // issuer. - string issuer = 1; - - // URL of the authorization server's authorization endpoint [RFC6749]. This is REQUIRED unless no grant types are - // supported that use the authorization endpoint. - string authorization_endpoint = 2; - - // URL of the authorization server's token endpoint [RFC6749]. - string token_endpoint = 3; - - // Array containing a list of the OAuth 2.0 response_type values that this authorization server supports. - repeated string response_types_supported = 4; - - // JSON array containing a list of the OAuth 2.0 [RFC6749] scope values that this authorization server supports. 
- repeated string scopes_supported = 5; - - // JSON array containing a list of client authentication methods supported by this token endpoint. - repeated string token_endpoint_auth_methods_supported = 6; - - // URL of the authorization server's JWK Set [JWK] document. The referenced document contains the signing key(s) the - // client uses to validate signatures from the authorization server. - string jwks_uri = 7; - - // JSON array containing a list of Proof Key for Code Exchange (PKCE) [RFC7636] code challenge methods supported by - // this authorization server. - repeated string code_challenge_methods_supported = 8; - - // JSON array containing a list of the OAuth 2.0 grant type values that this authorization server supports. - repeated string grant_types_supported = 9; - - // URL of the authorization server's device authorization endpoint, as defined in Section 3.1 of [RFC8628] - string device_authorization_endpoint = 10; -} - -message PublicClientAuthConfigRequest {} - -// FlyteClientResponse encapsulates public information that flyte clients (CLIs... etc.) can use to authenticate users. -message PublicClientAuthConfigResponse { - // client_id to use when initiating OAuth2 authorization requests. - string client_id = 1; - // redirect uri to use when initiating OAuth2 authorization requests. - string redirect_uri = 2; - // scopes to request when initiating OAuth2 authorization requests. - repeated string scopes = 3; - // Authorization Header to use when passing Access Tokens to the server. If not provided, the client should use the - // default http `Authorization` header. - string authorization_metadata_key = 4; - // ServiceHttpEndpoint points to the http endpoint for the backend. If empty, clients can assume the endpoint used - // to configure the gRPC connection can be used for the http one respecting the insecure flag to choose between - // SSL or no SSL connections. - string service_http_endpoint = 5; - // audience to use when initiating OAuth2 authorization requests. - string audience = 6; -} - -// The following defines an RPC service that is also served over HTTP via grpc-gateway. -// Standard response codes for both are defined here: https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/errors.go -// RPCs defined in this service must be anonymously accessible. -service AuthMetadataService { - // Anonymously accessible. Retrieves local or external oauth authorization server metadata. - rpc GetOAuth2Metadata (OAuth2MetadataRequest) returns (OAuth2MetadataResponse) { - option (google.api.http) = { - get: "/.well-known/oauth-authorization-server" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieves OAuth2 authorization server metadata. This endpoint is anonymously accessible." - }; - } - - // Anonymously accessible. Retrieves the client information clients should use when initiating OAuth2 authorization - // requests. - rpc GetPublicClientConfig (PublicClientAuthConfigRequest) returns (PublicClientAuthConfigResponse) { - option (google.api.http) = { - get: "/config/v1/flyte_client" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieves public flyte client info. This endpoint is anonymously accessible." 
- }; - } -} diff --git a/flyrs/protos/flyteidl/service/dataproxy.proto b/flyrs/protos/flyteidl/service/dataproxy.proto deleted file mode 100644 index 86c7c4d977..0000000000 --- a/flyrs/protos/flyteidl/service/dataproxy.proto +++ /dev/null @@ -1,205 +0,0 @@ -syntax = "proto3"; -package flyteidl.service; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service"; - -import "google/api/annotations.proto"; -import "protoc-gen-openapiv2/options/annotations.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/timestamp.proto"; -import "flyteidl/core/identifier.proto"; -import "flyteidl/core/literals.proto"; - - -message CreateUploadLocationResponse { - // SignedUrl specifies the url to use to upload content to (e.g. https://my-bucket.s3.amazonaws.com/randomstring/suffix.tar?X-...) - string signed_url = 1; - - // NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar) - string native_url = 2; - - // ExpiresAt defines when will the signed URL expires. - google.protobuf.Timestamp expires_at = 3; - - // Data proxy generates these headers for client, and they have to add these headers to the request when uploading the file. - map headers = 4; -} - -// CreateUploadLocationRequest specified request for the CreateUploadLocation API. -// The implementation in data proxy service will create the s3 location with some server side configured prefixes, -// and then: -// - project/domain/(a deterministic str representation of the content_md5)/filename (if present); OR -// - project/domain/filename_root (if present)/filename (if present). -message CreateUploadLocationRequest { - // Project to create the upload location for - // +required - string project = 1; - - // Domain to create the upload location for. - // +required - string domain = 2; - - // Filename specifies a desired suffix for the generated location. E.g. `file.py` or `pre/fix/file.zip`. - // +optional. By default, the service will generate a consistent name based on the provided parameters. - string filename = 3; - - // ExpiresIn defines a requested expiration duration for the generated url. The request will be rejected if this - // exceeds the platform allowed max. - // +optional. The default value comes from a global config. - google.protobuf.Duration expires_in = 4; - - // ContentMD5 restricts the upload location to the specific MD5 provided. The ContentMD5 will also appear in the - // generated path. - // +required - bytes content_md5 = 5; - - // If present, data proxy will use this string in lieu of the md5 hash in the path. When the filename is also included - // this makes the upload location deterministic. The native url will still be prefixed by the upload location prefix - // in data proxy config. This option is useful when uploading multiple files. - // +optional - string filename_root = 6; - - // If true, the data proxy will add content_md5 to the metadata to the signed URL and - // it will force clients to add this metadata to the object. - // This make sure dataproxy is backward compatible with the old flytekit. - bool add_content_md5_metadata = 7; - - - // Optional, org key applied to the resource. - string org = 8; -} - -// CreateDownloadLocationRequest specified request for the CreateDownloadLocation API. -message CreateDownloadLocationRequest { - option deprecated = true; - // NativeUrl specifies the url in the format of the configured storage provider (e.g. 
s3://my-bucket/randomstring/suffix.tar) - string native_url = 1; - - // ExpiresIn defines a requested expiration duration for the generated url. The request will be rejected if this - // exceeds the platform allowed max. - // +optional. The default value comes from a global config. - google.protobuf.Duration expires_in = 2; -} - -message CreateDownloadLocationResponse { - option deprecated = true; - // SignedUrl specifies the url to use to download content from (e.g. https://my-bucket.s3.amazonaws.com/randomstring/suffix.tar?X-...) - string signed_url = 1; - // ExpiresAt defines when will the signed URL expires. - google.protobuf.Timestamp expires_at = 2; -} - -// ArtifactType -enum ArtifactType { - // ARTIFACT_TYPE_UNDEFINED is the default, often invalid, value for the enum. - ARTIFACT_TYPE_UNDEFINED = 0; - - // ARTIFACT_TYPE_DECK refers to the deck html file optionally generated after a task, a workflow or a launch plan - // finishes executing. - ARTIFACT_TYPE_DECK = 1; -} - -// CreateDownloadLinkRequest defines the request parameters to create a download link (signed url) -message CreateDownloadLinkRequest { - // ArtifactType of the artifact requested. - ArtifactType artifact_type = 1; - - // ExpiresIn defines a requested expiration duration for the generated url. The request will be rejected if this - // exceeds the platform allowed max. - // +optional. The default value comes from a global config. - google.protobuf.Duration expires_in = 2; - - oneof source { - // NodeId is the unique identifier for the node execution. For a task node, this will retrieve the output of the - // most recent attempt of the task. - core.NodeExecutionIdentifier node_execution_id = 3; - } -} - -// CreateDownloadLinkResponse defines the response for the generated links -message CreateDownloadLinkResponse { - // SignedUrl specifies the url to use to download content from (e.g. https://my-bucket.s3.amazonaws.com/randomstring/suffix.tar?X-...) - repeated string signed_url = 1 [deprecated = true]; - - // ExpiresAt defines when will the signed URL expire. - google.protobuf.Timestamp expires_at = 2 [deprecated = true]; - - // New wrapper object containing the signed urls and expiration time - PreSignedURLs pre_signed_urls = 3; -} - -// Wrapper object since the message is shared across this and the GetDataResponse -message PreSignedURLs { - // SignedUrl specifies the url to use to download content from (e.g. https://my-bucket.s3.amazonaws.com/randomstring/suffix.tar?X-...) - repeated string signed_url = 1; - - // ExpiresAt defines when will the signed URL expire. - google.protobuf.Timestamp expires_at = 2; -} - -// General request artifact to retrieve data from a Flyte artifact url. -message GetDataRequest { - // A unique identifier in the form of flyte:// that uniquely, for a given Flyte - // backend, identifies a Flyte artifact ([i]nput, [o]output, flyte [d]eck, etc.). - // e.g. flyte://v1/proj/development/execid/n2/0/i (for 0th task execution attempt input) - // flyte://v1/proj/development/execid/n2/i (for node execution input) - // flyte://v1/proj/development/execid/n2/o/o3 (the o3 output of the second node) - string flyte_url = 1; -} - -message GetDataResponse { - oneof data { - // literal map data will be returned - core.LiteralMap literal_map = 1; - - // Flyte deck html will be returned as a signed url users can download - PreSignedURLs pre_signed_urls = 2; - - // Single literal will be returned. This is returned when the user/url requests a specific output or input - // by name. See the o3 example above. 
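The flyte:// forms quoted for GetDataRequest follow a fixed segment order. A toy splitter under the assumption that the prefix is always flyte://v1/ and segments contain no '/'; this is not the data proxy's actual parser:

    // Splits a flyte:// URL into its path segments, e.g.
    // project/domain/execution, node, optional attempt, then i/o/d.
    fn split_flyte_url(url: &str) -> Option<Vec<&str>> {
        Some(url.strip_prefix("flyte://v1/")?.split('/').collect())
    }

    fn main() {
        // input of the 0th task-execution attempt
        assert_eq!(
            split_flyte_url("flyte://v1/proj/development/execid/n2/0/i").unwrap(),
            ["proj", "development", "execid", "n2", "0", "i"]
        );
        // node-execution input has no attempt segment
        assert_eq!(
            split_flyte_url("flyte://v1/proj/development/execid/n2/i").unwrap().len(),
            5
        );
    }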
- core.Literal literal = 3; - } -} - -// DataProxyService defines an RPC Service that allows access to user-data in a controlled manner. -service DataProxyService { - // CreateUploadLocation creates a signed url to upload artifacts to for a given project/domain. - rpc CreateUploadLocation (CreateUploadLocationRequest) returns (CreateUploadLocationResponse) { - option (google.api.http) = { - post: "/api/v1/dataproxy/artifact_urn" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Creates a write-only http location that is accessible for tasks at runtime." - }; - } - - // CreateDownloadLocation creates a signed url to download artifacts. - rpc CreateDownloadLocation (CreateDownloadLocationRequest) returns (CreateDownloadLocationResponse) { - option deprecated = true; - option (google.api.http) = { - get: "/api/v1/dataproxy/artifact_urn" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Deprecated: Please use CreateDownloadLink instead. Creates a read-only http location that is accessible for tasks at runtime." - }; - } - - // CreateDownloadLocation creates a signed url to download artifacts. - rpc CreateDownloadLink (CreateDownloadLinkRequest) returns (CreateDownloadLinkResponse) { - option (google.api.http) = { - post: "/api/v1/dataproxy/artifact_link" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Creates a read-only http location that is accessible for tasks at runtime." - }; - } - - rpc GetData (GetDataRequest) returns (GetDataResponse) { - // Takes an address like flyte://v1/proj/development/execid/n2/0/i and return the actual data - option (google.api.http) = { - get: "/api/v1/data" - }; - } -} diff --git a/flyrs/protos/flyteidl/service/external_plugin_service.proto b/flyrs/protos/flyteidl/service/external_plugin_service.proto deleted file mode 100644 index a3035290e2..0000000000 --- a/flyrs/protos/flyteidl/service/external_plugin_service.proto +++ /dev/null @@ -1,79 +0,0 @@ -syntax = "proto3"; -package flyteidl.service; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service"; -import "flyteidl/core/literals.proto"; -import "flyteidl/core/tasks.proto"; - -// ExternalPluginService defines an RPC Service that allows propeller to send the request to the backend plugin server. -service ExternalPluginService { - // Send a task create request to the backend plugin server. - rpc CreateTask (TaskCreateRequest) returns (TaskCreateResponse){option deprecated = true;}; - // Get job status. - rpc GetTask (TaskGetRequest) returns (TaskGetResponse){option deprecated = true;}; - // Delete the task resource. - rpc DeleteTask (TaskDeleteRequest) returns (TaskDeleteResponse){option deprecated = true;}; -} - -// The state of the execution is used to control its visibility in the UI/CLI. -enum State { - option deprecated = true; - RETRYABLE_FAILURE = 0; - PERMANENT_FAILURE = 1; - PENDING = 2; - RUNNING = 3; - SUCCEEDED = 4; -} - -// Represents a request structure to create task. -message TaskCreateRequest { - option deprecated = true; - // The inputs required to start the execution. All required inputs must be - // included in this map. If not required and not provided, defaults apply. - // +optional - core.LiteralMap inputs = 1; - // Template of the task that encapsulates all the metadata of the task. - core.TaskTemplate template = 2; - // Prefix for where task output data will be written. (e.g. 
s3://my-bucket/randomstring) - string output_prefix = 3; -} - -// Represents a create response structure. -message TaskCreateResponse { - option deprecated = true; - string job_id = 1; -} - -// A message used to fetch a job state from backend plugin server. -message TaskGetRequest { - option deprecated = true; - // A predefined yet extensible Task type identifier. - string task_type = 1; - // The unique id identifying the job. - string job_id = 2; -} - -// Response to get an individual task state. -message TaskGetResponse { - option deprecated = true; - // The state of the execution is used to control its visibility in the UI/CLI. - State state = 1; - // The outputs of the execution. It's typically used by sql task. Flyteplugins service will create a - // Structured dataset pointing to the query result table. - // +optional - core.LiteralMap outputs = 2; -} - -// A message used to delete a task. -message TaskDeleteRequest { - option deprecated = true; - // A predefined yet extensible Task type identifier. - string task_type = 1; - // The unique id identifying the job. - string job_id = 2; -} - -// Response to delete a task. -message TaskDeleteResponse { - option deprecated = true; -} diff --git a/flyrs/protos/flyteidl/service/identity.proto b/flyrs/protos/flyteidl/service/identity.proto deleted file mode 100644 index 244bb9aaeb..0000000000 --- a/flyrs/protos/flyteidl/service/identity.proto +++ /dev/null @@ -1,51 +0,0 @@ -syntax = "proto3"; -package flyteidl.service; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service"; - -import "google/api/annotations.proto"; -import "google/protobuf/struct.proto"; -import "protoc-gen-openapiv2/options/annotations.proto"; - -message UserInfoRequest {} - -// See the OpenID Connect spec at https://openid.net/specs/openid-connect-core-1_0.html#UserInfoResponse for more information. -message UserInfoResponse { - // Locally unique and never reassigned identifier within the Issuer for the End-User, which is intended to be consumed - // by the Client. - string subject = 1; - - // Full name - string name = 2; - - // Shorthand name by which the End-User wishes to be referred to - string preferred_username = 3; - - // Given name(s) or first name(s) - string given_name = 4; - - // Surname(s) or last name(s) - string family_name = 5; - - // Preferred e-mail address - string email = 6; - - // Profile picture URL - string picture = 7; - - // Additional claims - google.protobuf.Struct additional_claims = 8; -} - -// IdentityService defines an RPC Service that interacts with user/app identities. -service IdentityService { - // Retrieves user information about the currently logged in user. - rpc UserInfo (UserInfoRequest) returns (UserInfoResponse) { - option (google.api.http) = { - get: "/me" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieves authenticated identity info." - }; - } -} diff --git a/flyrs/protos/flyteidl/service/signal.proto b/flyrs/protos/flyteidl/service/signal.proto deleted file mode 100644 index b1b927979b..0000000000 --- a/flyrs/protos/flyteidl/service/signal.proto +++ /dev/null @@ -1,55 +0,0 @@ -syntax = "proto3"; -package flyteidl.service; - -option go_package = "github.com/flyteorg/flyte/flyteidl/gen/pb-go/flyteidl/service"; - -import "google/api/annotations.proto"; -import "flyteidl/admin/signal.proto"; -import "protoc-gen-openapiv2/options/annotations.proto"; - -// SignalService defines an RPC Service that may create, update, and retrieve signal(s). 
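For the deprecated external-plugin flow above, the State enum is all a client sees of execution progress. One plausible reading of which states are terminal; the proto does not pin this down, so treat the split as an assumption:

    // Client-side mirror of the deprecated State enum. The
    // terminal/non-terminal split below is my reading, not spec'd.
    #[allow(dead_code)]
    #[derive(Debug)]
    enum State {
        RetryableFailure,
        PermanentFailure,
        Pending,
        Running,
        Succeeded,
    }

    impl State {
        fn is_terminal(&self) -> bool {
            matches!(self, State::PermanentFailure | State::Succeeded)
        }
    }

    fn main() {
        assert!(State::Succeeded.is_terminal());
        assert!(!State::RetryableFailure.is_terminal()); // eligible for retry
    }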
-service SignalService { - // Fetches or creates a :ref:`ref_flyteidl.admin.Signal`. - rpc GetOrCreateSignal (flyteidl.admin.SignalGetOrCreateRequest) returns (flyteidl.admin.Signal) { - // Purposefully left out an HTTP API for this RPC call. This is meant to idempotently retrieve - // a signal, meaning the first call will create the signal and all subsequent calls will - // fetch the existing signal. This is only useful during Flyte Workflow execution and therefore - // is not exposed to mitigate unintended behavior. - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Retrieve a signal, creating it if it does not exist." - }; - } - - // Fetch a list of :ref:`ref_flyteidl.admin.Signal` definitions. - rpc ListSignals (flyteidl.admin.SignalListRequest) returns (flyteidl.admin.SignalList) { - option (google.api.http) = { - get: "/api/v1/signals/{workflow_execution_id.project}/{workflow_execution_id.domain}/{workflow_execution_id.name}" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Fetch existing signal definitions matching the input signal id filters." - }; - } - - // Sets the value on a :ref:`ref_flyteidl.admin.Signal` definition - rpc SetSignal (flyteidl.admin.SignalSetRequest) returns (flyteidl.admin.SignalSetResponse) { - option (google.api.http) = { - post: "/api/v1/signals" - body: "*" - }; - option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { - description: "Set a signal value." - responses: { - key: "400" - value: { - description: "Returned for bad request that may have failed validation." - } - } - responses: { - key: "409" - value: { - description: "Returned for a request that references an identical entity that has already been registered." - } - } - }; - } -} diff --git a/flyrs/protos/google/api/annotations.proto b/flyrs/protos/google/api/annotations.proto deleted file mode 100644 index 85c361b47f..0000000000 --- a/flyrs/protos/google/api/annotations.proto +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2015, Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/api/http.proto"; -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "AnnotationsProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.MethodOptions { - // See `HttpRule`. - HttpRule http = 72295728; -} diff --git a/flyrs/protos/google/api/client.proto b/flyrs/protos/google/api/client.proto deleted file mode 100644 index 2102623d30..0000000000 --- a/flyrs/protos/google/api/client.proto +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "ClientProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.MethodOptions { - // A definition of a client library method signature. - // - // In client libraries, each proto RPC corresponds to one or more methods - // which the end user is able to call, and calls the underlying RPC. - // Normally, this method receives a single argument (a struct or instance - // corresponding to the RPC request object). Defining this field will - // add one or more overloads providing flattened or simpler method signatures - // in some languages. - // - // The fields on the method signature are provided as a comma-separated - // string. - // - // For example, the proto RPC and annotation: - // - // rpc CreateSubscription(CreateSubscriptionRequest) - // returns (Subscription) { - // option (google.api.method_signature) = "name,topic"; - // } - // - // Would add the following Java overload (in addition to the method accepting - // the request object): - // - // public final Subscription createSubscription(String name, String topic) - // - // The following backwards-compatibility guidelines apply: - // - // * Adding this annotation to an unannotated method is backwards - // compatible. - // * Adding this annotation to a method which already has existing - // method signature annotations is backwards compatible if and only if - // the new method signature annotation is last in the sequence. - // * Modifying or removing an existing method signature annotation is - // a breaking change. - // * Re-ordering existing method signature annotations is a breaking - // change. - repeated string method_signature = 1051; -} - -extend google.protobuf.ServiceOptions { - // The hostname for this service. - // This should be specified with no prefix or protocol. - // - // Example: - // - // service Foo { - // option (google.api.default_host) = "foo.googleapi.com"; - // ... - // } - string default_host = 1049; - - // OAuth scopes needed for the client. - // - // Example: - // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform"; - // ... - // } - // - // If there is more than one scope, use a comma-separated string: - // - // Example: - // - // service Foo { - // option (google.api.oauth_scopes) = \ - // "https://www.googleapis.com/auth/cloud-platform," - // "https://www.googleapis.com/auth/monitoring"; - // ... 
- // } - string oauth_scopes = 1050; -} diff --git a/flyrs/protos/google/api/field_behavior.proto b/flyrs/protos/google/api/field_behavior.proto deleted file mode 100644 index 686667954a..0000000000 --- a/flyrs/protos/google/api/field_behavior.proto +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "FieldBehaviorProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.FieldOptions { - // A designation of a specific field behavior (required, output only, etc.) - // in protobuf messages. - // - // Examples: - // - // string name = 1 [(google.api.field_behavior) = REQUIRED]; - // State state = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - // google.protobuf.Duration ttl = 1 - // [(google.api.field_behavior) = INPUT_ONLY]; - // google.protobuf.Timestamp expire_time = 1 - // [(google.api.field_behavior) = OUTPUT_ONLY, - // (google.api.field_behavior) = IMMUTABLE]; - repeated google.api.FieldBehavior field_behavior = 1052; -} - -// An indicator of the behavior of a given field (for example, that a field -// is required in requests, or given as output but ignored as input). -// This **does not** change the behavior in protocol buffers itself; it only -// denotes the behavior and may affect how API tooling handles the field. -// -// Note: This enum **may** receive new values in the future. -enum FieldBehavior { - // Conventional default for enums. Do not use this. - FIELD_BEHAVIOR_UNSPECIFIED = 0; - - // Specifically denotes a field as optional. - // While all fields in protocol buffers are optional, this may be specified - // for emphasis if appropriate. - OPTIONAL = 1; - - // Denotes a field as required. - // This indicates that the field **must** be provided as part of the request, - // and failure to do so will cause an error (usually `INVALID_ARGUMENT`). - REQUIRED = 2; - - // Denotes a field as output only. - // This indicates that the field is provided in responses, but including the - // field in a request does nothing (the server *must* ignore it and - // *must not* throw an error as a result of the field's presence). - OUTPUT_ONLY = 3; - - // Denotes a field as input only. - // This indicates that the field is provided in requests, and the - // corresponding field is not included in output. - INPUT_ONLY = 4; - - // Denotes a field as immutable. - // This indicates that the field may be set once in a request to create a - // resource, but may not be changed thereafter. - IMMUTABLE = 5; - - // Denotes that a (repeated) field is an unordered list. - // This indicates that the service may provide the elements of the list - // in any arbitrary order, rather than the order the user originally - // provided. 
Additionally, the list's order may or may not be stable. - UNORDERED_LIST = 6; -} diff --git a/flyrs/protos/google/api/http.proto b/flyrs/protos/google/api/http.proto deleted file mode 100644 index 69460cf791..0000000000 --- a/flyrs/protos/google/api/http.proto +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "HttpProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -// Defines the HTTP configuration for an API service. It contains a list of -// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method -// to one or more HTTP REST API methods. -message Http { - // A list of HTTP configuration rules that apply to individual API methods. - // - // **NOTE:** All service configuration rules follow "last one wins" order. - repeated HttpRule rules = 1; - - // When set to true, URL path parameters will be fully URI-decoded except in - // cases of single segment matches in reserved expansion, where "%2F" will be - // left encoded. - // - // The default behavior is to not decode RFC 6570 reserved characters in multi - // segment matches. - bool fully_decode_reserved_expansion = 2; -} - -// # gRPC Transcoding -// -// gRPC Transcoding is a feature for mapping between a gRPC method and one or -// more HTTP REST endpoints. It allows developers to build a single API service -// that supports both gRPC APIs and REST APIs. Many systems, including [Google -// APIs](https://github.com/googleapis/googleapis), -// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC -// Gateway](https://github.com/grpc-ecosystem/grpc-gateway), -// and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature -// and use it for large scale production services. -// -// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies -// how different portions of the gRPC request message are mapped to the URL -// path, URL query parameters, and HTTP request body. It also controls how the -// gRPC response message is mapped to the HTTP response body. `HttpRule` is -// typically specified as an `google.api.http` annotation on the gRPC method. -// -// Each mapping specifies a URL path template and an HTTP method. The path -// template may refer to one or more fields in the gRPC request message, as long -// as each field is a non-repeated field with a primitive (non-message) type. -// The path template controls how fields of the request message are mapped to -// the URL path. -// -// Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/{name=messages/*}" -// }; -// } -// } -// message GetMessageRequest { -// string name = 1; // Mapped to URL path. 
-// } -// message Message { -// string text = 1; // The resource content. -// } -// -// This enables an HTTP REST to gRPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` -// -// Any fields in the request message which are not bound by the path template -// automatically become HTTP query parameters if there is no HTTP request body. -// For example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get:"/v1/messages/{message_id}" -// }; -// } -// } -// message GetMessageRequest { -// message SubMessage { -// string subfield = 1; -// } -// string message_id = 1; // Mapped to URL path. -// int64 revision = 2; // Mapped to URL query parameter `revision`. -// SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. -// } -// -// This enables a HTTP JSON to RPC mapping as below: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | -// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: -// "foo"))` -// -// Note that fields which are mapped to URL query parameters must have a -// primitive type or a repeated primitive type or a non-repeated message type. -// In the case of a repeated type, the parameter can be repeated in the URL -// as `...?param=A¶m=B`. In the case of a message type, each field of the -// message is mapped to a separate parameter, such as -// `...?foo.a=A&foo.b=B&foo.c=C`. -// -// For HTTP methods that allow a request body, the `body` field -// specifies the mapping. Consider a REST update method on the -// message resource collection: -// -// service Messaging { -// rpc UpdateMessage(UpdateMessageRequest) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "message" -// }; -// } -// } -// message UpdateMessageRequest { -// string message_id = 1; // mapped to the URL -// Message message = 2; // mapped to the body -// } -// -// The following HTTP JSON to RPC mapping is enabled, where the -// representation of the JSON in the request body is determined by -// protos JSON encoding: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" message { text: "Hi!" })` -// -// The special name `*` can be used in the body mapping to define that -// every field not bound by the path template should be mapped to the -// request body. This enables the following alternative definition of -// the update method: -// -// service Messaging { -// rpc UpdateMessage(Message) returns (Message) { -// option (google.api.http) = { -// patch: "/v1/messages/{message_id}" -// body: "*" -// }; -// } -// } -// message Message { -// string message_id = 1; -// string text = 2; -// } -// -// -// The following HTTP JSON to RPC mapping is enabled: -// -// HTTP | gRPC -// -----|----- -// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: -// "123456" text: "Hi!")` -// -// Note that when using `*` in the body mapping, it is not possible to -// have HTTP parameters, as all fields not bound by the path end in -// the body. This makes this option more rarely used in practice when -// defining REST APIs. The common usage of `*` is in custom methods -// which don't use the URL at all for transferring data. -// -// It is possible to define multiple HTTP methods for one RPC by using -// the `additional_bindings` option. 
Example: -// -// service Messaging { -// rpc GetMessage(GetMessageRequest) returns (Message) { -// option (google.api.http) = { -// get: "/v1/messages/{message_id}" -// additional_bindings { -// get: "/v1/users/{user_id}/messages/{message_id}" -// } -// }; -// } -// } -// message GetMessageRequest { -// string message_id = 1; -// string user_id = 2; -// } -// -// This enables the following two alternative HTTP JSON to RPC mappings: -// -// HTTP | gRPC -// -----|----- -// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` -// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: -// "123456")` -// -// ## Rules for HTTP mapping -// -// 1. Leaf request fields (recursive expansion nested messages in the request -// message) are classified into three categories: -// - Fields referred by the path template. They are passed via the URL path. -// - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP -// request body. -// - All other fields are passed via the URL query parameters, and the -// parameter name is the field path in the request message. A repeated -// field can be represented as multiple query parameters under the same -// name. -// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields -// are passed via URL path and HTTP request body. -// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all -// fields are passed via URL path and URL query parameters. -// -// ### Path template syntax -// -// Template = "/" Segments [ Verb ] ; -// Segments = Segment { "/" Segment } ; -// Segment = "*" | "**" | LITERAL | Variable ; -// Variable = "{" FieldPath [ "=" Segments ] "}" ; -// FieldPath = IDENT { "." IDENT } ; -// Verb = ":" LITERAL ; -// -// The syntax `*` matches a single URL path segment. The syntax `**` matches -// zero or more URL path segments, which must be the last part of the URL path -// except the `Verb`. -// -// The syntax `Variable` matches part of the URL path as specified by its -// template. A variable template must not contain other variables. If a variable -// matches a single path segment, its template may be omitted, e.g. `{var}` -// is equivalent to `{var=*}`. -// -// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` -// contains any reserved character, such characters should be percent-encoded -// before the matching. -// -// If a variable contains exactly one path segment, such as `"{var}"` or -// `"{var=*}"`, when such a variable is expanded into a URL path on the client -// side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The -// server side does the reverse decoding. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{var}`. -// -// If a variable contains multiple path segments, such as `"{var=foo/*}"` -// or `"{var=**}"`, when such a variable is expanded into a URL path on the -// client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. -// The server side does the reverse decoding, except "%2F" and "%2f" are left -// unchanged. Such variables show up in the -// [Discovery -// Document](https://developers.google.com/discovery/v1/reference/apis) as -// `{+var}`. -// -// ## Using gRPC API Service Configuration -// -// gRPC API Service Configuration (service config) is a configuration language -// for configuring a gRPC service to become a user-facing product. 
The -// service config is simply the YAML representation of the `google.api.Service` -// proto message. -// -// As an alternative to annotating your proto file, you can configure gRPC -// transcoding in your service config YAML files. You do this by specifying a -// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same -// effect as the proto annotation. This can be particularly useful if you -// have a proto that is reused in multiple services. Note that any transcoding -// specified in the service config will override any matching transcoding -// configuration in the proto. -// -// Example: -// -// http: -// rules: -// # Selects a gRPC method and applies HttpRule to it. -// - selector: example.v1.Messaging.GetMessage -// get: /v1/messages/{message_id}/{sub.subfield} -// -// ## Special notes -// -// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the -// proto to JSON conversion must follow the [proto3 -// specification](https://developers.google.com/protocol-buffers/docs/proto3#json). -// -// While the single segment variable follows the semantics of -// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String -// Expansion, the multi segment variable **does not** follow RFC 6570 Section -// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion -// does not expand special characters like `?` and `#`, which would lead -// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding -// for multi segment variables. -// -// The path variables **must not** refer to any repeated or mapped field, -// because client libraries are not capable of handling such variable expansion. -// -// The path variables **must not** capture the leading "/" character. The reason -// is that the most common use case "{var}" does not capture the leading "/" -// character. For consistency, all path variables must share the same behavior. -// -// Repeated message fields must not be mapped to URL query parameters, because -// no client library can support such complicated mapping. -// -// If an API needs to use a JSON array for request or response body, it can map -// the request or response body to a repeated field. However, some gRPC -// Transcoding implementations may not support this feature. -message HttpRule { - // Selects a method to which this rule applies. - // - // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - string selector = 1; - - // Determines the URL pattern is matched by this rules. This pattern can be - // used with any of the {get|put|post|delete|patch} methods. A custom method - // can be defined using the 'custom' field. - oneof pattern { - // Maps to HTTP GET. Used for listing and getting information about - // resources. - string get = 2; - - // Maps to HTTP PUT. Used for replacing a resource. - string put = 3; - - // Maps to HTTP POST. Used for creating a resource or performing an action. - string post = 4; - - // Maps to HTTP DELETE. Used for deleting a resource. - string delete = 5; - - // Maps to HTTP PATCH. Used for updating a resource. - string patch = 6; - - // The custom pattern is used for specifying an HTTP method that is not - // included in the `pattern` field, such as HEAD, or "*" to leave the - // HTTP method unspecified for this rule. The wild-card rule is useful - // for services that provide content to Web (HTML) clients. 
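The path-template grammar in the transcoding notes above is small enough to sketch. A toy matcher covering only literals, `*`, and `{var}` (which the doc equates with `{var=*}`); `**`, verbs, nested templates, and percent-decoding are omitted:

    // Matches a path against a template of single-segment pieces and
    // returns the variable bindings, or None on mismatch.
    fn match_template<'a>(template: &str, path: &'a str) -> Option<Vec<(String, &'a str)>> {
        let tpl: Vec<&str> = template.trim_start_matches('/').split('/').collect();
        let got: Vec<&'a str> = path.trim_start_matches('/').split('/').collect();
        if tpl.len() != got.len() {
            return None;
        }
        let mut bindings = Vec::new();
        for (seg, part) in tpl.iter().zip(got.iter()) {
            let (seg, part) = (*seg, *part);
            if seg.starts_with('{') && seg.ends_with('}') {
                bindings.push((seg[1..seg.len() - 1].to_string(), part));
            } else if seg != "*" && seg != part {
                return None; // literal mismatch
            }
        }
        Some(bindings)
    }

    fn main() {
        let b = match_template("/v1/messages/{message_id}", "/v1/messages/123456").unwrap();
        assert_eq!(b, [("message_id".to_string(), "123456")]);
        assert!(match_template("/v1/messages/{id}", "/v1/topics/1").is_none());
    }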
- CustomHttpPattern custom = 8; - } - - // The name of the request field whose value is mapped to the HTTP request - // body, or `*` for mapping all request fields not captured by the path - // pattern to the HTTP body, or omitted for not having any HTTP request body. - // - // NOTE: the referred field must be present at the top-level of the request - // message type. - string body = 7; - - // Optional. The name of the response field whose value is mapped to the HTTP - // response body. When omitted, the entire response message will be used - // as the HTTP response body. - // - // NOTE: The referred field must be present at the top-level of the response - // message type. - string response_body = 12; - - // Additional HTTP bindings for the selector. Nested bindings must - // not contain an `additional_bindings` field themselves (that is, - // the nesting may only be one level deep). - repeated HttpRule additional_bindings = 11; -} - -// A custom pattern is used for defining custom HTTP verb. -message CustomHttpPattern { - // The name of this custom HTTP verb. - string kind = 1; - - // The path matched by this custom verb. - string path = 2; -} diff --git a/flyrs/protos/google/api/resource.proto b/flyrs/protos/google/api/resource.proto deleted file mode 100644 index fd9ee66def..0000000000 --- a/flyrs/protos/google/api/resource.proto +++ /dev/null @@ -1,299 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.api; - -import "google/protobuf/descriptor.proto"; - -option cc_enable_arenas = true; -option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; -option java_multiple_files = true; -option java_outer_classname = "ResourceProto"; -option java_package = "com.google.api"; -option objc_class_prefix = "GAPI"; - -extend google.protobuf.FieldOptions { - // An annotation that describes a resource reference, see - // [ResourceReference][]. - google.api.ResourceReference resource_reference = 1055; -} - -extend google.protobuf.FileOptions { - // An annotation that describes a resource definition without a corresponding - // message; see [ResourceDescriptor][]. - repeated google.api.ResourceDescriptor resource_definition = 1053; -} - -extend google.protobuf.MessageOptions { - // An annotation that describes a resource definition, see - // [ResourceDescriptor][]. - google.api.ResourceDescriptor resource = 1053; -} - -// A simple descriptor of a resource type. -// -// ResourceDescriptor annotates a resource message (either by means of a -// protobuf annotation or use in the service config), and associates the -// resource's schema, the resource type, and the pattern of the resource name. -// -// Example: -// -// message Topic { -// // Indicates this message defines a resource schema. -// // Declares the resource type in the format of {service}/{kind}. -// // For Kubernetes resources, the format is {api group}/{kind}. 
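Per the transcoding rules above, when `body` is omitted every leaf field not bound by the path template travels as a URL query parameter, with the field path as the parameter name. A toy query builder, with percent-encoding skipped for brevity:

    // Joins unbound leaf fields into a query string, as in the
    // GetMessage example: ?revision=2&sub.subfield=foo
    fn to_query(unbound: &[(&str, &str)]) -> String {
        unbound
            .iter()
            .map(|(k, v)| format!("{k}={v}"))
            .collect::<Vec<_>>()
            .join("&")
    }

    fn main() {
        // e.g. GetMessage(message_id: "123456", revision: 2, sub.subfield: "foo")
        assert_eq!(
            to_query(&[("revision", "2"), ("sub.subfield", "foo")]),
            "revision=2&sub.subfield=foo"
        );
    }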
-// option (google.api.resource) = { -// type: "pubsub.googleapis.com/Topic" -// name_descriptor: { -// pattern: "projects/{project}/topics/{topic}" -// parent_type: "cloudresourcemanager.googleapis.com/Project" -// parent_name_extractor: "projects/{project}" -// } -// }; -// } -// -// The ResourceDescriptor Yaml config will look like: -// -// resources: -// - type: "pubsub.googleapis.com/Topic" -// name_descriptor: -// - pattern: "projects/{project}/topics/{topic}" -// parent_type: "cloudresourcemanager.googleapis.com/Project" -// parent_name_extractor: "projects/{project}" -// -// Sometimes, resources have multiple patterns, typically because they can -// live under multiple parents. -// -// Example: -// -// message LogEntry { -// option (google.api.resource) = { -// type: "logging.googleapis.com/LogEntry" -// name_descriptor: { -// pattern: "projects/{project}/logs/{log}" -// parent_type: "cloudresourcemanager.googleapis.com/Project" -// parent_name_extractor: "projects/{project}" -// } -// name_descriptor: { -// pattern: "folders/{folder}/logs/{log}" -// parent_type: "cloudresourcemanager.googleapis.com/Folder" -// parent_name_extractor: "folders/{folder}" -// } -// name_descriptor: { -// pattern: "organizations/{organization}/logs/{log}" -// parent_type: "cloudresourcemanager.googleapis.com/Organization" -// parent_name_extractor: "organizations/{organization}" -// } -// name_descriptor: { -// pattern: "billingAccounts/{billing_account}/logs/{log}" -// parent_type: "billing.googleapis.com/BillingAccount" -// parent_name_extractor: "billingAccounts/{billing_account}" -// } -// }; -// } -// -// The ResourceDescriptor Yaml config will look like: -// -// resources: -// - type: 'logging.googleapis.com/LogEntry' -// name_descriptor: -// - pattern: "projects/{project}/logs/{log}" -// parent_type: "cloudresourcemanager.googleapis.com/Project" -// parent_name_extractor: "projects/{project}" -// - pattern: "folders/{folder}/logs/{log}" -// parent_type: "cloudresourcemanager.googleapis.com/Folder" -// parent_name_extractor: "folders/{folder}" -// - pattern: "organizations/{organization}/logs/{log}" -// parent_type: "cloudresourcemanager.googleapis.com/Organization" -// parent_name_extractor: "organizations/{organization}" -// - pattern: "billingAccounts/{billing_account}/logs/{log}" -// parent_type: "billing.googleapis.com/BillingAccount" -// parent_name_extractor: "billingAccounts/{billing_account}" -// -// For flexible resources, the resource name doesn't contain parent names, but -// the resource itself has parents for policy evaluation. -// -// Example: -// -// message Shelf { -// option (google.api.resource) = { -// type: "library.googleapis.com/Shelf" -// name_descriptor: { -// pattern: "shelves/{shelf}" -// parent_type: "cloudresourcemanager.googleapis.com/Project" -// } -// name_descriptor: { -// pattern: "shelves/{shelf}" -// parent_type: "cloudresourcemanager.googleapis.com/Folder" -// } -// }; -// } -// -// The ResourceDescriptor Yaml config will look like: -// -// resources: -// - type: 'library.googleapis.com/Shelf' -// name_descriptor: -// - pattern: "shelves/{shelf}" -// parent_type: "cloudresourcemanager.googleapis.com/Project" -// - pattern: "shelves/{shelf}" -// parent_type: "cloudresourcemanager.googleapis.com/Folder" -message ResourceDescriptor { - // A description of the historical or future-looking state of the - // resource pattern. - enum History { - // The "unset" value. 
- HISTORY_UNSPECIFIED = 0; - - // The resource originally had one pattern and launched as such, and - // additional patterns were added later. - ORIGINALLY_SINGLE_PATTERN = 1; - - // The resource has one pattern, but the API owner expects to add more - // later. (This is the inverse of ORIGINALLY_SINGLE_PATTERN, and prevents - // that from being necessary once there are multiple patterns.) - FUTURE_MULTI_PATTERN = 2; - } - - // A flag representing a specific style that a resource claims to conform to. - enum Style { - // The unspecified value. Do not use. - STYLE_UNSPECIFIED = 0; - - // This resource is intended to be "declarative-friendly". - // - // Declarative-friendly resources must be more strictly consistent, and - // setting this to true communicates to tools that this resource should - // adhere to declarative-friendly expectations. - // - // Note: This is used by the API linter (linter.aip.dev) to enable - // additional checks. - DECLARATIVE_FRIENDLY = 1; - } - - // The resource type. It must be in the format of - // {service_name}/{resource_type_kind}. The `resource_type_kind` must be - // singular and must not include version numbers. - // - // Example: `storage.googleapis.com/Bucket` - // - // The value of the resource_type_kind must follow the regular expression - // /[A-Za-z][a-zA-Z0-9]+/. It should start with an upper case character and - // should use PascalCase (UpperCamelCase). The maximum number of - // characters allowed for the `resource_type_kind` is 100. - string type = 1; - - // Optional. The relative resource name pattern associated with this resource - // type. The DNS prefix of the full resource name shouldn't be specified here. - // - // The path pattern must follow the syntax, which aligns with HTTP binding - // syntax: - // - // Template = Segment { "/" Segment } ; - // Segment = LITERAL | Variable ; - // Variable = "{" LITERAL "}" ; - // - // Examples: - // - // - "projects/{project}/topics/{topic}" - // - "projects/{project}/knowledgeBases/{knowledge_base}" - // - // The components in braces correspond to the IDs for each resource in the - // hierarchy. It is expected that, if multiple patterns are provided, - // the same component name (e.g. "project") refers to IDs of the same - // type of resource. - repeated string pattern = 2; - - // Optional. The field on the resource that designates the resource name - // field. If omitted, this is assumed to be "name". - string name_field = 3; - - // Optional. The historical or future-looking state of the resource pattern. - // - // Example: - // - // // The InspectTemplate message originally only supported resource - // // names with organization, and project was added later. - // message InspectTemplate { - // option (google.api.resource) = { - // type: "dlp.googleapis.com/InspectTemplate" - // pattern: - // "organizations/{organization}/inspectTemplates/{inspect_template}" - // pattern: "projects/{project}/inspectTemplates/{inspect_template}" - // history: ORIGINALLY_SINGLE_PATTERN - // }; - // } - History history = 4; - - // The plural name used in the resource name and permission names, such as - // 'projects' for the resource name of 'projects/{project}' and the permission - // name of 'cloudresourcemanager.googleapis.com/projects.get'. It is the same - // concept of the `plural` field in k8s CRD spec - // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ - // - // Note: The plural form is required even for singleton resources. 
See - // https://aip.dev/156 - string plural = 5; - - // The same concept of the `singular` field in k8s CRD spec - // https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/ - // Such as "project" for the `resourcemanager.googleapis.com/Project` type. - string singular = 6; - - // Style flag(s) for this resource. - // These indicate that a resource is expected to conform to a given - // style. See the specific style flags for additional information. - repeated Style style = 10; -} - -// Defines a proto annotation that describes a string field that refers to -// an API resource. -message ResourceReference { - // The resource type that the annotated field references. - // - // Example: - // - // message Subscription { - // string topic = 2 [(google.api.resource_reference) = { - // type: "pubsub.googleapis.com/Topic" - // }]; - // } - // - // Occasionally, a field may reference an arbitrary resource. In this case, - // APIs use the special value * in their resource reference. - // - // Example: - // - // message GetIamPolicyRequest { - // string resource = 2 [(google.api.resource_reference) = { - // type: "*" - // }]; - // } - string type = 1; - - // The resource type of a child collection that the annotated field - // references. This is useful for annotating the `parent` field that - // doesn't have a fixed resource type. - // - // Example: - // - // message ListLogEntriesRequest { - // string parent = 1 [(google.api.resource_reference) = { - // child_type: "logging.googleapis.com/LogEntry" - // }; - // } - string child_type = 2; -} diff --git a/flyrs/protos/google/pubsub/v1/pubsub.proto b/flyrs/protos/google/pubsub/v1/pubsub.proto deleted file mode 100644 index 9bc678e3ae..0000000000 --- a/flyrs/protos/google/pubsub/v1/pubsub.proto +++ /dev/null @@ -1,1316 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.pubsub.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/protobuf/duration.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/field_mask.proto"; -import "google/protobuf/timestamp.proto"; -import "google/pubsub/v1/schema.proto"; - -option cc_enable_arenas = true; -option csharp_namespace = "Google.Cloud.PubSub.V1"; -option go_package = "google.golang.org/genproto/googleapis/pubsub/v1;pubsub"; -option java_multiple_files = true; -option java_outer_classname = "PubsubProto"; -option java_package = "com.google.pubsub.v1"; -option php_namespace = "Google\\Cloud\\PubSub\\V1"; -option ruby_package = "Google::Cloud::PubSub::V1"; - -// The service that an application uses to manipulate topics, and to send -// messages to a topic. 
-service Publisher { - option (google.api.default_host) = "pubsub.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/pubsub"; - - // Creates the given topic with the given name. See the [resource name rules] - // (https://cloud.google.com/pubsub/docs/admin#resource_names). - rpc CreateTopic(Topic) returns (Topic) { - option (google.api.http) = { - put: "/v1/{name=projects/*/topics/*}" - body: "*" - }; - option (google.api.method_signature) = "name"; - } - - // Updates an existing topic. Note that certain properties of a - // topic are not modifiable. - rpc UpdateTopic(UpdateTopicRequest) returns (Topic) { - option (google.api.http) = { - patch: "/v1/{topic.name=projects/*/topics/*}" - body: "*" - }; - } - - // Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic - // does not exist. - rpc Publish(PublishRequest) returns (PublishResponse) { - option (google.api.http) = { - post: "/v1/{topic=projects/*/topics/*}:publish" - body: "*" - }; - option (google.api.method_signature) = "topic,messages"; - } - - // Gets the configuration of a topic. - rpc GetTopic(GetTopicRequest) returns (Topic) { - option (google.api.http) = { - get: "/v1/{topic=projects/*/topics/*}" - }; - option (google.api.method_signature) = "topic"; - } - - // Lists matching topics. - rpc ListTopics(ListTopicsRequest) returns (ListTopicsResponse) { - option (google.api.http) = { - get: "/v1/{project=projects/*}/topics" - }; - option (google.api.method_signature) = "project"; - } - - // Lists the names of the attached subscriptions on this topic. - rpc ListTopicSubscriptions(ListTopicSubscriptionsRequest) - returns (ListTopicSubscriptionsResponse) { - option (google.api.http) = { - get: "/v1/{topic=projects/*/topics/*}/subscriptions" - }; - option (google.api.method_signature) = "topic"; - } - - // Lists the names of the snapshots on this topic. Snapshots are used in - // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, - // which allow you to manage message acknowledgments in bulk. That is, you can - // set the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. - rpc ListTopicSnapshots(ListTopicSnapshotsRequest) - returns (ListTopicSnapshotsResponse) { - option (google.api.http) = { - get: "/v1/{topic=projects/*/topics/*}/snapshots" - }; - option (google.api.method_signature) = "topic"; - } - - // Deletes the topic with the given name. Returns `NOT_FOUND` if the topic - // does not exist. After a topic is deleted, a new topic may be created with - // the same name; this is an entirely new topic with none of the old - // configuration or subscriptions. Existing subscriptions to this topic are - // not deleted, but their `topic` field is set to `_deleted-topic_`. - rpc DeleteTopic(DeleteTopicRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{topic=projects/*/topics/*}" - }; - option (google.api.method_signature) = "topic"; - } - - // Detaches a subscription from this topic. All messages retained in the - // subscription are dropped. Subsequent `Pull` and `StreamingPull` requests - // will return FAILED_PRECONDITION. If the subscription is a push - // subscription, pushes to the endpoint will stop. 
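The oauth_scopes option on Publisher concatenates its string literals into one comma-separated value. A client library would split it before requesting tokens; a sketch:

    // Splits the single comma-separated oauth_scopes string into scopes.
    fn split_scopes(raw: &str) -> Vec<&str> {
        raw.split(',').filter(|s| !s.is_empty()).collect()
    }

    fn main() {
        let raw = "https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/pubsub";
        let scopes = split_scopes(raw);
        assert_eq!(scopes.len(), 2);
        assert_eq!(scopes[1], "https://www.googleapis.com/auth/pubsub");
    }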
- rpc DetachSubscription(DetachSubscriptionRequest) - returns (DetachSubscriptionResponse) { - option (google.api.http) = { - post: "/v1/{subscription=projects/*/subscriptions/*}:detach" - }; - } -} - -// A policy constraining the storage of messages published to the topic. -message MessageStoragePolicy { - // A list of IDs of GCP regions where messages that are published to the topic - // may be persisted in storage. Messages published by publishers running in - // non-allowed GCP regions (or running outside of GCP altogether) will be - // routed for storage in one of the allowed regions. An empty list means that - // no regions are allowed, and is not a valid configuration. - repeated string allowed_persistence_regions = 1; -} - -// Settings for validating messages published against a schema. -message SchemaSettings { - // Required. The name of the schema that messages published should be - // validated against. Format is `projects/{project}/schemas/{schema}`. The - // value of this field will be `_deleted-schema_` if the schema has been - // deleted. - string schema = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Schema" } - ]; - - // The encoding of messages validated against `schema`. - Encoding encoding = 2; -} - -// A topic resource. -message Topic { - option (google.api.resource) = { - type: "pubsub.googleapis.com/Topic" - pattern: "projects/{project}/topics/{topic}" - pattern: "_deleted-topic_" - }; - - // Required. The name of the topic. It must have the format - // `"projects/{project}/topics/{topic}"`. `{topic}` must start with a letter, - // and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), - // underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent - // signs (`%`). It must be between 3 and 255 characters in length, and it - // must not start with `"goog"`. - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // See [Creating and managing labels] - // (https://cloud.google.com/pubsub/docs/labels). - map labels = 2; - - // Policy constraining the set of Google Cloud Platform regions where messages - // published to the topic may be stored. If not present, then no constraints - // are in effect. - MessageStoragePolicy message_storage_policy = 3; - - // The resource name of the Cloud KMS CryptoKey to be used to protect access - // to messages published on this topic. - // - // The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`. - string kms_key_name = 5; - - // Settings for validating messages published against a schema. - // - // EXPERIMENTAL: Schema support is in development and may not work yet. - SchemaSettings schema_settings = 6; - - // Reserved for future use. This field is set only in responses from the - // server; it is ignored if it is set in any requests. - bool satisfies_pzs = 7; -} - -// A message that is published by publishers and consumed by subscribers. The -// message must contain either a non-empty data field or at least one attribute. -// Note that client libraries represent this object differently -// depending on the language. See the corresponding [client library -// documentation](https://cloud.google.com/pubsub/docs/reference/libraries) for -// more information. See [quotas and limits] -// (https://cloud.google.com/pubsub/quotas) for more information about message -// limits. -message PubsubMessage { - // The message data field. If this field is empty, the message must contain - // at least one attribute. 
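Topic.name above carries precise lexical rules for the {topic} segment. A validation sketch for just that segment (3-255 characters, leading letter, restricted charset, no "goog" prefix):

    // Validates the {topic} part of "projects/{project}/topics/{topic}"
    // against the rules quoted in the Topic message.
    fn valid_topic_id(id: &str) -> bool {
        let ok = |c: char| c.is_ascii_alphanumeric() || "-_.~+%".contains(c);
        (3..=255).contains(&id.len())
            && id.chars().next().map_or(false, |c| c.is_ascii_alphabetic())
            && id.chars().all(ok)
            && !id.starts_with("goog")
    }

    fn main() {
        assert!(valid_topic_id("my-topic"));
        assert!(!valid_topic_id("goog-topic")); // reserved prefix
        assert!(!valid_topic_id("ab")); // shorter than 3 chars
    }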
- bytes data = 1; - - // Attributes for this message. If this field is empty, the message must - // contain non-empty data. This can be used to filter messages on the - // subscription. - map attributes = 2; - - // ID of this message, assigned by the server when the message is published. - // Guaranteed to be unique within the topic. This value may be read by a - // subscriber that receives a `PubsubMessage` via a `Pull` call or a push - // delivery. It must not be populated by the publisher in a `Publish` call. - string message_id = 3; - - // The time at which the message was published, populated by the server when - // it receives the `Publish` call. It must not be populated by the - // publisher in a `Publish` call. - google.protobuf.Timestamp publish_time = 4; - - // If non-empty, identifies related messages for which publish order should be - // respected. If a `Subscription` has `enable_message_ordering` set to `true`, - // messages published with the same non-empty `ordering_key` value will be - // delivered to subscribers in the order in which they are received by the - // Pub/Sub system. All `PubsubMessage`s published in a given `PublishRequest` - // must specify the same `ordering_key` value. - string ordering_key = 5; -} - -// Request for the GetTopic method. -message GetTopicRequest { - // Required. The name of the topic to get. - // Format is `projects/{project}/topics/{topic}`. - string topic = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" } - ]; -} - -// Request for the UpdateTopic method. -message UpdateTopicRequest { - // Required. The updated topic object. - Topic topic = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. Indicates which fields in the provided topic to update. Must be - // specified and non-empty. Note that if `update_mask` contains - // "message_storage_policy" but the `message_storage_policy` is not set in - // the `topic` provided above, then the updated value is determined by the - // policy configured at the project or organization level. - google.protobuf.FieldMask update_mask = 2 - [(google.api.field_behavior) = REQUIRED]; -} - -// Request for the Publish method. -message PublishRequest { - // Required. The messages in the request will be published on this topic. - // Format is `projects/{project}/topics/{topic}`. - string topic = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" } - ]; - - // Required. The messages to publish. - repeated PubsubMessage messages = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Response for the `Publish` method. -message PublishResponse { - // The server-assigned ID of each published message, in the same order as - // the messages in the request. IDs are guaranteed to be unique within - // the topic. - repeated string message_ids = 1; -} - -// Request for the `ListTopics` method. -message ListTopicsRequest { - // Required. The name of the project in which to list topics. - // Format is `projects/{project-id}`. - string project = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - // Maximum number of topics to return. - int32 page_size = 2; - - // The value returned by the last `ListTopicsResponse`; indicates that this is - // a continuation of a prior `ListTopics` call, and that the system should - // return the next page of data. 
- string page_token = 3; -} - -// Response for the `ListTopics` method. -message ListTopicsResponse { - // The resulting topics. - repeated Topic topics = 1; - - // If not empty, indicates that there may be more topics that match the - // request; this value should be passed in a new `ListTopicsRequest`. - string next_page_token = 2; -} - -// Request for the `ListTopicSubscriptions` method. -message ListTopicSubscriptionsRequest { - // Required. The name of the topic that subscriptions are attached to. - // Format is `projects/{project}/topics/{topic}`. - string topic = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" } - ]; - - // Maximum number of subscription names to return. - int32 page_size = 2; - - // The value returned by the last `ListTopicSubscriptionsResponse`; indicates - // that this is a continuation of a prior `ListTopicSubscriptions` call, and - // that the system should return the next page of data. - string page_token = 3; -} - -// Response for the `ListTopicSubscriptions` method. -message ListTopicSubscriptionsResponse { - // The names of subscriptions attached to the topic specified in the request. - repeated string subscriptions = 1 [(google.api.resource_reference) = { - type: "pubsub.googleapis.com/Subscription" - }]; - - // If not empty, indicates that there may be more subscriptions that match - // the request; this value should be passed in a new - // `ListTopicSubscriptionsRequest` to get more subscriptions. - string next_page_token = 2; -} - -// Request for the `ListTopicSnapshots` method. -message ListTopicSnapshotsRequest { - // Required. The name of the topic that snapshots are attached to. - // Format is `projects/{project}/topics/{topic}`. - string topic = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" } - ]; - - // Maximum number of snapshot names to return. - int32 page_size = 2; - - // The value returned by the last `ListTopicSnapshotsResponse`; indicates - // that this is a continuation of a prior `ListTopicSnapshots` call, and - // that the system should return the next page of data. - string page_token = 3; -} - -// Response for the `ListTopicSnapshots` method. -message ListTopicSnapshotsResponse { - // The names of the snapshots that match the request. - repeated string snapshots = 1; - - // If not empty, indicates that there may be more snapshots that match - // the request; this value should be passed in a new - // `ListTopicSnapshotsRequest` to get more snapshots. - string next_page_token = 2; -} - -// Request for the `DeleteTopic` method. -message DeleteTopicRequest { - // Required. Name of the topic to delete. - // Format is `projects/{project}/topics/{topic}`. - string topic = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" } - ]; -} - -// Request for the DetachSubscription method. -message DetachSubscriptionRequest { - // Required. The subscription to detach. - // Format is `projects/{project}/subscriptions/{subscription}`. - string subscription = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "pubsub.googleapis.com/Subscription" - } - ]; -} - -// Response for the DetachSubscription method. -// Reserved for future use. 
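Taken together, `CreateTopic` and `Publish` form the minimal publisher round trip. A hedged sketch of the call sites, assuming tonic/prost bindings generated from this file the same way flyrs' build.rs compiles the Flyte protos — the `google::pubsub::v1` module path follows prost convention, and the endpoint/auth wiring is a placeholder, not something this change sets up:

use google::pubsub::v1::{publisher_client::PublisherClient, PublishRequest, PubsubMessage, Topic};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoint; real use needs TLS config and OAuth metadata.
    let mut client = PublisherClient::connect("https://pubsub.googleapis.com").await?;

    // CreateTopic takes the Topic resource itself rather than a wrapper request.
    let topic = client
        .create_topic(Topic {
            name: "projects/my-project/topics/my-topic".into(),
            ..Default::default()
        })
        .await?
        .into_inner();

    // Publish returns one server-assigned message ID per message, in request order.
    let ids = client
        .publish(PublishRequest {
            topic: topic.name,
            messages: vec![PubsubMessage {
                data: b"hello".to_vec(),
                // Same non-empty key => ordered delivery, if the subscription enables ordering.
                ordering_key: "user-42".into(),
                ..Default::default()
            }],
        })
        .await?
        .into_inner()
        .message_ids;
    println!("published {:?}", ids);
    Ok(())
}

Per `PublishResponse` above, the returned IDs line up one-to-one with the request's messages and are unique within the topic.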
-message DetachSubscriptionResponse {} - -// The service that an application uses to manipulate subscriptions and to -// consume messages from a subscription via the `Pull` method or by -// establishing a bi-directional stream using the `StreamingPull` method. -service Subscriber { - option (google.api.default_host) = "pubsub.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/pubsub"; - - // Creates a subscription to a given topic. See the [resource name rules] - // (https://cloud.google.com/pubsub/docs/admin#resource_names). - // If the subscription already exists, returns `ALREADY_EXISTS`. - // If the corresponding topic doesn't exist, returns `NOT_FOUND`. - // - // If the name is not provided in the request, the server will assign a random - // name for this subscription on the same project as the topic, conforming - // to the [resource name format] - // (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated - // name is populated in the returned Subscription object. Note that for REST - // API requests, you must specify a name in the request. - rpc CreateSubscription(Subscription) returns (Subscription) { - option (google.api.http) = { - put: "/v1/{name=projects/*/subscriptions/*}" - body: "*" - }; - option (google.api.method_signature) = - "name,topic,push_config,ack_deadline_seconds"; - } - - // Gets the configuration details of a subscription. - rpc GetSubscription(GetSubscriptionRequest) returns (Subscription) { - option (google.api.http) = { - get: "/v1/{subscription=projects/*/subscriptions/*}" - }; - option (google.api.method_signature) = "subscription"; - } - - // Updates an existing subscription. Note that certain properties of a - // subscription, such as its topic, are not modifiable. - rpc UpdateSubscription(UpdateSubscriptionRequest) returns (Subscription) { - option (google.api.http) = { - patch: "/v1/{subscription.name=projects/*/subscriptions/*}" - body: "*" - }; - } - - // Lists matching subscriptions. - rpc ListSubscriptions(ListSubscriptionsRequest) - returns (ListSubscriptionsResponse) { - option (google.api.http) = { - get: "/v1/{project=projects/*}/subscriptions" - }; - option (google.api.method_signature) = "project"; - } - - // Deletes an existing subscription. All messages retained in the subscription - // are immediately dropped. Calls to `Pull` after deletion will return - // `NOT_FOUND`. After a subscription is deleted, a new one may be created with - // the same name, but the new one has no association with the old - // subscription or its topic unless the same topic is specified. - rpc DeleteSubscription(DeleteSubscriptionRequest) - returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{subscription=projects/*/subscriptions/*}" - }; - option (google.api.method_signature) = "subscription"; - } - - // Modifies the ack deadline for a specific message. This method is useful - // to indicate that more time is needed to process a message by the - // subscriber, or to make the message available for redelivery if the - // processing was interrupted. Note that this does not modify the - // subscription-level `ackDeadlineSeconds` used for subsequent messages. 
- rpc ModifyAckDeadline(ModifyAckDeadlineRequest) - returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v1/{subscription=projects/*/subscriptions/*}:modifyAckDeadline" - body: "*" - }; - option (google.api.method_signature) = - "subscription,ack_ids,ack_deadline_seconds"; - } - - // Acknowledges the messages associated with the `ack_ids` in the - // `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages - // from the subscription. - // - // Acknowledging a message whose ack deadline has expired may succeed, - // but such a message may be redelivered later. Acknowledging a message more - // than once will not result in an error. - rpc Acknowledge(AcknowledgeRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v1/{subscription=projects/*/subscriptions/*}:acknowledge" - body: "*" - }; - option (google.api.method_signature) = "subscription,ack_ids"; - } - - // Pulls messages from the server. The server may return `UNAVAILABLE` if - // there are too many concurrent pull requests pending for the given - // subscription. - rpc Pull(PullRequest) returns (PullResponse) { - option (google.api.http) = { - post: "/v1/{subscription=projects/*/subscriptions/*}:pull" - body: "*" - }; - option (google.api.method_signature) = - "subscription,return_immediately,max_messages"; - } - - // Establishes a stream with the server, which sends messages down to the - // client. The client streams acknowledgements and ack deadline modifications - // back to the server. The server will close the stream and return the status - // on any error. The server may close the stream with status `UNAVAILABLE` to - // reassign server-side resources, in which case, the client should - // re-establish the stream. Flow control can be achieved by configuring the - // underlying RPC channel. - rpc StreamingPull(stream StreamingPullRequest) - returns (stream StreamingPullResponse) {} - - // Modifies the `PushConfig` for a specified subscription. - // - // This may be used to change a push subscription to a pull one (signified by - // an empty `PushConfig`) or vice versa, or change the endpoint URL and other - // attributes of a push subscription. Messages will accumulate for delivery - // continuously through the call regardless of changes to the `PushConfig`. - rpc ModifyPushConfig(ModifyPushConfigRequest) - returns (google.protobuf.Empty) { - option (google.api.http) = { - post: "/v1/{subscription=projects/*/subscriptions/*}:modifyPushConfig" - body: "*" - }; - option (google.api.method_signature) = "subscription,push_config"; - } - - // Gets the configuration details of a snapshot. Snapshots are used in - // Seek - // operations, which allow you to manage message acknowledgments in bulk. That - // is, you can set the acknowledgment state of messages in an existing - // subscription to the state captured by a snapshot. - rpc GetSnapshot(GetSnapshotRequest) returns (Snapshot) { - option (google.api.http) = { - get: "/v1/{snapshot=projects/*/snapshots/*}" - }; - option (google.api.method_signature) = "snapshot"; - } - - // Lists the existing snapshots. Snapshots are used in [Seek]( - // https://cloud.google.com/pubsub/docs/replay-overview) operations, which - // allow you to manage message acknowledgments in bulk. That is, you can set - // the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. 
- rpc ListSnapshots(ListSnapshotsRequest) returns (ListSnapshotsResponse) { - option (google.api.http) = { - get: "/v1/{project=projects/*}/snapshots" - }; - option (google.api.method_signature) = "project"; - } - - // Creates a snapshot from the requested subscription. Snapshots are used in - // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, - // which allow you to manage message acknowledgments in bulk. That is, you can - // set the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. - // If the snapshot already exists, returns `ALREADY_EXISTS`. - // If the requested subscription doesn't exist, returns `NOT_FOUND`. - // If the backlog in the subscription is too old -- and the resulting snapshot - // would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. - // See also the `Snapshot.expire_time` field. If the name is not provided in - // the request, the server will assign a random - // name for this snapshot on the same project as the subscription, conforming - // to the [resource name format] - // (https://cloud.google.com/pubsub/docs/admin#resource_names). The - // generated name is populated in the returned Snapshot object. Note that for - // REST API requests, you must specify a name in the request. - rpc CreateSnapshot(CreateSnapshotRequest) returns (Snapshot) { - option (google.api.http) = { - put: "/v1/{name=projects/*/snapshots/*}" - body: "*" - }; - option (google.api.method_signature) = "name,subscription"; - } - - // Updates an existing snapshot. Snapshots are used in - // Seek - // operations, which allow - // you to manage message acknowledgments in bulk. That is, you can set the - // acknowledgment state of messages in an existing subscription to the state - // captured by a snapshot. - rpc UpdateSnapshot(UpdateSnapshotRequest) returns (Snapshot) { - option (google.api.http) = { - patch: "/v1/{snapshot.name=projects/*/snapshots/*}" - body: "*" - }; - } - - // Removes an existing snapshot. Snapshots are used in [Seek] - // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which - // allow you to manage message acknowledgments in bulk. That is, you can set - // the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. - // When the snapshot is deleted, all messages retained in the snapshot - // are immediately dropped. After a snapshot is deleted, a new one may be - // created with the same name, but the new one has no association with the old - // snapshot or its subscription, unless the same subscription is specified. - rpc DeleteSnapshot(DeleteSnapshotRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{snapshot=projects/*/snapshots/*}" - }; - option (google.api.method_signature) = "snapshot"; - } - - // Seeks an existing subscription to a point in time or to a given snapshot, - // whichever is provided in the request. Snapshots are used in [Seek] - // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which - // allow you to manage message acknowledgments in bulk. That is, you can set - // the acknowledgment state of messages in an existing subscription to the - // state captured by a snapshot. Note that both the subscription and the - // snapshot must be on the same topic. 
- rpc Seek(SeekRequest) returns (SeekResponse) { - option (google.api.http) = { - post: "/v1/{subscription=projects/*/subscriptions/*}:seek" - body: "*" - }; - } -} - -// A subscription resource. -message Subscription { - option (google.api.resource) = { - type: "pubsub.googleapis.com/Subscription" - pattern: "projects/{project}/subscriptions/{subscription}" - }; - - // Required. The name of the subscription. It must have the format - // `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must - // start with a letter, and contain only letters (`[A-Za-z]`), numbers - // (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), - // plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters - // in length, and it must not start with `"goog"`. - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The name of the topic from which this subscription is receiving - // messages. Format is `projects/{project}/topics/{topic}`. The value of this - // field will be `_deleted-topic_` if the topic has been deleted. - string topic = 2 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" } - ]; - - // If push delivery is used with this subscription, this field is - // used to configure it. An empty `pushConfig` signifies that the subscriber - // will pull and ack messages using API methods. - PushConfig push_config = 4; - - // The approximate amount of time (on a best-effort basis) Pub/Sub waits for - // the subscriber to acknowledge receipt before resending the message. In the - // interval after the message is delivered and before it is acknowledged, it - // is considered to be outstanding. During that time period, the - // message will not be redelivered (on a best-effort basis). - // - // For pull subscriptions, this value is used as the initial value for the ack - // deadline. To override this value for a given message, call - // `ModifyAckDeadline` with the corresponding `ack_id` if using - // non-streaming pull or send the `ack_id` in a - // `StreamingModifyAckDeadlineRequest` if using streaming pull. - // The minimum custom deadline you can specify is 10 seconds. - // The maximum custom deadline you can specify is 600 seconds (10 minutes). - // If this parameter is 0, a default value of 10 seconds is used. - // - // For push delivery, this value is also used to set the request timeout for - // the call to the push endpoint. - // - // If the subscriber never acknowledges the message, the Pub/Sub - // system will eventually redeliver the message. - int32 ack_deadline_seconds = 5; - - // Indicates whether to retain acknowledged messages. If true, then - // messages are not expunged from the subscription's backlog, even if they are - // acknowledged, until they fall out of the `message_retention_duration` - // window. This must be true if you would like to [Seek to a timestamp] - // (https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time). - bool retain_acked_messages = 7; - - // How long to retain unacknowledged messages in the subscription's backlog, - // from the moment a message is published. - // If `retain_acked_messages` is true, then this also configures the retention - // of acknowledged messages, and thus configures how far back in time a `Seek` - // can be done. Defaults to 7 days. Cannot be more than 7 days or less than 10 - // minutes. - google.protobuf.Duration message_retention_duration = 8; - - // See Creating and - // managing labels. 
- map labels = 9; - - // If true, messages published with the same `ordering_key` in `PubsubMessage` - // will be delivered to the subscribers in the order in which they - // are received by the Pub/Sub system. Otherwise, they may be delivered in - // any order. - bool enable_message_ordering = 10; - - // A policy that specifies the conditions for this subscription's expiration. - // A subscription is considered active as long as any connected subscriber is - // successfully consuming messages from the subscription or is issuing - // operations on the subscription. If `expiration_policy` is not set, a - // *default policy* with `ttl` of 31 days will be used. The minimum allowed - // value for `expiration_policy.ttl` is 1 day. - ExpirationPolicy expiration_policy = 11; - - // An expression written in the Pub/Sub [filter - // language](https://cloud.google.com/pubsub/docs/filtering). If non-empty, - // then only `PubsubMessage`s whose `attributes` field matches the filter are - // delivered on this subscription. If empty, then no messages are filtered - // out. - string filter = 12; - - // A policy that specifies the conditions for dead lettering messages in - // this subscription. If dead_letter_policy is not set, dead lettering - // is disabled. - // - // The Cloud Pub/Sub service account associated with this subscriptions's - // parent project (i.e., - // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have - // permission to Acknowledge() messages on this subscription. - DeadLetterPolicy dead_letter_policy = 13; - - // A policy that specifies how Pub/Sub retries message delivery for this - // subscription. - // - // If not set, the default retry policy is applied. This generally implies - // that messages will be retried as soon as possible for healthy subscribers. - // RetryPolicy will be triggered on NACKs or acknowledgement deadline - // exceeded events for a given message. - RetryPolicy retry_policy = 14; - - // Indicates whether the subscription is detached from its topic. Detached - // subscriptions don't receive messages from their topic and don't retain any - // backlog. `Pull` and `StreamingPull` requests will return - // FAILED_PRECONDITION. If the subscription is a push subscription, pushes to - // the endpoint will not be made. - bool detached = 15; -} - -// A policy that specifies how Cloud Pub/Sub retries message delivery. -// -// Retry delay will be exponential based on provided minimum and maximum -// backoffs. https://en.wikipedia.org/wiki/Exponential_backoff. -// -// RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded -// events for a given message. -// -// Retry Policy is implemented on a best effort basis. At times, the delay -// between consecutive deliveries may not match the configuration. That is, -// delay can be more or less than configured backoff. -message RetryPolicy { - // The minimum delay between consecutive deliveries of a given message. - // Value should be between 0 and 600 seconds. Defaults to 10 seconds. - google.protobuf.Duration minimum_backoff = 1; - - // The maximum delay between consecutive deliveries of a given message. - // Value should be between 0 and 600 seconds. Defaults to 600 seconds. - google.protobuf.Duration maximum_backoff = 2; -} - -// Dead lettering is done on a best effort basis. The same message might be -// dead lettered multiple times. -// -// If validation on any of the fields fails at subscription creation/updation, -// the create/update subscription request will fail. 
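Since `RetryPolicy`'s backoff bounds are ordinary `google.protobuf.Duration` fields, configuring exponential backoff from the generated types is plain struct literals. A sketch under the same assumed tonic/prost bindings (prost maps well-known Durations to `prost_types::Duration`; the resource names are illustrative):

use google::pubsub::v1::{subscriber_client::SubscriberClient, RetryPolicy, Subscription};
use prost_types::Duration;

async fn create_with_backoff() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = SubscriberClient::connect("https://pubsub.googleapis.com").await?;

    // CreateSubscription, like CreateTopic, takes the resource directly.
    client
        .create_subscription(Subscription {
            name: "projects/my-project/subscriptions/my-sub".into(),
            topic: "projects/my-project/topics/my-topic".into(),
            ack_deadline_seconds: 30,
            retry_policy: Some(RetryPolicy {
                // Bounds per the comments above: each value must be 0..=600 seconds.
                minimum_backoff: Some(Duration { seconds: 10, nanos: 0 }),
                maximum_backoff: Some(Duration { seconds: 300, nanos: 0 }),
            }),
            ..Default::default()
        })
        .await?;
    Ok(())
}

As the comment notes, the policy is applied on a best-effort basis: actual redelivery delays may drift from the configured bounds.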
-message DeadLetterPolicy { - // The name of the topic to which dead letter messages should be published. - // Format is `projects/{project}/topics/{topic}`.The Cloud Pub/Sub service - // account associated with the enclosing subscription's parent project (i.e., - // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have - // permission to Publish() to this topic. - // - // The operation will fail if the topic does not exist. - // Users should ensure that there is a subscription attached to this topic - // since messages published to a topic with no subscriptions are lost. - string dead_letter_topic = 1; - - // The maximum number of delivery attempts for any message. The value must be - // between 5 and 100. - // - // The number of delivery attempts is defined as 1 + (the sum of number of - // NACKs and number of times the acknowledgement deadline has been exceeded - // for the message). - // - // A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that - // client libraries may automatically extend ack_deadlines. - // - // This field will be honored on a best effort basis. - // - // If this parameter is 0, a default value of 5 is used. - int32 max_delivery_attempts = 2; -} - -// A policy that specifies the conditions for resource expiration (i.e., -// automatic resource deletion). -message ExpirationPolicy { - // Specifies the "time-to-live" duration for an associated resource. The - // resource expires if it is not active for a period of `ttl`. The definition - // of "activity" depends on the type of the associated resource. The minimum - // and maximum allowed values for `ttl` depend on the type of the associated - // resource, as well. If `ttl` is not set, the associated resource never - // expires. - google.protobuf.Duration ttl = 1; -} - -// Configuration for a push delivery endpoint. -message PushConfig { - // Contains information needed for generating an - // [OpenID Connect - // token](https://developers.google.com/identity/protocols/OpenIDConnect). - message OidcToken { - // [Service account - // email](https://cloud.google.com/iam/docs/service-accounts) - // to be used for generating the OIDC token. The caller (for - // CreateSubscription, UpdateSubscription, and ModifyPushConfig RPCs) must - // have the iam.serviceAccounts.actAs permission for the service account. - string service_account_email = 1; - - // Audience to be used when generating OIDC token. The audience claim - // identifies the recipients that the JWT is intended for. The audience - // value is a single case-sensitive string. Having multiple values (array) - // for the audience field is not supported. More info about the OIDC JWT - // token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3 - // Note: if not specified, the Push endpoint URL will be used. - string audience = 2; - } - - // A URL locating the endpoint to which messages should be pushed. - // For example, a Webhook endpoint might use `https://example.com/push`. - string push_endpoint = 1; - - // Endpoint configuration attributes that can be used to control different - // aspects of the message delivery. - // - // The only currently supported attribute is `x-goog-version`, which you can - // use to change the format of the pushed message. This attribute - // indicates the version of the data expected by the endpoint. This - // controls the shape of the pushed message (i.e., its fields and metadata). 
- // - // If not present during the `CreateSubscription` call, it will default to - // the version of the Pub/Sub API used to make such call. If not present in a - // `ModifyPushConfig` call, its value will not be changed. `GetSubscription` - // calls will always return a valid version, even if the subscription was - // created without this attribute. - // - // The only supported values for the `x-goog-version` attribute are: - // - // * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API. - // * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API. - // - // For example: - //
- // <pre><code>attributes { "x-goog-version": "v1" } </code></pre>
- map attributes = 2; - - // An authentication method used by push endpoints to verify the source of - // push requests. This can be used with push endpoints that are private by - // default to allow requests only from the Cloud Pub/Sub system, for example. - // This field is optional and should be set only by users interested in - // authenticated push. - oneof authentication_method { - // If specified, Pub/Sub will generate and attach an OIDC JWT token as an - // `Authorization` header in the HTTP request for every pushed message. - OidcToken oidc_token = 3; - } -} - -// A message and its corresponding acknowledgment ID. -message ReceivedMessage { - // This ID can be used to acknowledge the received message. - string ack_id = 1; - - // The message. - PubsubMessage message = 2; - - // The approximate number of times that Cloud Pub/Sub has attempted to deliver - // the associated message to a subscriber. - // - // More precisely, this is 1 + (number of NACKs) + - // (number of ack_deadline exceeds) for this message. - // - // A NACK is any call to ModifyAckDeadline with a 0 deadline. An ack_deadline - // exceeds event is whenever a message is not acknowledged within - // ack_deadline. Note that ack_deadline is initially - // Subscription.ackDeadlineSeconds, but may get extended automatically by - // the client library. - // - // Upon the first delivery of a given message, `delivery_attempt` will have a - // value of 1. The value is calculated at best effort and is approximate. - // - // If a DeadLetterPolicy is not set on the subscription, this will be 0. - int32 delivery_attempt = 3; -} - -// Request for the GetSubscription method. -message GetSubscriptionRequest { - // Required. The name of the subscription to get. - // Format is `projects/{project}/subscriptions/{sub}`. - string subscription = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "pubsub.googleapis.com/Subscription" - } - ]; -} - -// Request for the UpdateSubscription method. -message UpdateSubscriptionRequest { - // Required. The updated subscription object. - Subscription subscription = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. Indicates which fields in the provided subscription to update. - // Must be specified and non-empty. - google.protobuf.FieldMask update_mask = 2 - [(google.api.field_behavior) = REQUIRED]; -} - -// Request for the `ListSubscriptions` method. -message ListSubscriptionsRequest { - // Required. The name of the project in which to list subscriptions. - // Format is `projects/{project-id}`. - string project = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - // Maximum number of subscriptions to return. - int32 page_size = 2; - - // The value returned by the last `ListSubscriptionsResponse`; indicates that - // this is a continuation of a prior `ListSubscriptions` call, and that the - // system should return the next page of data. - string page_token = 3; -} - -// Response for the `ListSubscriptions` method. -message ListSubscriptionsResponse { - // The subscriptions that match the request. - repeated Subscription subscriptions = 1; - - // If not empty, indicates that there may be more subscriptions that match - // the request; this value should be passed in a new - // `ListSubscriptionsRequest` to get more subscriptions. - string next_page_token = 2; -} - -// Request for the DeleteSubscription method. 
-message DeleteSubscriptionRequest { - // Required. The subscription to delete. - // Format is `projects/{project}/subscriptions/{sub}`. - string subscription = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "pubsub.googleapis.com/Subscription" - } - ]; -} - -// Request for the ModifyPushConfig method. -message ModifyPushConfigRequest { - // Required. The name of the subscription. - // Format is `projects/{project}/subscriptions/{sub}`. - string subscription = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "pubsub.googleapis.com/Subscription" - } - ]; - - // Required. The push configuration for future deliveries. - // - // An empty `pushConfig` indicates that the Pub/Sub system should - // stop pushing messages from the given subscription and allow - // messages to be pulled and acknowledged - effectively pausing - // the subscription if `Pull` or `StreamingPull` is not called. - PushConfig push_config = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Request for the `Pull` method. -message PullRequest { - // Required. The subscription from which messages should be pulled. - // Format is `projects/{project}/subscriptions/{sub}`. - string subscription = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "pubsub.googleapis.com/Subscription" - } - ]; - - // Optional. If this field set to true, the system will respond immediately - // even if it there are no messages available to return in the `Pull` - // response. Otherwise, the system may wait (for a bounded amount of time) - // until at least one message is available, rather than returning no messages. - // Warning: setting this field to `true` is discouraged because it adversely - // impacts the performance of `Pull` operations. We recommend that users do - // not set this field. - bool return_immediately = 2 - [deprecated = true, (google.api.field_behavior) = OPTIONAL]; - - // Required. The maximum number of messages to return for this request. Must - // be a positive integer. The Pub/Sub system may return fewer than the number - // specified. - int32 max_messages = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// Response for the `Pull` method. -message PullResponse { - // Received Pub/Sub messages. The list will be empty if there are no more - // messages available in the backlog. For JSON, the response can be entirely - // empty. The Pub/Sub system may return fewer than the `maxMessages` requested - // even if there are more messages available in the backlog. - repeated ReceivedMessage received_messages = 1; -} - -// Request for the ModifyAckDeadline method. -message ModifyAckDeadlineRequest { - // Required. The name of the subscription. - // Format is `projects/{project}/subscriptions/{sub}`. - string subscription = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "pubsub.googleapis.com/Subscription" - } - ]; - - // Required. List of acknowledgment IDs. - repeated string ack_ids = 4 [(google.api.field_behavior) = REQUIRED]; - - // Required. The new ack deadline with respect to the time this request was - // sent to the Pub/Sub system. For example, if the value is 10, the new ack - // deadline will expire 10 seconds after the `ModifyAckDeadline` call was - // made. Specifying zero might immediately make the message available for - // delivery to another subscriber client. 
This typically results in an - // increase in the rate of message redeliveries (that is, duplicates). - // The minimum deadline you can specify is 0 seconds. - // The maximum deadline you can specify is 600 seconds (10 minutes). - int32 ack_deadline_seconds = 3 [(google.api.field_behavior) = REQUIRED]; -} - -// Request for the Acknowledge method. -message AcknowledgeRequest { - // Required. The subscription whose message is being acknowledged. - // Format is `projects/{project}/subscriptions/{sub}`. - string subscription = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "pubsub.googleapis.com/Subscription" - } - ]; - - // Required. The acknowledgment ID for the messages being acknowledged that - // was returned by the Pub/Sub system in the `Pull` response. Must not be - // empty. - repeated string ack_ids = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Request for the `StreamingPull` streaming RPC method. This request is used to -// establish the initial stream as well as to stream acknowledgements and ack -// deadline modifications from the client to the server. -message StreamingPullRequest { - // Required. The subscription for which to initialize the new stream. This - // must be provided in the first request on the stream, and must not be set in - // subsequent requests from client to server. - // Format is `projects/{project}/subscriptions/{sub}`. - string subscription = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "pubsub.googleapis.com/Subscription" - } - ]; - - // List of acknowledgement IDs for acknowledging previously received messages - // (received on this stream or a different stream). If an ack ID has expired, - // the corresponding message may be redelivered later. Acknowledging a message - // more than once will not result in an error. If the acknowledgement ID is - // malformed, the stream will be aborted with status `INVALID_ARGUMENT`. - repeated string ack_ids = 2; - - // The list of new ack deadlines for the IDs listed in - // `modify_deadline_ack_ids`. The size of this list must be the same as the - // size of `modify_deadline_ack_ids`. If it differs the stream will be aborted - // with `INVALID_ARGUMENT`. Each element in this list is applied to the - // element in the same position in `modify_deadline_ack_ids`. The new ack - // deadline is with respect to the time this request was sent to the Pub/Sub - // system. Must be >= 0. For example, if the value is 10, the new ack deadline - // will expire 10 seconds after this request is received. If the value is 0, - // the message is immediately made available for another streaming or - // non-streaming pull request. If the value is < 0 (an error), the stream will - // be aborted with status `INVALID_ARGUMENT`. - repeated int32 modify_deadline_seconds = 3; - - // List of acknowledgement IDs whose deadline will be modified based on the - // corresponding element in `modify_deadline_seconds`. This field can be used - // to indicate that more time is needed to process a message by the - // subscriber, or to make the message available for redelivery if the - // processing was interrupted. - repeated string modify_deadline_ack_ids = 4; - - // Required. The ack deadline to use for the stream. This must be provided in - // the first request on the stream, but it can also be updated on subsequent - // requests from client to server. The minimum deadline you can specify is 10 - // seconds. 
The maximum deadline you can specify is 600 seconds (10 minutes). - int32 stream_ack_deadline_seconds = 5 - [(google.api.field_behavior) = REQUIRED]; - - // A unique identifier that is used to distinguish client instances from each - // other. Only needs to be provided on the initial request. When a stream - // disconnects and reconnects for the same stream, the client_id should be set - // to the same value so that state associated with the old stream can be - // transferred to the new stream. The same client_id should not be used for - // different client instances. - string client_id = 6; - - // Flow control settings for the maximum number of outstanding messages. When - // there are `max_outstanding_messages` or more currently sent to the - // streaming pull client that have not yet been acked or nacked, the server - // stops sending more messages. The sending of messages resumes once the - // number of outstanding messages is less than this value. If the value is - // <= 0, there is no limit to the number of outstanding messages. This - // property can only be set on the initial StreamingPullRequest. If it is set - // on a subsequent request, the stream will be aborted with status - // `INVALID_ARGUMENT`. - int64 max_outstanding_messages = 7; - - // Flow control settings for the maximum number of outstanding bytes. When - // there are `max_outstanding_bytes` or more worth of messages currently sent - // to the streaming pull client that have not yet been acked or nacked, the - // server will stop sending more messages. The sending of messages resumes - // once the number of outstanding bytes is less than this value. If the value - // is <= 0, there is no limit to the number of outstanding bytes. This - // property can only be set on the initial StreamingPullRequest. If it is set - // on a subsequent request, the stream will be aborted with status - // `INVALID_ARGUMENT`. - int64 max_outstanding_bytes = 8; -} - -// Response for the `StreamingPull` method. This response is used to stream -// messages from the server to the client. -message StreamingPullResponse { - // Received Pub/Sub messages. This will not be empty. - repeated ReceivedMessage received_messages = 1; -} - -// Request for the `CreateSnapshot` method. -message CreateSnapshotRequest { - // Required. User-provided name for this snapshot. If the name is not provided - // in the request, the server will assign a random name for this snapshot on - // the same project as the subscription. Note that for REST API requests, you - // must specify a name. See the resource - // name rules. Format is `projects/{project}/snapshots/{snap}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Snapshot" } - ]; - - // Required. The subscription whose backlog the snapshot retains. - // Specifically, the created snapshot is guaranteed to retain: - // (a) The existing backlog on the subscription. More precisely, this is - // defined as the messages in the subscription's backlog that are - // unacknowledged upon the successful completion of the - // `CreateSnapshot` request; as well as: - // (b) Any messages published to the subscription's topic following the - // successful completion of the CreateSnapshot request. - // Format is `projects/{project}/subscriptions/{sub}`. 
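The unary consume loop pairs `Pull` with `Acknowledge`; `StreamingPull` above is the high-throughput alternative when a long-lived bidirectional stream is acceptable. A sketch of the unary path, same assumed bindings:

use google::pubsub::v1::{subscriber_client::SubscriberClient, AcknowledgeRequest, PullRequest};

async fn drain_once(sub: &str) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = SubscriberClient::connect("https://pubsub.googleapis.com").await?;

    let pulled = client
        .pull(PullRequest {
            subscription: sub.to_string(),
            max_messages: 10,
            ..Default::default() // leaves the deprecated return_immediately unset
        })
        .await?
        .into_inner();

    let ack_ids: Vec<String> = pulled
        .received_messages
        .iter()
        .map(|m| m.ack_id.clone())
        .collect();

    if !ack_ids.is_empty() {
        // Anything not acked here is redelivered once the ack deadline lapses.
        client
            .acknowledge(AcknowledgeRequest {
                subscription: sub.to_string(),
                ack_ids,
            })
            .await?;
    }
    Ok(())
}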
- string subscription = 2 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "pubsub.googleapis.com/Subscription" - } - ]; - - // See Creating and - // managing labels. - map labels = 3; -} - -// Request for the UpdateSnapshot method. -message UpdateSnapshotRequest { - // Required. The updated snapshot object. - Snapshot snapshot = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. Indicates which fields in the provided snapshot to update. - // Must be specified and non-empty. - google.protobuf.FieldMask update_mask = 2 - [(google.api.field_behavior) = REQUIRED]; -} - -// A snapshot resource. Snapshots are used in -// [Seek](https://cloud.google.com/pubsub/docs/replay-overview) -// operations, which allow you to manage message acknowledgments in bulk. That -// is, you can set the acknowledgment state of messages in an existing -// subscription to the state captured by a snapshot. -message Snapshot { - option (google.api.resource) = { - type: "pubsub.googleapis.com/Snapshot" - pattern: "projects/{project}/snapshots/{snapshot}" - }; - - // The name of the snapshot. - string name = 1; - - // The name of the topic from which this snapshot is retaining messages. - string topic = 2 [ - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Topic" } - ]; - - // The snapshot is guaranteed to exist up until this time. - // A newly-created snapshot expires no later than 7 days from the time of its - // creation. Its exact lifetime is determined at creation by the existing - // backlog in the source subscription. Specifically, the lifetime of the - // snapshot is `7 days - (age of oldest unacked message in the subscription)`. - // For example, consider a subscription whose oldest unacked message is 3 days - // old. If a snapshot is created from this subscription, the snapshot -- which - // will always capture this 3-day-old backlog as long as the snapshot - // exists -- will expire in 4 days. The service will refuse to create a - // snapshot that would expire in less than 1 hour after creation. - google.protobuf.Timestamp expire_time = 3; - - // See [Creating and managing labels] - // (https://cloud.google.com/pubsub/docs/labels). - map labels = 4; -} - -// Request for the GetSnapshot method. -message GetSnapshotRequest { - // Required. The name of the snapshot to get. - // Format is `projects/{project}/snapshots/{snap}`. - string snapshot = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Snapshot" } - ]; -} - -// Request for the `ListSnapshots` method. -message ListSnapshotsRequest { - // Required. The name of the project in which to list snapshots. - // Format is `projects/{project-id}`. - string project = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - // Maximum number of snapshots to return. - int32 page_size = 2; - - // The value returned by the last `ListSnapshotsResponse`; indicates that this - // is a continuation of a prior `ListSnapshots` call, and that the system - // should return the next page of data. - string page_token = 3; -} - -// Response for the `ListSnapshots` method. -message ListSnapshotsResponse { - // The resulting snapshots. - repeated Snapshot snapshots = 1; - - // If not empty, indicates that there may be more snapshot that match the - // request; this value should be passed in a new `ListSnapshotsRequest`. 
- string next_page_token = 2; -} - -// Request for the `DeleteSnapshot` method. -message DeleteSnapshotRequest { - // Required. The name of the snapshot to delete. - // Format is `projects/{project}/snapshots/{snap}`. - string snapshot = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Snapshot" } - ]; -} - -// Request for the `Seek` method. -message SeekRequest { - // Required. The subscription to affect. - string subscription = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "pubsub.googleapis.com/Subscription" - } - ]; - - oneof target { - // The time to seek to. - // Messages retained in the subscription that were published before this - // time are marked as acknowledged, and messages retained in the - // subscription that were published after this time are marked as - // unacknowledged. Note that this operation affects only those messages - // retained in the subscription (configured by the combination of - // `message_retention_duration` and `retain_acked_messages`). For example, - // if `time` corresponds to a point before the message retention - // window (or to a point before the system's notion of the subscription - // creation time), only retained messages will be marked as unacknowledged, - // and already-expunged messages will not be restored. - google.protobuf.Timestamp time = 2; - - // The snapshot to seek to. The snapshot's topic must be the same as that of - // the provided subscription. - // Format is `projects/{project}/snapshots/{snap}`. - string snapshot = 3 [(google.api.resource_reference) = { - type: "pubsub.googleapis.com/Snapshot" - }]; - } -} - -// Response for the `Seek` method (this response is empty). -message SeekResponse {} diff --git a/flyrs/protos/google/pubsub/v1/schema.proto b/flyrs/protos/google/pubsub/v1/schema.proto deleted file mode 100644 index ae402ac4de..0000000000 --- a/flyrs/protos/google/pubsub/v1/schema.proto +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2020 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.pubsub.v1; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/protobuf/empty.proto"; - -option cc_enable_arenas = true; -option csharp_namespace = "Google.Cloud.PubSub.V1"; -option go_package = "google.golang.org/genproto/googleapis/pubsub/v1;pubsub"; -option java_multiple_files = true; -option java_outer_classname = "SchemaProto"; -option java_package = "com.google.pubsub.v1"; -option php_namespace = "Google\\Cloud\\PubSub\\V1"; -option ruby_package = "Google::Cloud::PubSub::V1"; - -// Service for doing schema-related operations. -// -// EXPERIMENTAL: The Schema service is in development and may not work yet. 
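Rounding out pubsub.proto before the schema service: `CreateSnapshot` plus `Seek` is the bulk-replay mechanism the comments above keep referencing. A sketch under the same assumed bindings (`seek_request::Target` is prost's naming for the `target` oneof):

use google::pubsub::v1::{
    seek_request::Target, subscriber_client::SubscriberClient, CreateSnapshotRequest, SeekRequest,
};

async fn replay(sub: &str) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = SubscriberClient::connect("https://pubsub.googleapis.com").await?;

    // Capture the subscription's current unacked backlog.
    let snapshot = client
        .create_snapshot(CreateSnapshotRequest {
            name: "projects/my-project/snapshots/my-snap".into(),
            subscription: sub.to_string(),
            ..Default::default()
        })
        .await?
        .into_inner();

    // ... consume (and ack) messages, then rewind to the captured state.
    client
        .seek(SeekRequest {
            subscription: sub.to_string(),
            target: Some(Target::Snapshot(snapshot.name)),
        })
        .await?;
    Ok(())
}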
- -service SchemaService { - option (google.api.default_host) = "pubsub.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/cloud-platform," - "https://www.googleapis.com/auth/pubsub"; - - // Creates a schema. - rpc CreateSchema(CreateSchemaRequest) returns (Schema) { - option (google.api.http) = { - post: "/v1/{parent=projects/*}/schemas" - body: "schema" - }; - option (google.api.method_signature) = "parent,schema,schema_id"; - } - - // Gets a schema. - rpc GetSchema(GetSchemaRequest) returns (Schema) { - option (google.api.http) = { - get: "/v1/{name=projects/*/schemas/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Lists schemas in a project. - rpc ListSchemas(ListSchemasRequest) returns (ListSchemasResponse) { - option (google.api.http) = { - get: "/v1/{parent=projects/*}/schemas" - }; - option (google.api.method_signature) = "parent"; - } - - // Deletes a schema. - rpc DeleteSchema(DeleteSchemaRequest) returns (google.protobuf.Empty) { - option (google.api.http) = { - delete: "/v1/{name=projects/*/schemas/*}" - }; - option (google.api.method_signature) = "name"; - } - - // Validates a schema. - rpc ValidateSchema(ValidateSchemaRequest) returns (ValidateSchemaResponse) { - option (google.api.http) = { - post: "/v1/{parent=projects/*}/schemas:validate" - body: "*" - }; - option (google.api.method_signature) = "parent,schema"; - } - - // Validates a message against a schema. - rpc ValidateMessage(ValidateMessageRequest) - returns (ValidateMessageResponse) { - option (google.api.http) = { - post: "/v1/{parent=projects/*}/schemas:validateMessage" - body: "*" - }; - } -} - -// A schema resource. -message Schema { - option (google.api.resource) = { - type: "pubsub.googleapis.com/Schema" - pattern: "projects/{project}/schemas/{schema}" - }; - - // Possible schema definition types. - enum Type { - // Default value. This value is unused. - TYPE_UNSPECIFIED = 0; - - // A Protocol Buffer schema definition. - PROTOCOL_BUFFER = 1; - - // An Avro schema definition. - AVRO = 2; - } - - // Required. Name of the schema. - // Format is `projects/{project}/schemas/{schema}`. - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // The type of the schema definition. - Type type = 2; - - // The definition of the schema. This should contain a string representing - // the full definition of the schema that is a valid schema definition of - // the type specified in `type`. - string definition = 3; -} - -// Request for the CreateSchema method. -message CreateSchemaRequest { - // Required. The name of the project in which to create the schema. - // Format is `projects/{project-id}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - child_type: "pubsub.googleapis.com/Schema" - } - ]; - - // Required. The schema object to create. - // - // This schema's `name` parameter is ignored. The schema object returned - // by CreateSchema will have a `name` made using the given `parent` and - // `schema_id`. - Schema schema = 2 [(google.api.field_behavior) = REQUIRED]; - - // The ID to use for the schema, which will become the final component of - // the schema's resource name. - // - // See https://cloud.google.com/pubsub/docs/admin#resource_names for resource - // name constraints. - string schema_id = 3; -} - -// View of Schema object fields to be returned by GetSchema and ListSchemas. -enum SchemaView { - // The default / unset value. - // The API will default to the BASIC view. 
- SCHEMA_VIEW_UNSPECIFIED = 0; - - // Include the name and type of the schema, but not the definition. - BASIC = 1; - - // Include all Schema object fields. - FULL = 2; -} - -// Request for the GetSchema method. -message GetSchemaRequest { - // Required. The name of the schema to get. - // Format is `projects/{project}/schemas/{schema}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Schema" } - ]; - - // The set of fields to return in the response. If not set, returns a Schema - // with `name` and `type`, but not `definition`. Set to `FULL` to retrieve all - // fields. - SchemaView view = 2; -} - -// Request for the `ListSchemas` method. -message ListSchemasRequest { - // Required. The name of the project in which to list schemas. - // Format is `projects/{project-id}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - // The set of Schema fields to return in the response. If not set, returns - // Schemas with `name` and `type`, but not `definition`. Set to `FULL` to - // retrieve all fields. - SchemaView view = 2; - - // Maximum number of schemas to return. - int32 page_size = 3; - - // The value returned by the last `ListSchemasResponse`; indicates that - // this is a continuation of a prior `ListSchemas` call, and that the - // system should return the next page of data. - string page_token = 4; -} - -// Response for the `ListSchemas` method. -message ListSchemasResponse { - // The resulting schemas. - repeated Schema schemas = 1; - - // If not empty, indicates that there may be more schemas that match the - // request; this value should be passed in a new `ListSchemasRequest`. - string next_page_token = 2; -} - -// Request for the `DeleteSchema` method. -message DeleteSchemaRequest { - // Required. Name of the schema to delete. - // Format is `projects/{project}/schemas/{schema}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Schema" } - ]; -} - -// Request for the `ValidateSchema` method. -message ValidateSchemaRequest { - // Required. The name of the project in which to validate schemas. - // Format is `projects/{project-id}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - // Required. The schema object to validate. - Schema schema = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Response for the `ValidateSchema` method. -message ValidateSchemaResponse {} - -// Request for the `ValidateMessage` method. -message ValidateMessageRequest { - // Required. The name of the project in which to validate schemas. - // Format is `projects/{project-id}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "cloudresourcemanager.googleapis.com/Project" - } - ]; - - oneof schema_spec { - // Name of the schema against which to validate. - // - // Format is `projects/{project}/schemas/{schema}`. - string name = 2 [ - (google.api.resource_reference) = { type: "pubsub.googleapis.com/Schema" } - ]; - - // Ad-hoc schema against which to validate - Schema schema = 3; - } - - // Message to validate against the provided `schema_spec`. 
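A sketch of the schema round trip — register a definition, then dry-run a payload against it before publishing — under the same assumed bindings; `r#type` and the `as i32` casts follow prost's enum conventions, and the Avro payload is a placeholder:

use google::pubsub::v1::{
    schema::Type, schema_service_client::SchemaServiceClient, validate_message_request::SchemaSpec,
    CreateSchemaRequest, Encoding, Schema, ValidateMessageRequest,
};

async fn register_and_check(avro_definition: &str) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = SchemaServiceClient::connect("https://pubsub.googleapis.com").await?;

    // The final resource name is built from parent + schema_id; Schema.name is ignored here.
    let schema = client
        .create_schema(CreateSchemaRequest {
            parent: "projects/my-project".into(),
            schema: Some(Schema {
                r#type: Type::Avro as i32, // prost stores proto enums as i32
                definition: avro_definition.to_string(),
                ..Default::default()
            }),
            schema_id: "my-schema".into(),
        })
        .await?
        .into_inner();

    // Validate a message against the stored schema; an error status means it failed.
    client
        .validate_message(ValidateMessageRequest {
            parent: "projects/my-project".into(),
            schema_spec: Some(SchemaSpec::Name(schema.name)),
            message: b"{\"some\": \"payload\"}".to_vec(),
            encoding: Encoding::Json as i32,
        })
        .await?;
    Ok(())
}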
- bytes message = 4; - - // The encoding expected for messages - Encoding encoding = 5; -} - -// Response for the `ValidateMessage` method. -message ValidateMessageResponse {} - -// Possible encoding types for messages. -enum Encoding { - // Unspecified - ENCODING_UNSPECIFIED = 0; - - // JSON encoding - JSON = 1; - - // Binary encoding, as defined by the schema type. For some schema types, - // binary encoding may not be available. - BINARY = 2; -} diff --git a/flyrs/protos/protoc-gen-openapiv2/options/annotations.proto b/flyrs/protos/protoc-gen-openapiv2/options/annotations.proto deleted file mode 100644 index d63d3c87eb..0000000000 --- a/flyrs/protos/protoc-gen-openapiv2/options/annotations.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; - -package grpc.gateway.protoc_gen_openapiv2.options; - -import "google/protobuf/descriptor.proto"; -import "protoc-gen-openapiv2/options/openapiv2.proto"; - -option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"; - -extend google.protobuf.FileOptions { - // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. - // - // All IDs are the same, as assigned. It is okay that they are the same, as they extend - // different descriptor messages. - Swagger openapiv2_swagger = 1042; -} -extend google.protobuf.MethodOptions { - // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. - // - // All IDs are the same, as assigned. It is okay that they are the same, as they extend - // different descriptor messages. - Operation openapiv2_operation = 1042; -} -extend google.protobuf.MessageOptions { - // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. - // - // All IDs are the same, as assigned. It is okay that they are the same, as they extend - // different descriptor messages. - Schema openapiv2_schema = 1042; -} -extend google.protobuf.ServiceOptions { - // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. - // - // All IDs are the same, as assigned. It is okay that they are the same, as they extend - // different descriptor messages. - Tag openapiv2_tag = 1042; -} -extend google.protobuf.FieldOptions { - // ID assigned by protobuf-global-extension-registry@google.com for gRPC-Gateway project. - // - // All IDs are the same, as assigned. It is okay that they are the same, as they extend - // different descriptor messages. - JSONSchema openapiv2_field = 1042; -} diff --git a/flyrs/protos/protoc-gen-openapiv2/options/openapiv2.proto b/flyrs/protos/protoc-gen-openapiv2/options/openapiv2.proto deleted file mode 100644 index 9a17f021ce..0000000000 --- a/flyrs/protos/protoc-gen-openapiv2/options/openapiv2.proto +++ /dev/null @@ -1,720 +0,0 @@ -syntax = "proto3"; - -package grpc.gateway.protoc_gen_openapiv2.options; - -import "google/protobuf/struct.proto"; - -option go_package = "github.com/grpc-ecosystem/grpc-gateway/v2/protoc-gen-openapiv2/options"; - -// Scheme describes the schemes supported by the OpenAPI Swagger -// and Operation objects. -enum Scheme { - UNKNOWN = 0; - HTTP = 1; - HTTPS = 2; - WS = 3; - WSS = 4; -} - -// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. 
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#swaggerObject
-//
-// Example:
-//
-//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
-//    info: {
-//      title: "Echo API";
-//      version: "1.0";
-//      description: "";
-//      contact: {
-//        name: "gRPC-Gateway project";
-//        url: "https://github.com/grpc-ecosystem/grpc-gateway";
-//        email: "none@example.com";
-//      };
-//      license: {
-//        name: "BSD 3-Clause License";
-//        url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
-//      };
-//    };
-//    schemes: HTTPS;
-//    consumes: "application/json";
-//    produces: "application/json";
-//  };
-//
-message Swagger {
-  // Specifies the OpenAPI Specification version being used. It can be
-  // used by the OpenAPI UI and other clients to interpret the API listing. The
-  // value MUST be "2.0".
-  string swagger = 1;
-  // Provides metadata about the API. The metadata can be used by the
-  // clients if needed.
-  Info info = 2;
-  // The host (name or ip) serving the API. This MUST be the host only and does
-  // not include the scheme nor sub-paths. It MAY include a port. If the host is
-  // not included, the host serving the documentation is to be used (including
-  // the port). The host does not support path templating.
-  string host = 3;
-  // The base path on which the API is served, which is relative to the host. If
-  // it is not included, the API is served directly under the host. The value
-  // MUST start with a leading slash (/). The basePath does not support path
-  // templating.
-  // Note that using `base_path` does not change the endpoint paths that are
-  // generated in the resulting OpenAPI file. If you wish to use `base_path`
-  // with relatively generated OpenAPI paths, the `base_path` prefix must be
-  // manually removed from your `google.api.http` paths and your code changed to
-  // serve the API from the `base_path`.
-  string base_path = 4;
-  // The transfer protocol of the API. Values MUST be from the list: "http",
-  // "https", "ws", "wss". If the schemes is not included, the default scheme to
-  // be used is the one used to access the OpenAPI definition itself.
-  repeated Scheme schemes = 5;
-  // A list of MIME types the APIs can consume. This is global to all APIs but
-  // can be overridden on specific API calls. Value MUST be as described under
-  // Mime Types.
-  repeated string consumes = 6;
-  // A list of MIME types the APIs can produce. This is global to all APIs but
-  // can be overridden on specific API calls. Value MUST be as described under
-  // Mime Types.
-  repeated string produces = 7;
-  // field 8 is reserved for 'paths'.
-  reserved 8;
-  // field 9 is reserved for 'definitions', which at this time are already
-  // exposed as and customizable as proto messages.
-  reserved 9;
-  // An object to hold responses that can be used across operations. This
-  // property does not define global responses for all operations.
-  map<string, Response> responses = 10;
-  // Security scheme definitions that can be used across the specification.
-  SecurityDefinitions security_definitions = 11;
-  // A declaration of which security schemes are applied for the API as a whole.
-  // The list of values describes alternative security schemes that can be used
-  // (that is, there is a logical OR between the security requirements).
-  // Individual operations can override this definition.
-  repeated SecurityRequirement security = 12;
-  // A list of tags for API documentation control. Tags can be used for logical
-  // grouping of operations by resources or any other qualifier.
-  repeated Tag tags = 13;
-  // Additional external documentation.
-  ExternalDocumentation external_docs = 14;
-  // Custom properties that start with "x-" such as "x-foo" used to describe
-  // extra functionality that is not covered by the standard OpenAPI Specification.
-  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
-  map<string, google.protobuf.Value> extensions = 15;
-}
-
-// `Operation` is a representation of OpenAPI v2 specification's Operation object.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#operationObject
-//
-// Example:
-//
-//  service EchoService {
-//    rpc Echo(SimpleMessage) returns (SimpleMessage) {
-//      option (google.api.http) = {
-//        get: "/v1/example/echo/{id}"
-//      };
-//
-//      option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
-//        summary: "Get a message.";
-//        operation_id: "getMessage";
-//        tags: "echo";
-//        responses: {
-//          key: "200"
-//          value: {
-//            description: "OK";
-//          }
-//        }
-//      };
-//    }
-//  }
-message Operation {
-  // A list of tags for API documentation control. Tags can be used for logical
-  // grouping of operations by resources or any other qualifier.
-  repeated string tags = 1;
-  // A short summary of what the operation does. For maximum readability in the
-  // swagger-ui, this field SHOULD be less than 120 characters.
-  string summary = 2;
-  // A verbose explanation of the operation behavior. GFM syntax can be used for
-  // rich text representation.
-  string description = 3;
-  // Additional external documentation for this operation.
-  ExternalDocumentation external_docs = 4;
-  // Unique string used to identify the operation. The id MUST be unique among
-  // all operations described in the API. Tools and libraries MAY use the
-  // operationId to uniquely identify an operation, therefore, it is recommended
-  // to follow common programming naming conventions.
-  string operation_id = 5;
-  // A list of MIME types the operation can consume. This overrides the consumes
-  // definition at the OpenAPI Object. An empty value MAY be used to clear the
-  // global definition. Value MUST be as described under Mime Types.
-  repeated string consumes = 6;
-  // A list of MIME types the operation can produce. This overrides the produces
-  // definition at the OpenAPI Object. An empty value MAY be used to clear the
-  // global definition. Value MUST be as described under Mime Types.
-  repeated string produces = 7;
-  // field 8 is reserved for 'parameters'.
-  reserved 8;
-  // The list of possible responses as they are returned from executing this
-  // operation.
-  map<string, Response> responses = 9;
-  // The transfer protocol for the operation. Values MUST be from the list:
-  // "http", "https", "ws", "wss". The value overrides the OpenAPI Object
-  // schemes definition.
-  repeated Scheme schemes = 10;
-  // Declares this operation to be deprecated. Usage of the declared operation
-  // should be refrained. Default value is false.
-  bool deprecated = 11;
-  // A declaration of which security schemes are applied for this operation. The
-  // list of values describes alternative security schemes that can be used
-  // (that is, there is a logical OR between the security requirements). This
-  // definition overrides any declared top-level security. To remove a top-level
-  // security declaration, an empty array can be used.
-  repeated SecurityRequirement security = 12;
-  // Custom properties that start with "x-" such as "x-foo" used to describe
-  // extra functionality that is not covered by the standard OpenAPI Specification.
-  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
-  map<string, google.protobuf.Value> extensions = 13;
-  // Custom parameters such as HTTP request headers.
-  // See: https://swagger.io/docs/specification/2-0/describing-parameters/
-  // and https://swagger.io/specification/v2/#parameter-object.
-  Parameters parameters = 14;
-}
-
-// `Parameters` is a representation of OpenAPI v2 specification's parameters object.
-// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only
-// allow header parameters to be set here since we do not want users specifying custom non-header
-// parameters beyond those inferred from the Protobuf schema.
-// See: https://swagger.io/specification/v2/#parameter-object
-message Parameters {
-  // `Headers` is one or more HTTP header parameter.
-  // See: https://swagger.io/docs/specification/2-0/describing-parameters/#header-parameters
-  repeated HeaderParameter headers = 1;
-}
-
-// `HeaderParameter` is an HTTP header parameter.
-// See: https://swagger.io/specification/v2/#parameter-object
-message HeaderParameter {
-  // `Type` is a supported HTTP header type.
-  // See https://swagger.io/specification/v2/#parameterType.
-  enum Type {
-    UNKNOWN = 0;
-    STRING = 1;
-    NUMBER = 2;
-    INTEGER = 3;
-    BOOLEAN = 4;
-  }
-
-  // `Name` is the header name.
-  string name = 1;
-  // `Description` is a short description of the header.
-  string description = 2;
-  // `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
-  // See: https://swagger.io/specification/v2/#parameterType.
-  Type type = 3;
-  // `Format` The extending format for the previously mentioned type.
-  string format = 4;
-  // `Required` indicates if the header is optional
-  bool required = 5;
-  // field 6 is reserved for 'items', but in OpenAPI-specific way.
-  reserved 6;
-  // field 7 is reserved `Collection Format`. Determines the format of the array if type array is used.
-  reserved 7;
-}
-
-// `Header` is a representation of OpenAPI v2 specification's Header object.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#headerObject
-//
-message Header {
-  // `Description` is a short description of the header.
-  string description = 1;
-  // The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported.
-  string type = 2;
-  // `Format` The extending format for the previously mentioned type.
-  string format = 3;
-  // field 4 is reserved for 'items', but in OpenAPI-specific way.
-  reserved 4;
-  // field 5 is reserved `Collection Format` Determines the format of the array if type array is used.
-  reserved 5;
-  // `Default` Declares the value of the header that the server will use if none is provided.
-  // See: https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-6.2.
-  // Unlike JSON Schema this value MUST conform to the defined type for the header.
-  string default = 6;
-  // field 7 is reserved for 'maximum'.
-  reserved 7;
-  // field 8 is reserved for 'exclusiveMaximum'.
-  reserved 8;
-  // field 9 is reserved for 'minimum'.
-  reserved 9;
-  // field 10 is reserved for 'exclusiveMinimum'.
-  reserved 10;
-  // field 11 is reserved for 'maxLength'.
-  reserved 11;
-  // field 12 is reserved for 'minLength'.
-  reserved 12;
-  // 'Pattern' See https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.2.3.
-  string pattern = 13;
-  // field 14 is reserved for 'maxItems'.
-  reserved 14;
-  // field 15 is reserved for 'minItems'.
-  reserved 15;
-  // field 16 is reserved for 'uniqueItems'.
-  reserved 16;
-  // field 17 is reserved for 'enum'.
-  reserved 17;
-  // field 18 is reserved for 'multipleOf'.
-  reserved 18;
-}
-
-// `Response` is a representation of OpenAPI v2 specification's Response object.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#responseObject
-//
-message Response {
-  // `Description` is a short description of the response.
-  // GFM syntax can be used for rich text representation.
-  string description = 1;
-  // `Schema` optionally defines the structure of the response.
-  // If `Schema` is not provided, it means there is no content to the response.
-  Schema schema = 2;
-  // `Headers` A list of headers that are sent with the response.
-  // `Header` name is expected to be a string in the canonical format of the MIME header key
-  // See: https://golang.org/pkg/net/textproto/#CanonicalMIMEHeaderKey
-  map<string, Header> headers = 3;
-  // `Examples` gives per-mimetype response examples.
-  // See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#example-object
-  map<string, string> examples = 4;
-  // Custom properties that start with "x-" such as "x-foo" used to describe
-  // extra functionality that is not covered by the standard OpenAPI Specification.
-  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
-  map<string, google.protobuf.Value> extensions = 5;
-}
-
-// `Info` is a representation of OpenAPI v2 specification's Info object.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#infoObject
-//
-// Example:
-//
-//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
-//    info: {
-//      title: "Echo API";
-//      version: "1.0";
-//      description: "";
-//      contact: {
-//        name: "gRPC-Gateway project";
-//        url: "https://github.com/grpc-ecosystem/grpc-gateway";
-//        email: "none@example.com";
-//      };
-//      license: {
-//        name: "BSD 3-Clause License";
-//        url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
-//      };
-//    };
-//    ...
-//  };
-//
-message Info {
-  // The title of the application.
-  string title = 1;
-  // A short description of the application. GFM syntax can be used for rich
-  // text representation.
-  string description = 2;
-  // The Terms of Service for the API.
-  string terms_of_service = 3;
-  // The contact information for the exposed API.
-  Contact contact = 4;
-  // The license information for the exposed API.
-  License license = 5;
-  // Provides the version of the application API (not to be confused
-  // with the specification version).
-  string version = 6;
-  // Custom properties that start with "x-" such as "x-foo" used to describe
-  // extra functionality that is not covered by the standard OpenAPI Specification.
-  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
-  map<string, google.protobuf.Value> extensions = 7;
-}
-
-// `Contact` is a representation of OpenAPI v2 specification's Contact object.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#contactObject
-//
-// Example:
-//
-//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
-//    info: {
-//      ...
-//      contact: {
-//        name: "gRPC-Gateway project";
-//        url: "https://github.com/grpc-ecosystem/grpc-gateway";
-//        email: "none@example.com";
-//      };
-//      ...
-//    };
-//    ...
-//  };
-//
-message Contact {
-  // The identifying name of the contact person/organization.
-  string name = 1;
-  // The URL pointing to the contact information. MUST be in the format of a
-  // URL.
-  string url = 2;
-  // The email address of the contact person/organization. MUST be in the format
-  // of an email address.
-  string email = 3;
-}
-
-// `License` is a representation of OpenAPI v2 specification's License object.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#licenseObject
-//
-// Example:
-//
-//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
-//    info: {
-//      ...
-//      license: {
-//        name: "BSD 3-Clause License";
-//        url: "https://github.com/grpc-ecosystem/grpc-gateway/blob/main/LICENSE";
-//      };
-//      ...
-//    };
-//    ...
-//  };
-//
-message License {
-  // The license name used for the API.
-  string name = 1;
-  // A URL to the license used for the API. MUST be in the format of a URL.
-  string url = 2;
-}
-
-// `ExternalDocumentation` is a representation of OpenAPI v2 specification's
-// ExternalDocumentation object.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#externalDocumentationObject
-//
-// Example:
-//
-//  option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = {
-//    ...
-//    external_docs: {
-//      description: "More about gRPC-Gateway";
-//      url: "https://github.com/grpc-ecosystem/grpc-gateway";
-//    }
-//    ...
-//  };
-//
-message ExternalDocumentation {
-  // A short description of the target documentation. GFM syntax can be used for
-  // rich text representation.
-  string description = 1;
-  // The URL for the target documentation. Value MUST be in the format
-  // of a URL.
-  string url = 2;
-}
-
-// `Schema` is a representation of OpenAPI v2 specification's Schema object.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
-//
-message Schema {
-  JSONSchema json_schema = 1;
-  // Adds support for polymorphism. The discriminator is the schema property
-  // name that is used to differentiate between other schema that inherit this
-  // schema. The property name used MUST be defined at this schema and it MUST
-  // be in the required property list. When used, the value MUST be the name of
-  // this schema or any schema that inherits it.
-  string discriminator = 2;
-  // Relevant only for Schema "properties" definitions. Declares the property as
-  // "read only". This means that it MAY be sent as part of a response but MUST
-  // NOT be sent as part of the request. Properties marked as readOnly being
-  // true SHOULD NOT be in the required list of the defined schema. Default
-  // value is false.
-  bool read_only = 3;
-  // field 4 is reserved for 'xml'.
-  reserved 4;
-  // Additional external documentation for this schema.
-  ExternalDocumentation external_docs = 5;
-  // A free-form property to include an example of an instance for this schema in JSON.
-  // This is copied verbatim to the output.
-  string example = 6;
-}
-
-// `JSONSchema` represents properties from JSON Schema taken, and as used, in
-// the OpenAPI v2 spec.
-//
-// This includes changes made by OpenAPI v2.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
-//
-// See also: https://cswr.github.io/JsonSchema/spec/basic_types/,
-// https://github.com/json-schema-org/json-schema-spec/blob/master/schema.json
-//
-// Example:
-//
-//  message SimpleMessage {
-//    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = {
-//      json_schema: {
-//        title: "SimpleMessage"
-//        description: "A simple message."
-//        required: ["id"]
-//      }
-//    };
-//
-//    // Id represents the message identifier.
-//    string id = 1; [
-//        (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = {
-//          description: "The unique identifier of the simple message."
-//        }];
-//  }
-//
-message JSONSchema {
-  // field 1 is reserved for '$id', omitted from OpenAPI v2.
-  reserved 1;
-  // field 2 is reserved for '$schema', omitted from OpenAPI v2.
-  reserved 2;
-  // Ref is used to define an external reference to include in the message.
-  // This could be a fully qualified proto message reference, and that type must
-  // be imported into the protofile. If no message is identified, the Ref will
-  // be used verbatim in the output.
-  // For example:
-  //  `ref: ".google.protobuf.Timestamp"`.
-  string ref = 3;
-  // field 4 is reserved for '$comment', omitted from OpenAPI v2.
-  reserved 4;
-  // The title of the schema.
-  string title = 5;
-  // A short description of the schema.
-  string description = 6;
-  string default = 7;
-  bool read_only = 8;
-  // A free-form property to include a JSON example of this field. This is copied
-  // verbatim to the output swagger.json. Quotes must be escaped.
-  // This property is the same for 2.0 and 3.0.0 https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/3.0.0.md#schemaObject https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#schemaObject
-  string example = 9;
-  double multiple_of = 10;
-  // Maximum represents an inclusive upper limit for a numeric instance. The
-  // value MUST be a number.
-  double maximum = 11;
-  bool exclusive_maximum = 12;
-  // minimum represents an inclusive lower limit for a numeric instance. The
-  // value MUST be a number.
-  double minimum = 13;
-  bool exclusive_minimum = 14;
-  uint64 max_length = 15;
-  uint64 min_length = 16;
-  string pattern = 17;
-  // field 18 is reserved for 'additionalItems', omitted from OpenAPI v2.
-  reserved 18;
-  // field 19 is reserved for 'items', but in OpenAPI-specific way.
-  // TODO(ivucica): add 'items'?
-  reserved 19;
-  uint64 max_items = 20;
-  uint64 min_items = 21;
-  bool unique_items = 22;
-  // field 23 is reserved for 'contains', omitted from OpenAPI v2.
-  reserved 23;
-  uint64 max_properties = 24;
-  uint64 min_properties = 25;
-  repeated string required = 26;
-  // field 27 is reserved for 'additionalProperties', but in OpenAPI-specific
-  // way. TODO(ivucica): add 'additionalProperties'?
-  reserved 27;
-  // field 28 is reserved for 'definitions', omitted from OpenAPI v2.
-  reserved 28;
-  // field 29 is reserved for 'properties', but in OpenAPI-specific way.
-  // TODO(ivucica): add 'additionalProperties'?
-  reserved 29;
-  // following fields are reserved, as the properties have been omitted from
-  // OpenAPI v2:
-  // patternProperties, dependencies, propertyNames, const
-  reserved 30 to 33;
-  // Items in 'array' must be unique.
-  repeated string array = 34;
-
-  enum JSONSchemaSimpleTypes {
-    UNKNOWN = 0;
-    ARRAY = 1;
-    BOOLEAN = 2;
-    INTEGER = 3;
-    NULL = 4;
-    NUMBER = 5;
-    OBJECT = 6;
-    STRING = 7;
-  }
-
-  repeated JSONSchemaSimpleTypes type = 35;
-  // `Format`
-  string format = 36;
-  // following fields are reserved, as the properties have been omitted from
-  // OpenAPI v2: contentMediaType, contentEncoding, if, then, else
-  reserved 37 to 41;
-  // field 42 is reserved for 'allOf', but in OpenAPI-specific way.
-  // TODO(ivucica): add 'allOf'?
-  reserved 42;
-  // following fields are reserved, as the properties have been omitted from
-  // OpenAPI v2:
-  // anyOf, oneOf, not
-  reserved 43 to 45;
-  // Items in `enum` must be unique https://tools.ietf.org/html/draft-fge-json-schema-validation-00#section-5.5.1
-  repeated string enum = 46;
-
-  // Additional field level properties used when generating the OpenAPI v2 file.
-  FieldConfiguration field_configuration = 1001;
-
-  // 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file.
-  // These properties are not defined by OpenAPIv2, but they are used to control the generation.
-  message FieldConfiguration {
-    // Alternative parameter name when used as path parameter. If set, this will
-    // be used as the complete parameter name when this field is used as a path
-    // parameter. Use this to avoid having auto generated path parameter names
-    // for overlapping paths.
-    string path_param_name = 47;
-  }
-  // Custom properties that start with "x-" such as "x-foo" used to describe
-  // extra functionality that is not covered by the standard OpenAPI Specification.
-  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
-  map<string, google.protobuf.Value> extensions = 48;
-}
-
-// `Tag` is a representation of OpenAPI v2 specification's Tag object.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#tagObject
-//
-message Tag {
-  // The name of the tag. Use it to allow override of the name of a
-  // global Tag object, then use that name to reference the tag throughout the
-  // OpenAPI file.
-  string name = 1;
-  // A short description for the tag. GFM syntax can be used for rich text
-  // representation.
-  string description = 2;
-  // Additional external documentation for this tag.
-  ExternalDocumentation external_docs = 3;
-  // Custom properties that start with "x-" such as "x-foo" used to describe
-  // extra functionality that is not covered by the standard OpenAPI Specification.
-  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
-  map<string, google.protobuf.Value> extensions = 4;
-}
-
-// `SecurityDefinitions` is a representation of OpenAPI v2 specification's
-// Security Definitions object.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityDefinitionsObject
-//
-// A declaration of the security schemes available to be used in the
-// specification. This does not enforce the security schemes on the operations
-// and only serves to provide the relevant details for each scheme.
-message SecurityDefinitions {
-  // A single security scheme definition, mapping a "name" to the scheme it
-  // defines.
-  map<string, SecurityScheme> security = 1;
-}
-
-// `SecurityScheme` is a representation of OpenAPI v2 specification's
-// Security Scheme object.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securitySchemeObject
-//
-// Allows the definition of a security scheme that can be used by the
-// operations. Supported schemes are basic authentication, an API key (either as
-// a header or as a query parameter) and OAuth2's common flows (implicit,
-// password, application and access code).
-message SecurityScheme {
-  // The type of the security scheme. Valid values are "basic",
-  // "apiKey" or "oauth2".
-  enum Type {
-    TYPE_INVALID = 0;
-    TYPE_BASIC = 1;
-    TYPE_API_KEY = 2;
-    TYPE_OAUTH2 = 3;
-  }
-
-  // The location of the API key. Valid values are "query" or "header".
-  enum In {
-    IN_INVALID = 0;
-    IN_QUERY = 1;
-    IN_HEADER = 2;
-  }
-
-  // The flow used by the OAuth2 security scheme. Valid values are
-  // "implicit", "password", "application" or "accessCode".
-  enum Flow {
-    FLOW_INVALID = 0;
-    FLOW_IMPLICIT = 1;
-    FLOW_PASSWORD = 2;
-    FLOW_APPLICATION = 3;
-    FLOW_ACCESS_CODE = 4;
-  }
-
-  // The type of the security scheme. Valid values are "basic",
-  // "apiKey" or "oauth2".
-  Type type = 1;
-  // A short description for security scheme.
-  string description = 2;
-  // The name of the header or query parameter to be used.
-  // Valid for apiKey.
-  string name = 3;
-  // The location of the API key. Valid values are "query" or
-  // "header".
-  // Valid for apiKey.
-  In in = 4;
-  // The flow used by the OAuth2 security scheme. Valid values are
-  // "implicit", "password", "application" or "accessCode".
-  // Valid for oauth2.
-  Flow flow = 5;
-  // The authorization URL to be used for this flow. This SHOULD be in
-  // the form of a URL.
-  // Valid for oauth2/implicit and oauth2/accessCode.
-  string authorization_url = 6;
-  // The token URL to be used for this flow. This SHOULD be in the
-  // form of a URL.
-  // Valid for oauth2/password, oauth2/application and oauth2/accessCode.
-  string token_url = 7;
-  // The available scopes for the OAuth2 security scheme.
-  // Valid for oauth2.
-  Scopes scopes = 8;
-  // Custom properties that start with "x-" such as "x-foo" used to describe
-  // extra functionality that is not covered by the standard OpenAPI Specification.
-  // See: https://swagger.io/docs/specification/2-0/swagger-extensions/
-  map<string, google.protobuf.Value> extensions = 9;
-}
-
-// `SecurityRequirement` is a representation of OpenAPI v2 specification's
-// Security Requirement object.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#securityRequirementObject
-//
-// Lists the required security schemes to execute this operation. The object can
-// have multiple security schemes declared in it which are all required (that
-// is, there is a logical AND between the schemes).
-//
-// The name used for each property MUST correspond to a security scheme
-// declared in the Security Definitions.
-message SecurityRequirement {
-  // If the security scheme is of type "oauth2", then the value is a list of
-  // scope names required for the execution. For other security scheme types,
-  // the array MUST be empty.
-  message SecurityRequirementValue {
-    repeated string scope = 1;
-  }
-  // Each name must correspond to a security scheme which is declared in
-  // the Security Definitions. If the security scheme is of type "oauth2",
-  // then the value is a list of scope names required for the execution.
-  // For other security scheme types, the array MUST be empty.
-  map<string, SecurityRequirementValue> security_requirement = 1;
-}
-
-// `Scopes` is a representation of OpenAPI v2 specification's Scopes object.
-//
-// See: https://github.com/OAI/OpenAPI-Specification/blob/3.0.0/versions/2.0.md#scopesObject
-//
-// Lists the available scopes for an OAuth2 security scheme.
-message Scopes {
-  // Maps between a name of a scope to a short description of it (as the value
-  // of the property).
-  map<string, string> scope = 1;
-}
diff --git a/flyrs/setup.sh b/flyrs/setup.sh
deleted file mode 100644
index 8bec45dae3..0000000000
--- a/flyrs/setup.sh
+++ /dev/null
@@ -1 +0,0 @@
-export PB_OUT_DIR=gen/pb_rust/flyteidl/

From cf66431ae24f9e1e639358851494614831cd5189 Mon Sep 17 00:00:00 2001
From: Austin Liu
Date: Fri, 12 Apr 2024 14:30:40 +0800
Subject: [PATCH 06/16] re-org

Signed-off-by: Austin Liu
---
 flyrs/Cargo.toml                              |    8 +-
 flyrs/perf.png                                |  Bin 30593 -> 0 bytes
 flyrs/perf.py                                 |   31 +
 flyrs/readme.md                               |    0
 flyrs/src/gen/pb_rust/flyteidl/Cargo.toml     |   15 -
 flyrs/src/gen/pb_rust/flyteidl/datacatalog.rs |  957 -----
 .../gen/pb_rust/flyteidl/flyteidl.admin.rs    | 3341 ----------------
 .../pb_rust/flyteidl/flyteidl.cacheservice.rs |  399 --
 .../src/gen/pb_rust/flyteidl/flyteidl.core.rs | 3162 ---------------
 .../gen/pb_rust/flyteidl/flyteidl.event.rs    |  398 --
 .../flyteidl/flyteidl.plugins.kubeflow.rs     |  207 -
 .../gen/pb_rust/flyteidl/flyteidl.plugins.rs  |  346 --
 .../gen/pb_rust/flyteidl/flyteidl.service.rs  | 3509 -----------------
 flyrs/src/gen/pb_rust/flyteidl/google.api.rs  |  367 --
 ...pc.gateway.protoc_gen_openapiv2.options.rs | 1019 -----
 flyrs/src/gen/pb_rust/flyteidl/lib.rs         |   27 -
 flyrs/src/lib.rs                              |    2 +-
 flyrs/test_flytekit_remote.py                 |   39 +-
 18 files changed, 38 insertions(+), 13789 deletions(-)
 delete mode 100644 flyrs/perf.png
 create mode 100644 flyrs/perf.py
 create mode 100644 flyrs/readme.md
 delete mode 100644 flyrs/src/gen/pb_rust/flyteidl/Cargo.toml
 delete mode 100644 flyrs/src/gen/pb_rust/flyteidl/datacatalog.rs
 delete mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.admin.rs
 delete mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.cacheservice.rs
 delete mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.core.rs
 delete mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.event.rs
 delete mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.kubeflow.rs
 delete mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.rs
 delete mode 100644 flyrs/src/gen/pb_rust/flyteidl/flyteidl.service.rs
 delete mode 100644 flyrs/src/gen/pb_rust/flyteidl/google.api.rs
 delete mode 100644 flyrs/src/gen/pb_rust/flyteidl/grpc.gateway.protoc_gen_openapiv2.options.rs
 delete mode 100644 flyrs/src/gen/pb_rust/flyteidl/lib.rs

diff --git a/flyrs/Cargo.toml b/flyrs/Cargo.toml
index 5860a05679..125fbf5b90 100644
--- a/flyrs/Cargo.toml
+++ b/flyrs/Cargo.toml
@@ -1,4 +1,3 @@
-workspace = { members = ["src/gen/pb_rust/flyteidl"] }
 [package]
 name = "flyrs"
 version = "0.1.0"
@@ -15,11 +14,10 @@ crate-type = ["cdylib"]
 [dependencies]
 prost = "0.12.3"
 tonic = "0.11.0"
-tokio = { version = "1.9", features = ["full"] }
+tokio = { version = "1.37.0", features = ["full"] }
 pyo3 = { version = "0.21", features = ["extension-module", "experimental-async"] }
-pyo3-asyncio = { version = "0.14", features = ["tokio-runtime"] }
-prost-types = "0.12.3"
-flyteidl = { path="src/gen/pb_rust/flyteidl" }
+
+flyteidl = { path="../../flyte/flyteidl" }
 
 [build-dependencies]
diff --git a/flyrs/perf.png b/flyrs/perf.png
deleted file mode 100644
index 26e384d6aca8ca1dc83a960d2d3a8d2147e66b20..0000000000000000000000000000000000000000
Binary files a/flyrs/perf.png and /dev/null differ
diff --git a/flyrs/perf.py b/flyrs/perf.py
new file mode 100644
index 0000000000..7c58d9076a
--- /dev/null
+++ b/flyrs/perf.py
@@ -0,0 +1,31 @@
+import timeit
+import matplotlib.pyplot as plt
+
+setup = """
+from flytekit.remote import FlyteRemote;
+from flytekit.configuration import Config;
+PROJECT = "flytesnacks";
+DOMAIN = "development";
+remote_py = FlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN);
+remote_rs = FlyteRemote(Config.auto(), enable_rs=True, default_project=PROJECT, default_domain=DOMAIN);
+"""
+
+fetch_task_in_py = """task_py = remote_py.fetch_task(project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw")"""
+fetch_task_in_rs = """task_rs = remote_rs.fetch_task(project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw")"""
+
+r = 10
+Xs = [1, 10, 100, 1000]
+py_elapsed, rs_elapsed = [], []
+for x in Xs:
+    # Python gRPC
+    py_elapsed.append(sum(timeit.repeat(fetch_task_in_py, setup=setup, repeat=r, number=x)) / r)
+    print()
+    # Rust gRPC
+    rs_elapsed.append(sum(timeit.repeat(fetch_task_in_rs, setup=setup, repeat=r, number=x)) / r)
+    print()
+plt.xlabel('# of fetched tasks')
+plt.ylabel('average elapsed time (s)')
+plt.plot(Xs, py_elapsed, 'r-', label='Python gRPC')
+plt.plot(Xs, rs_elapsed, 'b-', label='Rust gRPC')
+plt.legend()
+plt.savefig("perf.png")
\ No newline at end of file
diff --git a/flyrs/readme.md b/flyrs/readme.md
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/flyrs/src/gen/pb_rust/flyteidl/Cargo.toml b/flyrs/src/gen/pb_rust/flyteidl/Cargo.toml
deleted file mode 100644
index ee15f0bffb..0000000000
--- a/flyrs/src/gen/pb_rust/flyteidl/Cargo.toml
+++ /dev/null
@@ -1,15 +0,0 @@
-[package]
-name = "flyteidl"
-version = "0.1.0"
-edition = "2021"
-
-# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
-
-[dependencies]
-prost = "0.12.3"
-prost-types = "0.12.3"
-tonic = "0.11.0"
-
-[lib]
-name = "flyteidl"
-path = "lib.rs"
diff --git a/flyrs/src/gen/pb_rust/flyteidl/datacatalog.rs b/flyrs/src/gen/pb_rust/flyteidl/datacatalog.rs
deleted file mode 100644
index 20bfd41962..0000000000
--- a/flyrs/src/gen/pb_rust/flyteidl/datacatalog.rs
+++ /dev/null
@@ -1,957 +0,0 @@
-///
-/// Request message for creating a Dataset.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct CreateDatasetRequest {
-    #[prost(message, optional, tag = "1")]
-    pub dataset: ::core::option::Option<Dataset>,
-}
-///
-/// Response message for creating a Dataset
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct CreateDatasetResponse {}
-///
-/// Request message for retrieving a Dataset. The Dataset is retrieved by its unique identifier
-/// which is a combination of several fields.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetDatasetRequest {
-    #[prost(message, optional, tag = "1")]
-    pub dataset: ::core::option::Option<DatasetId>,
-}
-///
-/// Response message for retrieving a Dataset. The response will include the metadata for the
-/// Dataset.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetDatasetResponse {
-    #[prost(message, optional, tag = "1")]
-    pub dataset: ::core::option::Option<Dataset>,
-}
-///
-/// Request message for retrieving an Artifact. Retrieve an artifact based on a query handle that
-/// can be one of artifact_id or tag. The result returned will include the artifact data and metadata
-/// associated with the artifact.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetArtifactRequest {
-    #[prost(message, optional, tag = "1")]
-    pub dataset: ::core::option::Option<DatasetId>,
-    #[prost(oneof = "get_artifact_request::QueryHandle", tags = "2, 3")]
-    pub query_handle: ::core::option::Option<get_artifact_request::QueryHandle>,
-}
-/// Nested message and enum types in `GetArtifactRequest`.
-pub mod get_artifact_request {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum QueryHandle {
-        #[prost(string, tag = "2")]
-        ArtifactId(::prost::alloc::string::String),
-        #[prost(string, tag = "3")]
-        TagName(::prost::alloc::string::String),
-    }
-}
-///
-/// Response message for retrieving an Artifact. The result returned will include the artifact data
-/// and metadata associated with the artifact.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetArtifactResponse {
-    #[prost(message, optional, tag = "1")]
-    pub artifact: ::core::option::Option<Artifact>,
-}
-///
-/// Request message for creating an Artifact and its associated artifact Data.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct CreateArtifactRequest {
-    #[prost(message, optional, tag = "1")]
-    pub artifact: ::core::option::Option<Artifact>,
-}
-///
-/// Response message for creating an Artifact.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct CreateArtifactResponse {}
-///
-/// Request message for tagging an Artifact.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct AddTagRequest {
-    #[prost(message, optional, tag = "1")]
-    pub tag: ::core::option::Option<Tag>,
-}
-///
-/// Response message for tagging an Artifact.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct AddTagResponse {}
-/// List the artifacts that belong to the Dataset, optionally filtered using filtered expression.
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListArtifactsRequest { - /// Use a datasetID for which you want to retrieve the artifacts - #[prost(message, optional, tag = "1")] - pub dataset: ::core::option::Option, - /// Apply the filter expression to this query - #[prost(message, optional, tag = "2")] - pub filter: ::core::option::Option, - /// Pagination options to get a page of artifacts - #[prost(message, optional, tag = "3")] - pub pagination: ::core::option::Option, -} -/// Response to list artifacts -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListArtifactsResponse { - /// The list of artifacts - #[prost(message, repeated, tag = "1")] - pub artifacts: ::prost::alloc::vec::Vec, - /// Token to use to request the next page, pass this into the next requests PaginationOptions - #[prost(string, tag = "2")] - pub next_token: ::prost::alloc::string::String, -} -/// List the datasets for the given query -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListDatasetsRequest { - /// Apply the filter expression to this query - #[prost(message, optional, tag = "1")] - pub filter: ::core::option::Option, - /// Pagination options to get a page of datasets - #[prost(message, optional, tag = "2")] - pub pagination: ::core::option::Option, -} -/// List the datasets response with token for next pagination -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListDatasetsResponse { - /// The list of datasets - #[prost(message, repeated, tag = "1")] - pub datasets: ::prost::alloc::vec::Vec, - /// Token to use to request the next page, pass this into the next requests PaginationOptions - #[prost(string, tag = "2")] - pub next_token: ::prost::alloc::string::String, -} -/// -/// Request message for updating an Artifact and overwriting its associated ArtifactData. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UpdateArtifactRequest { - /// ID of dataset the artifact is associated with - #[prost(message, optional, tag = "1")] - pub dataset: ::core::option::Option, - /// List of data to overwrite stored artifact data with. Must contain ALL data for updated Artifact as any missing - /// ArtifactData entries will be removed from the underlying blob storage and database. - #[prost(message, repeated, tag = "4")] - pub data: ::prost::alloc::vec::Vec, - /// Update execution metadata(including execution domain, name, node, project data) when overwriting cache - #[prost(message, optional, tag = "5")] - pub metadata: ::core::option::Option, - /// Either ID of artifact or name of tag to retrieve existing artifact from - #[prost(oneof = "update_artifact_request::QueryHandle", tags = "2, 3")] - pub query_handle: ::core::option::Option, -} -/// Nested message and enum types in `UpdateArtifactRequest`. -pub mod update_artifact_request { - /// Either ID of artifact or name of tag to retrieve existing artifact from - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum QueryHandle { - #[prost(string, tag = "2")] - ArtifactId(::prost::alloc::string::String), - #[prost(string, tag = "3")] - TagName(::prost::alloc::string::String), - } -} -/// -/// Response message for updating an Artifact. 
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct UpdateArtifactResponse {
-    /// The unique ID of the artifact updated
-    #[prost(string, tag = "1")]
-    pub artifact_id: ::prost::alloc::string::String,
-}
-///
-/// ReservationID message that is composed of several string fields.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ReservationId {
-    /// The unique ID for the reserved dataset
-    #[prost(message, optional, tag = "1")]
-    pub dataset_id: ::core::option::Option<DatasetId>,
-    /// The specific artifact tag for the reservation
-    #[prost(string, tag = "2")]
-    pub tag_name: ::prost::alloc::string::String,
-}
-/// Try to acquire or extend an artifact reservation. If an active reservation exists, retrieve that instance.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetOrExtendReservationRequest {
-    /// The unique ID for the reservation
-    #[prost(message, optional, tag = "1")]
-    pub reservation_id: ::core::option::Option<ReservationId>,
-    /// The unique ID of the owner for the reservation
-    #[prost(string, tag = "2")]
-    pub owner_id: ::prost::alloc::string::String,
-    /// Requested reservation extension heartbeat interval
-    #[prost(message, optional, tag = "3")]
-    pub heartbeat_interval: ::core::option::Option<::prost_types::Duration>,
-}
-/// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Reservation {
-    /// The unique ID for the reservation
-    #[prost(message, optional, tag = "1")]
-    pub reservation_id: ::core::option::Option<ReservationId>,
-    /// The unique ID of the owner for the reservation
-    #[prost(string, tag = "2")]
-    pub owner_id: ::prost::alloc::string::String,
-    /// Recommended heartbeat interval to extend reservation
-    #[prost(message, optional, tag = "3")]
-    pub heartbeat_interval: ::core::option::Option<::prost_types::Duration>,
-    /// Expiration timestamp of this reservation
-    #[prost(message, optional, tag = "4")]
-    pub expires_at: ::core::option::Option<::prost_types::Timestamp>,
-    /// Free-form metadata associated with the artifact
-    #[prost(message, optional, tag = "6")]
-    pub metadata: ::core::option::Option<Metadata>,
-}
-/// Response including either a newly minted reservation or the existing reservation
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetOrExtendReservationResponse {
-    /// The reservation to be acquired or extended
-    #[prost(message, optional, tag = "1")]
-    pub reservation: ::core::option::Option<Reservation>,
-}
-/// Request to release reservation
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ReleaseReservationRequest {
-    /// The unique ID for the reservation
-    #[prost(message, optional, tag = "1")]
-    pub reservation_id: ::core::option::Option<ReservationId>,
-    /// The unique ID of the owner for the reservation
-    #[prost(string, tag = "2")]
-    pub owner_id: ::prost::alloc::string::String,
-}
-/// Response to release reservation
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ReleaseReservationResponse {}
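An illustrative sketch of the acquire/heartbeat/release handshake these reservation messages model (the endpoint, owner ID, and crate/module paths are assumptions; a tokio runtime and the generated flyteidl crate are assumed available):

// Reservation handshake sketch; all identifiers below are placeholders.
use flyteidl::datacatalog::{
    data_catalog_client::DataCatalogClient, GetOrExtendReservationRequest,
    ReleaseReservationRequest, ReservationId,
};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = DataCatalogClient::connect("http://localhost:8089").await?;
    let reservation_id = Some(ReservationId {
        dataset_id: None, // fill in the DatasetId being populated
        tag_name: "main".to_string(),
    });
    // Acquire (or extend) the reservation; repeat this call on each heartbeat.
    let res = client
        .get_or_extend_reservation(GetOrExtendReservationRequest {
            reservation_id: reservation_id.clone(),
            owner_id: "worker-1".to_string(),
            heartbeat_interval: None, // accept the server-recommended interval
        })
        .await?;
    println!("holding: {:?}", res.into_inner().reservation);
    // Release the spot once the artifact has been written.
    client
        .release_reservation(ReleaseReservationRequest {
            reservation_id,
            owner_id: "worker-1".to_string(),
        })
        .await?;
    Ok(())
}

-///
-/// Dataset message. It is uniquely identified by DatasetID.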
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Dataset { - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - #[prost(message, optional, tag = "2")] - pub metadata: ::core::option::Option, - #[prost(string, repeated, tag = "3")] - pub partition_keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -/// -/// An artifact could have multiple partitions and each partition can have an arbitrary string key/value pair -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Partition { - #[prost(string, tag = "1")] - pub key: ::prost::alloc::string::String, - #[prost(string, tag = "2")] - pub value: ::prost::alloc::string::String, -} -/// -/// DatasetID message that is composed of several string fields. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DatasetId { - /// The name of the project - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - /// The name of the dataset - #[prost(string, tag = "2")] - pub name: ::prost::alloc::string::String, - /// The domain (eg. environment) - #[prost(string, tag = "3")] - pub domain: ::prost::alloc::string::String, - /// Version of the data schema - #[prost(string, tag = "4")] - pub version: ::prost::alloc::string::String, - /// UUID for the dataset (if set the above fields are optional) - #[prost(string, tag = "5")] - pub uuid: ::prost::alloc::string::String, - /// Optional, org key applied to the resource. - #[prost(string, tag = "6")] - pub org: ::prost::alloc::string::String, -} -/// -/// Artifact message. It is composed of several string fields. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Artifact { - /// The unique ID of the artifact - #[prost(string, tag = "1")] - pub id: ::prost::alloc::string::String, - /// The Dataset that the artifact belongs to - #[prost(message, optional, tag = "2")] - pub dataset: ::core::option::Option, - /// A list of data that is associated with the artifact - #[prost(message, repeated, tag = "3")] - pub data: ::prost::alloc::vec::Vec, - /// Free-form metadata associated with the artifact - #[prost(message, optional, tag = "4")] - pub metadata: ::core::option::Option, - #[prost(message, repeated, tag = "5")] - pub partitions: ::prost::alloc::vec::Vec, - #[prost(message, repeated, tag = "6")] - pub tags: ::prost::alloc::vec::Vec, - /// creation timestamp of artifact, autogenerated by service - #[prost(message, optional, tag = "7")] - pub created_at: ::core::option::Option<::prost_types::Timestamp>, -} -/// -/// ArtifactData that belongs to an artifact -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ArtifactData { - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - #[prost(message, optional, tag = "2")] - pub value: ::core::option::Option, -} -/// -/// Tag message that is unique to a Dataset. It is associated to a single artifact and -/// can be retrieved by name later. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Tag { - /// Name of tag - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// The tagged artifact - #[prost(string, tag = "2")] - pub artifact_id: ::prost::alloc::string::String, - /// The Dataset that this tag belongs to - #[prost(message, optional, tag = "3")] - pub dataset: ::core::option::Option, -} -/// -/// Metadata representation for artifacts and datasets -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Metadata { - /// key map is a dictionary of key/val strings that represent metadata - #[prost(map = "string, string", tag = "1")] - pub key_map: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, -} -/// Filter expression that is composed of a combination of single filters -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FilterExpression { - #[prost(message, repeated, tag = "1")] - pub filters: ::prost::alloc::vec::Vec, -} -/// A single property to filter on. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SinglePropertyFilter { - /// field 10 in case we add more entities to query - #[prost(enumeration = "single_property_filter::ComparisonOperator", tag = "10")] - pub operator: i32, - #[prost(oneof = "single_property_filter::PropertyFilter", tags = "1, 2, 3, 4")] - pub property_filter: ::core::option::Option, -} -/// Nested message and enum types in `SinglePropertyFilter`. -pub mod single_property_filter { - /// as use-cases come up we can add more operators, ex: gte, like, not eq etc. - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum ComparisonOperator { - Equals = 0, - } - impl ComparisonOperator { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - ComparisonOperator::Equals => "EQUALS", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "EQUALS" => Some(Self::Equals), - _ => None, - } - } - } - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum PropertyFilter { - #[prost(message, tag = "1")] - TagFilter(super::TagPropertyFilter), - #[prost(message, tag = "2")] - PartitionFilter(super::PartitionPropertyFilter), - #[prost(message, tag = "3")] - ArtifactFilter(super::ArtifactPropertyFilter), - #[prost(message, tag = "4")] - DatasetFilter(super::DatasetPropertyFilter), - } -} -/// Artifact properties we can filter by -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ArtifactPropertyFilter { - /// oneof because we can add more properties in the future - #[prost(oneof = "artifact_property_filter::Property", tags = "1")] - pub property: ::core::option::Option, -} -/// Nested message and enum types in `ArtifactPropertyFilter`. 
-pub mod artifact_property_filter { - /// oneof because we can add more properties in the future - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Property { - #[prost(string, tag = "1")] - ArtifactId(::prost::alloc::string::String), - } -} -/// Tag properties we can filter by -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TagPropertyFilter { - #[prost(oneof = "tag_property_filter::Property", tags = "1")] - pub property: ::core::option::Option, -} -/// Nested message and enum types in `TagPropertyFilter`. -pub mod tag_property_filter { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Property { - #[prost(string, tag = "1")] - TagName(::prost::alloc::string::String), - } -} -/// Partition properties we can filter by -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PartitionPropertyFilter { - #[prost(oneof = "partition_property_filter::Property", tags = "1")] - pub property: ::core::option::Option, -} -/// Nested message and enum types in `PartitionPropertyFilter`. -pub mod partition_property_filter { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Property { - #[prost(message, tag = "1")] - KeyVal(super::KeyValuePair), - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct KeyValuePair { - #[prost(string, tag = "1")] - pub key: ::prost::alloc::string::String, - #[prost(string, tag = "2")] - pub value: ::prost::alloc::string::String, -} -/// Dataset properties we can filter by -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DatasetPropertyFilter { - #[prost(oneof = "dataset_property_filter::Property", tags = "1, 2, 3, 4, 5")] - pub property: ::core::option::Option, -} -/// Nested message and enum types in `DatasetPropertyFilter`. -pub mod dataset_property_filter { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Property { - #[prost(string, tag = "1")] - Project(::prost::alloc::string::String), - #[prost(string, tag = "2")] - Name(::prost::alloc::string::String), - #[prost(string, tag = "3")] - Domain(::prost::alloc::string::String), - #[prost(string, tag = "4")] - Version(::prost::alloc::string::String), - /// Optional, org key applied to the dataset. - #[prost(string, tag = "5")] - Org(::prost::alloc::string::String), - } -} -/// Pagination options for making list requests -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PaginationOptions { - /// the max number of results to return - #[prost(uint32, tag = "1")] - pub limit: u32, - /// the token to pass to fetch the next page - #[prost(string, tag = "2")] - pub token: ::prost::alloc::string::String, - /// the property that we want to sort the results by - #[prost(enumeration = "pagination_options::SortKey", tag = "3")] - pub sort_key: i32, - /// the sort order of the results - #[prost(enumeration = "pagination_options::SortOrder", tag = "4")] - pub sort_order: i32, -} -/// Nested message and enum types in `PaginationOptions`. 
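An illustrative sketch of composing the filter and pagination messages above into a ListArtifactsRequest (the flyteidl crate/module path and field values are assumptions):

// Build a first-page query filtered on one partition key/value pair.
use flyteidl::datacatalog::{
    pagination_options, partition_property_filter, single_property_filter, DatasetId,
    FilterExpression, KeyValuePair, ListArtifactsRequest, PaginationOptions,
    PartitionPropertyFilter, SinglePropertyFilter,
};

fn first_page(dataset: DatasetId) -> ListArtifactsRequest {
    // Match artifacts whose partition key "region" equals "us-east-1".
    let filter = SinglePropertyFilter {
        operator: single_property_filter::ComparisonOperator::Equals as i32,
        property_filter: Some(single_property_filter::PropertyFilter::PartitionFilter(
            PartitionPropertyFilter {
                property: Some(partition_property_filter::Property::KeyVal(KeyValuePair {
                    key: "region".to_string(),
                    value: "us-east-1".to_string(),
                })),
            },
        )),
    };
    ListArtifactsRequest {
        dataset: Some(dataset),
        filter: Some(FilterExpression { filters: vec![filter] }),
        pagination: Some(PaginationOptions {
            limit: 50,
            token: String::new(), // empty for the first page; pass next_token afterwards
            sort_key: pagination_options::SortKey::CreationTime as i32,
            sort_order: pagination_options::SortOrder::Descending as i32,
        }),
    }
}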
-pub mod pagination_options { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum SortOrder { - Descending = 0, - Ascending = 1, - } - impl SortOrder { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - SortOrder::Descending => "DESCENDING", - SortOrder::Ascending => "ASCENDING", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "DESCENDING" => Some(Self::Descending), - "ASCENDING" => Some(Self::Ascending), - _ => None, - } - } - } - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum SortKey { - CreationTime = 0, - } - impl SortKey { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - SortKey::CreationTime => "CREATION_TIME", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "CREATION_TIME" => Some(Self::CreationTime), - _ => None, - } - } - } -} -/// Generated client implementations. -pub mod data_catalog_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// - /// Data Catalog service definition - /// Data Catalog is a service for indexing parameterized, strongly-typed data artifacts across revisions. - /// Artifacts are associated with a Dataset, and can be tagged for retrieval. - #[derive(Debug, Clone)] - pub struct DataCatalogClient { - inner: tonic::client::Grpc, - } - impl DataCatalogClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl DataCatalogClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> DataCatalogClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - DataCatalogClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. 
- #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Create a new Dataset. Datasets are unique based on the DatasetID. Datasets are logical groupings of artifacts. - /// Each dataset can have one or more artifacts - pub async fn create_dataset( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/datacatalog.DataCatalog/CreateDataset", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("datacatalog.DataCatalog", "CreateDataset")); - self.inner.unary(req, path, codec).await - } - /// Get a Dataset by the DatasetID. This returns the Dataset with the associated metadata. - pub async fn get_dataset( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/datacatalog.DataCatalog/GetDataset", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("datacatalog.DataCatalog", "GetDataset")); - self.inner.unary(req, path, codec).await - } - /// Create an artifact and the artifact data associated with it. An artifact can be a hive partition or arbitrary - /// files or data values - pub async fn create_artifact( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/datacatalog.DataCatalog/CreateArtifact", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("datacatalog.DataCatalog", "CreateArtifact")); - self.inner.unary(req, path, codec).await - } - /// Retrieve an artifact by an identifying handle. This returns an artifact along with the artifact data. 
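An illustrative sketch of dialing the service and issuing one of the unary calls above (the endpoint, identifiers, and crate/module path are assumptions; a tokio runtime is assumed):

// Connect, then fetch one dataset; connect() dials eagerly via tonic::transport::Endpoint.
use flyteidl::datacatalog::{data_catalog_client::DataCatalogClient, DatasetId, GetDatasetRequest};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = DataCatalogClient::connect("http://localhost:8089")
        .await?
        .max_decoding_message_size(16 * 1024 * 1024); // raise the 4MB default if needed
    let resp = client
        .get_dataset(GetDatasetRequest {
            dataset: Some(DatasetId {
                project: "flytesnacks".into(),
                domain: "development".into(),
                name: "core.types".into(),
                version: "v1".into(),
                uuid: String::new(),
                org: String::new(),
            }),
        })
        .await?;
    println!("{:?}", resp.into_inner().dataset);
    Ok(())
}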
- pub async fn get_artifact( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/datacatalog.DataCatalog/GetArtifact", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("datacatalog.DataCatalog", "GetArtifact")); - self.inner.unary(req, path, codec).await - } - /// Associate a tag with an artifact. Tags are unique within a Dataset. - pub async fn add_tag( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/datacatalog.DataCatalog/AddTag", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("datacatalog.DataCatalog", "AddTag")); - self.inner.unary(req, path, codec).await - } - /// Return a paginated list of artifacts - pub async fn list_artifacts( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/datacatalog.DataCatalog/ListArtifacts", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("datacatalog.DataCatalog", "ListArtifacts")); - self.inner.unary(req, path, codec).await - } - /// Return a paginated list of datasets - pub async fn list_datasets( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/datacatalog.DataCatalog/ListDatasets", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("datacatalog.DataCatalog", "ListDatasets")); - self.inner.unary(req, path, codec).await - } - /// Updates an existing artifact, overwriting the stored artifact data in the underlying blob storage. - pub async fn update_artifact( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/datacatalog.DataCatalog/UpdateArtifact", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("datacatalog.DataCatalog", "UpdateArtifact")); - self.inner.unary(req, path, codec).await - } - /// Attempts to get or extend a reservation for the corresponding artifact. If one already exists - /// (ie. 
another entity owns the reservation) then that reservation is retrieved. - /// Once you acquire a reservation, you need to periodically extend the reservation with an - /// identical call. If the reservation is not extended before the defined expiration, it may be - /// acquired by another task. - /// Note: We may have multiple concurrent tasks with the same signature and the same input that - /// try to populate the same artifact at the same time. Thus with reservation, only one task can - /// run at a time, until the reservation expires. - /// Note: If task A does not extend the reservation in time and the reservation expires, another - /// task B may take over the reservation, resulting in two tasks A and B running in parallel. So - /// a third task C may get the Artifact from A or B, whichever writes last. - pub async fn get_or_extend_reservation( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/datacatalog.DataCatalog/GetOrExtendReservation", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("datacatalog.DataCatalog", "GetOrExtendReservation"), - ); - self.inner.unary(req, path, codec).await - } - /// Release the reservation when the task holding the spot fails so that the other tasks - /// can grab the spot. - pub async fn release_reservation( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/datacatalog.DataCatalog/ReleaseReservation", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("datacatalog.DataCatalog", "ReleaseReservation"), - ); - self.inner.unary(req, path, codec).await - } - } -} diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.admin.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.admin.rs deleted file mode 100644 index 145fd6500b..0000000000 --- a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.admin.rs +++ /dev/null @@ -1,3341 +0,0 @@ -/// Encapsulation of fields that identifies a Flyte resource. -/// A Flyte resource can be a task, workflow or launch plan. -/// A resource can internally have multiple versions and is uniquely identified -/// by project, domain, and name. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NamedEntityIdentifier { - /// Name of the project the resource belongs to. - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - /// Name of the domain the resource belongs to. - /// A domain can be considered as a subset within a specific project. - #[prost(string, tag = "2")] - pub domain: ::prost::alloc::string::String, - /// User provided value for the resource. - /// The combination of project + domain + name uniquely identifies the resource. 
- /// +optional - in certain contexts - like 'List API', 'Launch plans' - #[prost(string, tag = "3")] - pub name: ::prost::alloc::string::String, - /// Optional, org key applied to the resource. - #[prost(string, tag = "4")] - pub org: ::prost::alloc::string::String, -} -/// Additional metadata around a named entity. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NamedEntityMetadata { - /// Common description across all versions of the entity - /// +optional - #[prost(string, tag = "1")] - pub description: ::prost::alloc::string::String, - /// Shared state across all version of the entity - /// At this point in time, only workflow entities can have their state archived. - #[prost(enumeration = "NamedEntityState", tag = "2")] - pub state: i32, -} -/// Encapsulates information common to a NamedEntity, a Flyte resource such as a task, -/// workflow or launch plan. A NamedEntity is exclusively identified by its resource type -/// and identifier. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NamedEntity { - /// Resource type of the named entity. One of Task, Workflow or LaunchPlan. - #[prost(enumeration = "super::core::ResourceType", tag = "1")] - pub resource_type: i32, - #[prost(message, optional, tag = "2")] - pub id: ::core::option::Option, - /// Additional metadata around a named entity. - #[prost(message, optional, tag = "3")] - pub metadata: ::core::option::Option, -} -/// Specifies sort ordering in a list request. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Sort { - /// Indicates an attribute to sort the response values. - /// +required - #[prost(string, tag = "1")] - pub key: ::prost::alloc::string::String, - /// Indicates the direction to apply sort key for response values. - /// +optional - #[prost(enumeration = "sort::Direction", tag = "2")] - pub direction: i32, -} -/// Nested message and enum types in `Sort`. -pub mod sort { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Direction { - /// By default, fields are sorted in descending order. - Descending = 0, - Ascending = 1, - } - impl Direction { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Direction::Descending => "DESCENDING", - Direction::Ascending => "ASCENDING", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "DESCENDING" => Some(Self::Descending), - "ASCENDING" => Some(Self::Ascending), - _ => None, - } - } - } -} -/// Represents a request structure to list NamedEntityIdentifiers. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NamedEntityIdentifierListRequest { - /// Name of the project that contains the identifiers. - /// +required - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - /// Name of the domain the identifiers belongs to within the project. - /// +required - #[prost(string, tag = "2")] - pub domain: ::prost::alloc::string::String, - /// Indicates the number of resources to be returned. 
- /// +required - #[prost(uint32, tag = "3")] - pub limit: u32, - /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page - /// in a query. - /// +optional - #[prost(string, tag = "4")] - pub token: ::prost::alloc::string::String, - /// Specifies how listed entities should be sorted in the response. - /// +optional - #[prost(message, optional, tag = "5")] - pub sort_by: ::core::option::Option, - /// Indicates a list of filters passed as string. - /// +optional - #[prost(string, tag = "6")] - pub filters: ::prost::alloc::string::String, - /// Optional, org key applied to the resource. - #[prost(string, tag = "7")] - pub org: ::prost::alloc::string::String, -} -/// Represents a request structure to list NamedEntity objects -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NamedEntityListRequest { - /// Resource type of the metadata to query. One of Task, Workflow or LaunchPlan. - /// +required - #[prost(enumeration = "super::core::ResourceType", tag = "1")] - pub resource_type: i32, - /// Name of the project that contains the identifiers. - /// +required - #[prost(string, tag = "2")] - pub project: ::prost::alloc::string::String, - /// Name of the domain the identifiers belongs to within the project. - #[prost(string, tag = "3")] - pub domain: ::prost::alloc::string::String, - /// Indicates the number of resources to be returned. - #[prost(uint32, tag = "4")] - pub limit: u32, - /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page - /// in a query. - /// +optional - #[prost(string, tag = "5")] - pub token: ::prost::alloc::string::String, - /// Specifies how listed entities should be sorted in the response. - /// +optional - #[prost(message, optional, tag = "6")] - pub sort_by: ::core::option::Option, - /// Indicates a list of filters passed as string. - /// +optional - #[prost(string, tag = "7")] - pub filters: ::prost::alloc::string::String, - /// Optional, org key applied to the resource. - #[prost(string, tag = "8")] - pub org: ::prost::alloc::string::String, -} -/// Represents a list of NamedEntityIdentifiers. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NamedEntityIdentifierList { - /// A list of identifiers. - #[prost(message, repeated, tag = "1")] - pub entities: ::prost::alloc::vec::Vec, - /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page - /// in a query. If there are no more results, this value will be empty. - #[prost(string, tag = "2")] - pub token: ::prost::alloc::string::String, -} -/// Represents a list of NamedEntityIdentifiers. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NamedEntityList { - /// A list of NamedEntity objects - #[prost(message, repeated, tag = "1")] - pub entities: ::prost::alloc::vec::Vec, - /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page - /// in a query. If there are no more results, this value will be empty. - #[prost(string, tag = "2")] - pub token: ::prost::alloc::string::String, -} -/// A request to retrieve the metadata associated with a NamedEntityIdentifier -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NamedEntityGetRequest { - /// Resource type of the metadata to get. 
One of Task, Workflow or LaunchPlan. - /// +required - #[prost(enumeration = "super::core::ResourceType", tag = "1")] - pub resource_type: i32, - /// The identifier for the named entity for which to fetch metadata. - /// +required - #[prost(message, optional, tag = "2")] - pub id: ::core::option::Option, -} -/// Request to set the referenced named entity state to the configured value. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NamedEntityUpdateRequest { - /// Resource type of the metadata to update - /// +required - #[prost(enumeration = "super::core::ResourceType", tag = "1")] - pub resource_type: i32, - /// Identifier of the metadata to update - /// +required - #[prost(message, optional, tag = "2")] - pub id: ::core::option::Option, - /// Metadata object to set as the new value - /// +required - #[prost(message, optional, tag = "3")] - pub metadata: ::core::option::Option, -} -/// Purposefully empty, may be populated in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NamedEntityUpdateResponse {} -/// Shared request structure to fetch a single resource. -/// Resources include: Task, Workflow, LaunchPlan -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ObjectGetRequest { - /// Indicates a unique version of resource. - /// +required - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, -} -/// Shared request structure to retrieve a list of resources. -/// Resources include: Task, Workflow, LaunchPlan -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ResourceListRequest { - /// id represents the unique identifier of the resource. - /// +required - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// Indicates the number of resources to be returned. - /// +required - #[prost(uint32, tag = "2")] - pub limit: u32, - /// In the case of multiple pages of results, this server-provided token can be used to fetch the next page - /// in a query. - /// +optional - #[prost(string, tag = "3")] - pub token: ::prost::alloc::string::String, - /// Indicates a list of filters passed as string. - /// More info on constructing filters : - /// +optional - #[prost(string, tag = "4")] - pub filters: ::prost::alloc::string::String, - /// Sort ordering. - /// +optional - #[prost(message, optional, tag = "5")] - pub sort_by: ::core::option::Option, -} -/// Defines an email notification specification. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EmailNotification { - /// The list of email addresses recipients for this notification. - /// +required - #[prost(string, repeated, tag = "1")] - pub recipients_email: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -/// Defines a pager duty notification specification. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PagerDutyNotification { - /// Currently, PagerDuty notifications leverage email to trigger a notification. - /// +required - #[prost(string, repeated, tag = "1")] - pub recipients_email: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -/// Defines a slack notification specification. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SlackNotification { - /// Currently, Slack notifications leverage email to trigger a notification. - /// +required - #[prost(string, repeated, tag = "1")] - pub recipients_email: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -/// Represents a structure for notifications based on execution status. -/// The notification content is configured within flyte admin but can be templatized. -/// Future iterations could expose configuring notifications with custom content. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Notification { - /// A list of phases to which users can associate the notifications to. - /// +required - #[prost(enumeration = "super::core::workflow_execution::Phase", repeated, tag = "1")] - pub phases: ::prost::alloc::vec::Vec, - /// The type of notification to trigger. - /// +required - #[prost(oneof = "notification::Type", tags = "2, 3, 4")] - pub r#type: ::core::option::Option, -} -/// Nested message and enum types in `Notification`. -pub mod notification { - /// The type of notification to trigger. - /// +required - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Type { - #[prost(message, tag = "2")] - Email(super::EmailNotification), - #[prost(message, tag = "3")] - PagerDuty(super::PagerDutyNotification), - #[prost(message, tag = "4")] - Slack(super::SlackNotification), - } -} -/// Represents a string url and associated metadata used throughout the platform. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UrlBlob { - /// Actual url value. - #[prost(string, tag = "1")] - pub url: ::prost::alloc::string::String, - /// Represents the size of the file accessible at the above url. - #[prost(int64, tag = "2")] - pub bytes: i64, -} -/// Label values to be applied to an execution resource. -/// In the future a mode (e.g. OVERRIDE, APPEND, etc) can be defined -/// to specify how to merge labels defined at registration and execution time. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Labels { - /// Map of custom labels to be applied to the execution resource. - #[prost(map = "string, string", tag = "1")] - pub values: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, -} -/// Annotation values to be applied to an execution resource. -/// In the future a mode (e.g. OVERRIDE, APPEND, etc) can be defined -/// to specify how to merge annotations defined at registration and execution time. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Annotations { - /// Map of custom annotations to be applied to the execution resource. - #[prost(map = "string, string", tag = "1")] - pub values: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, -} -/// Environment variable values to be applied to an execution resource. -/// In the future a mode (e.g. OVERRIDE, APPEND, etc) can be defined -/// to specify how to merge environment variables defined at registration and execution time. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Envs { - /// Map of custom environment variables to be applied to the execution resource. 
- #[prost(message, repeated, tag = "1")] - pub values: ::prost::alloc::vec::Vec, -} -/// Defines permissions associated with executions created by this launch plan spec. -/// Use either of these roles when they have permissions required by your workflow execution. -/// Deprecated. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AuthRole { - /// Defines an optional iam role which will be used for tasks run in executions created with this launch plan. - #[prost(string, tag = "1")] - pub assumable_iam_role: ::prost::alloc::string::String, - /// Defines an optional kubernetes service account which will be used for tasks run in executions created with this launch plan. - #[prost(string, tag = "2")] - pub kubernetes_service_account: ::prost::alloc::string::String, -} -/// Encapsulates user settings pertaining to offloaded data (i.e. Blobs, Schema, query data, etc.). -/// See for more background information. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct RawOutputDataConfig { - /// Prefix for where offloaded data from user workflows will be written - /// e.g. s3://bucket/key or s3://bucket/ - #[prost(string, tag = "1")] - pub output_location_prefix: ::prost::alloc::string::String, -} -/// These URLs are returned as part of node and task execution data requests. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FlyteUrLs { - #[prost(string, tag = "1")] - pub inputs: ::prost::alloc::string::String, - #[prost(string, tag = "2")] - pub outputs: ::prost::alloc::string::String, - #[prost(string, tag = "3")] - pub deck: ::prost::alloc::string::String, -} -/// The status of the named entity is used to control its visibility in the UI. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum NamedEntityState { - /// By default, all named entities are considered active and under development. - NamedEntityActive = 0, - /// Archived named entities are no longer visible in the UI. - NamedEntityArchived = 1, - /// System generated entities that aren't explicitly created or managed by a user. - SystemGenerated = 2, -} -impl NamedEntityState { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - NamedEntityState::NamedEntityActive => "NAMED_ENTITY_ACTIVE", - NamedEntityState::NamedEntityArchived => "NAMED_ENTITY_ARCHIVED", - NamedEntityState::SystemGenerated => "SYSTEM_GENERATED", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "NAMED_ENTITY_ACTIVE" => Some(Self::NamedEntityActive), - "NAMED_ENTITY_ARCHIVED" => Some(Self::NamedEntityArchived), - "SYSTEM_GENERATED" => Some(Self::SystemGenerated), - _ => None, - } - } -} -/// SignalGetOrCreateRequest represents a request structure to retrieve or create a signal. -/// See :ref:`ref_flyteidl.admin.Signal` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SignalGetOrCreateRequest { - /// A unique identifier for the requested signal. 
-    #[prost(message, optional, tag = "1")]
-    pub id: ::core::option::Option<super::core::SignalIdentifier>,
-    /// A type denoting the required value type for this signal.
-    #[prost(message, optional, tag = "2")]
-    pub r#type: ::core::option::Option<super::core::LiteralType>,
-}
-/// SignalListRequest represents a request structure to retrieve a collection of signals.
-/// See :ref:`ref_flyteidl.admin.Signal` for more details
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct SignalListRequest {
-    /// Indicates the workflow execution to filter by.
-    /// +required
-    #[prost(message, optional, tag = "1")]
-    pub workflow_execution_id: ::core::option::Option<
-        super::core::WorkflowExecutionIdentifier,
-    >,
-    /// Indicates the number of resources to be returned.
-    /// +required
-    #[prost(uint32, tag = "2")]
-    pub limit: u32,
-    /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
-    /// in a query.
-    /// +optional
-    #[prost(string, tag = "3")]
-    pub token: ::prost::alloc::string::String,
-    /// Indicates a list of filters passed as string.
-    /// +optional
-    #[prost(string, tag = "4")]
-    pub filters: ::prost::alloc::string::String,
-    /// Sort ordering.
-    /// +optional
-    #[prost(message, optional, tag = "5")]
-    pub sort_by: ::core::option::Option<Sort>,
-}
-/// SignalList represents collection of signals along with the token of the last result.
-/// See :ref:`ref_flyteidl.admin.Signal` for more details
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct SignalList {
-    /// A list of signals matching the input filters.
-    #[prost(message, repeated, tag = "1")]
-    pub signals: ::prost::alloc::vec::Vec<Signal>,
-    /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
-    /// in a query. If there are no more results, this value will be empty.
-    #[prost(string, tag = "2")]
-    pub token: ::prost::alloc::string::String,
-}
-/// SignalSetRequest represents a request structure to set the value on a signal. Setting a signal
-/// effectively satisfies the signal condition within a Flyte workflow.
-/// See :ref:`ref_flyteidl.admin.Signal` for more details
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct SignalSetRequest {
-    /// A unique identifier for the requested signal.
-    #[prost(message, optional, tag = "1")]
-    pub id: ::core::option::Option<super::core::SignalIdentifier>,
-    /// The value of this signal, must match the defining signal type.
-    #[prost(message, optional, tag = "2")]
-    pub value: ::core::option::Option<super::core::Literal>,
-}
-/// SignalSetResponse represents a response structure if signal setting succeeds.
-///
-/// Purposefully empty, may be populated in the future.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct SignalSetResponse {}
-/// Signal encapsulates a unique identifier, associated metadata, and a value for a single Flyte
-/// signal. Signals may exist either without a set value (representing a signal request) or with a
-/// populated value (indicating the signal has been given).
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Signal {
-    /// A unique identifier for the requested signal.
-    #[prost(message, optional, tag = "1")]
-    pub id: ::core::option::Option<super::core::SignalIdentifier>,
-    /// A type denoting the required value type for this signal.
- #[prost(message, optional, tag = "2")] - pub r#type: ::core::option::Option, - /// The value of the signal. This is only available if the signal has been "set" and must match - /// the defined the type. - #[prost(message, optional, tag = "3")] - pub value: ::core::option::Option, -} -/// Represents a subset of runtime task execution metadata that are relevant to external plugins. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskExecutionMetadata { - /// ID of the task execution - #[prost(message, optional, tag = "1")] - pub task_execution_id: ::core::option::Option, - /// k8s namespace where the task is executed in - #[prost(string, tag = "2")] - pub namespace: ::prost::alloc::string::String, - /// Labels attached to the task execution - #[prost(map = "string, string", tag = "3")] - pub labels: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, - /// Annotations attached to the task execution - #[prost(map = "string, string", tag = "4")] - pub annotations: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, - /// k8s service account associated with the task execution - #[prost(string, tag = "5")] - pub k8s_service_account: ::prost::alloc::string::String, - /// Environment variables attached to the task execution - #[prost(map = "string, string", tag = "6")] - pub environment_variables: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, - /// Represents the maximum number of attempts allowed for a task. - /// If a task fails, it can be retried up to this maximum number of attempts. - #[prost(int32, tag = "7")] - pub max_attempts: i32, - /// Indicates whether the task execution can be interrupted. - /// If set to true, the task can be stopped before completion. - #[prost(bool, tag = "8")] - pub interruptible: bool, - /// Specifies the threshold for failure count at which the interruptible property - /// will take effect. If the number of consecutive task failures exceeds this threshold, - /// interruptible behavior will be activated. - #[prost(int32, tag = "9")] - pub interruptible_failure_threshold: i32, - /// Overrides for specific properties of the task node. - /// These overrides can be used to customize the behavior of the task node. - #[prost(message, optional, tag = "10")] - pub overrides: ::core::option::Option, -} -/// Represents a request structure to create task. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateTaskRequest { - /// The inputs required to start the execution. All required inputs must be - /// included in this map. If not required and not provided, defaults apply. - /// +optional - #[prost(message, optional, tag = "1")] - pub inputs: ::core::option::Option, - /// Template of the task that encapsulates all the metadata of the task. - #[prost(message, optional, tag = "2")] - pub template: ::core::option::Option, - /// Prefix for where task output data will be written. (e.g. s3://my-bucket/randomstring) - #[prost(string, tag = "3")] - pub output_prefix: ::prost::alloc::string::String, - /// subset of runtime task execution metadata. - #[prost(message, optional, tag = "4")] - pub task_execution_metadata: ::core::option::Option, -} -/// Represents a create response structure. 
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateTaskResponse { - /// ResourceMeta is created by the agent. It could be a string (jobId) or a dict (more complex metadata). - #[prost(bytes = "vec", tag = "1")] - pub resource_meta: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateRequestHeader { - /// Template of the task that encapsulates all the metadata of the task. - #[prost(message, optional, tag = "1")] - pub template: ::core::option::Option, - /// Prefix for where task output data will be written. (e.g. s3://my-bucket/randomstring) - #[prost(string, tag = "2")] - pub output_prefix: ::prost::alloc::string::String, - /// subset of runtime task execution metadata. - #[prost(message, optional, tag = "3")] - pub task_execution_metadata: ::core::option::Option, - /// MaxDatasetSizeBytes is the maximum size of the dataset that can be generated by the task. - #[prost(int64, tag = "4")] - pub max_dataset_size_bytes: i64, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecuteTaskSyncRequest { - #[prost(oneof = "execute_task_sync_request::Part", tags = "1, 2")] - pub part: ::core::option::Option, -} -/// Nested message and enum types in `ExecuteTaskSyncRequest`. -pub mod execute_task_sync_request { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Part { - #[prost(message, tag = "1")] - Header(super::CreateRequestHeader), - #[prost(message, tag = "2")] - Inputs(super::super::core::LiteralMap), - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecuteTaskSyncResponseHeader { - #[prost(message, optional, tag = "1")] - pub resource: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecuteTaskSyncResponse { - /// Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata). - /// Resource is for synchronous task execution. - #[prost(oneof = "execute_task_sync_response::Res", tags = "1, 2")] - pub res: ::core::option::Option, -} -/// Nested message and enum types in `ExecuteTaskSyncResponse`. -pub mod execute_task_sync_response { - /// Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata). - /// Resource is for synchronous task execution. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Res { - #[prost(message, tag = "1")] - Header(super::ExecuteTaskSyncResponseHeader), - #[prost(message, tag = "2")] - Outputs(super::super::core::LiteralMap), - } -} -/// A message used to fetch a job resource from flyte agent server. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetTaskRequest { - /// A predefined yet extensible Task type identifier. - #[deprecated] - #[prost(string, tag = "1")] - pub task_type: ::prost::alloc::string::String, - /// Metadata about the resource to be pass to the agent. - #[prost(bytes = "vec", tag = "2")] - pub resource_meta: ::prost::alloc::vec::Vec, - /// A predefined yet extensible Task type identifier. - #[prost(message, optional, tag = "3")] - pub task_category: ::core::option::Option, -} -/// Response to get an individual task resource. 
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetTaskResponse {
- #[prost(message, optional, tag = "1")]
- pub resource: ::core::option::Option<Resource>,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Resource {
- /// DEPRECATED. The state of the execution is used to control its visibility in the UI/CLI.
- #[deprecated]
- #[prost(enumeration = "State", tag = "1")]
- pub state: i32,
- /// The outputs of the execution. It's typically used by SQL tasks. The agent service will create a
- /// structured dataset pointing to the query result table.
- /// +optional
- #[prost(message, optional, tag = "2")]
- pub outputs: ::core::option::Option<super::core::LiteralMap>,
- /// A descriptive message for the current state. e.g. waiting for cluster.
- #[prost(string, tag = "3")]
- pub message: ::prost::alloc::string::String,
- /// Log information for the task execution.
- #[prost(message, repeated, tag = "4")]
- pub log_links: ::prost::alloc::vec::Vec<super::core::TaskLog>,
- /// The phase of the execution is used to determine the phase of the plugin's execution.
- #[prost(enumeration = "super::core::task_execution::Phase", tag = "5")]
- pub phase: i32,
- /// Custom data specific to the agent.
- #[prost(message, optional, tag = "6")]
- pub custom_info: ::core::option::Option<::prost_types::Struct>,
-}
-/// A message used to delete a task.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct DeleteTaskRequest {
- /// A predefined yet extensible Task type identifier.
- #[deprecated]
- #[prost(string, tag = "1")]
- pub task_type: ::prost::alloc::string::String,
- /// Metadata about the resource to be passed to the agent.
- #[prost(bytes = "vec", tag = "2")]
- pub resource_meta: ::prost::alloc::vec::Vec<u8>,
- /// A predefined yet extensible Task type identifier.
- #[prost(message, optional, tag = "3")]
- pub task_category: ::core::option::Option<TaskCategory>,
-}
-/// Response to delete a task.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct DeleteTaskResponse {}
-/// A message containing the agent metadata.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Agent {
- /// Name is the developer-assigned name of the agent.
- #[prost(string, tag = "1")]
- pub name: ::prost::alloc::string::String,
- /// SupportedTaskTypes are the types of the tasks that the agent can handle.
- #[deprecated]
- #[prost(string, repeated, tag = "2")]
- pub supported_task_types: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
- /// IsSync indicates whether this agent is a sync agent. Sync agents are expected to return their
- /// results synchronously when called by propeller. Given that sync agents can affect the performance
- /// of the system, it's important to enforce strict timeout policies.
- /// An async agent, on the other hand, is required to be able to identify jobs by an
- /// identifier and query for job statuses as jobs progress.
- #[prost(bool, tag = "3")]
- pub is_sync: bool,
- /// SupportedTaskCategories are the categories of the tasks that the agent can handle.
- #[prost(message, repeated, tag = "4")]
- pub supported_task_categories: ::prost::alloc::vec::Vec<TaskCategory>,
-}
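-// Editor's example sketch (not part of the original generated file): checking
-// whether an agent-reported Resource has reached a terminal phase. The
-// `phase()` accessor is the getter prost generates for enumeration fields;
-// the terminal variant names are assumed from the core task_execution Phase
-// enum and are not confirmed by this patch.
-#[allow(dead_code)]
-fn example_resource_is_terminal(resource: &Resource) -> bool {
-    use super::core::task_execution::Phase;
-    matches!(
-        resource.phase(),
-        Phase::Succeeded | Phase::Failed | Phase::Aborted
-    )
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct TaskCategory {
- /// The name of the task type.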
- #[prost(string, tag = "1")]
- pub name: ::prost::alloc::string::String,
- /// The version of the task type.
- #[prost(int32, tag = "2")]
- pub version: i32,
-}
-/// A request to get an agent.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetAgentRequest {
- /// The name of the agent.
- #[prost(string, tag = "1")]
- pub name: ::prost::alloc::string::String,
-}
-/// A response containing an agent.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetAgentResponse {
- #[prost(message, optional, tag = "1")]
- pub agent: ::core::option::Option<Agent>,
-}
-/// A request to list all agents.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ListAgentsRequest {}
-/// A response containing a list of agents.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ListAgentsResponse {
- #[prost(message, repeated, tag = "1")]
- pub agents: ::prost::alloc::vec::Vec<Agent>,
-}
-/// A request to get the metrics from a task execution.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetTaskMetricsRequest {
- /// A predefined yet extensible Task type identifier.
- #[deprecated]
- #[prost(string, tag = "1")]
- pub task_type: ::prost::alloc::string::String,
- /// Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata).
- #[prost(bytes = "vec", tag = "2")]
- pub resource_meta: ::prost::alloc::vec::Vec<u8>,
- /// The metrics to query. If empty, will return a default set of metrics.
- /// e.g. EXECUTION_METRIC_USED_CPU_AVG or EXECUTION_METRIC_USED_MEMORY_BYTES_AVG
- #[prost(string, repeated, tag = "3")]
- pub queries: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
- /// Start timestamp, inclusive.
- #[prost(message, optional, tag = "4")]
- pub start_time: ::core::option::Option<::prost_types::Timestamp>,
- /// End timestamp, inclusive.
- #[prost(message, optional, tag = "5")]
- pub end_time: ::core::option::Option<::prost_types::Timestamp>,
- /// Query resolution step width in duration format or float number of seconds.
- #[prost(message, optional, tag = "6")]
- pub step: ::core::option::Option<::prost_types::Duration>,
- /// A predefined yet extensible Task type identifier.
- #[prost(message, optional, tag = "7")]
- pub task_category: ::core::option::Option<TaskCategory>,
-}
-/// A response containing a list of metrics for a task execution.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetTaskMetricsResponse {
- /// The execution metric results.
- #[prost(message, repeated, tag = "1")]
- pub results: ::prost::alloc::vec::Vec<super::core::ExecutionMetricResult>,
-}
-/// A request to get the logs from a task execution.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetTaskLogsRequest {
- /// A predefined yet extensible Task type identifier.
- #[deprecated]
- #[prost(string, tag = "1")]
- pub task_type: ::prost::alloc::string::String,
- /// Metadata is created by the agent. It could be a string (jobId) or a dict (more complex metadata).
- #[prost(bytes = "vec", tag = "2")]
- pub resource_meta: ::prost::alloc::vec::Vec<u8>,
- /// Number of lines to return.
- #[prost(uint64, tag = "3")]
- pub lines: u64,
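-// Editor's example sketch (not part of the original generated file): a
-// metrics query over a one-hour window at 60-second resolution. The epoch
-// timestamps are placeholders.
-#[allow(dead_code)]
-fn example_metrics_request(resource_meta: Vec<u8>) -> GetTaskMetricsRequest {
-    GetTaskMetricsRequest {
-        resource_meta,
-        queries: vec!["EXECUTION_METRIC_USED_CPU_AVG".to_string()],
-        start_time: Some(::prost_types::Timestamp { seconds: 1_700_000_000, nanos: 0 }),
-        end_time: Some(::prost_types::Timestamp { seconds: 1_700_003_600, nanos: 0 }),
-        step: Some(::prost_types::Duration { seconds: 60, nanos: 0 }),
-        // Remaining fields (including the deprecated task_type) stay at defaults.
-        ..Default::default()
-    }
-}
- /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
- /// in a query. If there are no more results, this value will be empty.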
- #[prost(string, tag = "4")]
- pub token: ::prost::alloc::string::String,
- /// A predefined yet extensible Task type identifier.
- #[prost(message, optional, tag = "5")]
- pub task_category: ::core::option::Option<TaskCategory>,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetTaskLogsResponseHeader {
- /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
- /// in a query. If there are no more results, this value will be empty.
- #[prost(string, tag = "1")]
- pub token: ::prost::alloc::string::String,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetTaskLogsResponseBody {
- /// The execution log results.
- #[prost(string, repeated, tag = "1")]
- pub results: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
-}
-/// A response containing the logs for a task execution.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetTaskLogsResponse {
- #[prost(oneof = "get_task_logs_response::Part", tags = "1, 2")]
- pub part: ::core::option::Option<get_task_logs_response::Part>,
-}
-/// Nested message and enum types in `GetTaskLogsResponse`.
-pub mod get_task_logs_response {
- #[allow(clippy::derive_partial_eq_without_eq)]
- #[derive(Clone, PartialEq, ::prost::Oneof)]
- pub enum Part {
- #[prost(message, tag = "1")]
- Header(super::GetTaskLogsResponseHeader),
- #[prost(message, tag = "2")]
- Body(super::GetTaskLogsResponseBody),
- }
-}
-/// The state of the execution is used to control its visibility in the UI/CLI.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
-#[repr(i32)]
-pub enum State {
- RetryableFailure = 0,
- PermanentFailure = 1,
- Pending = 2,
- Running = 3,
- Succeeded = 4,
-}
-impl State {
- /// String value of the enum field names used in the ProtoBuf definition.
- ///
- /// The values are not transformed in any way and thus are considered stable
- /// (if the ProtoBuf definition does not change) and safe for programmatic use.
- pub fn as_str_name(&self) -> &'static str {
- match self {
- State::RetryableFailure => "RETRYABLE_FAILURE",
- State::PermanentFailure => "PERMANENT_FAILURE",
- State::Pending => "PENDING",
- State::Running => "RUNNING",
- State::Succeeded => "SUCCEEDED",
- }
- }
- /// Creates an enum from field names used in the ProtoBuf definition.
- pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
- match value {
- "RETRYABLE_FAILURE" => Some(Self::RetryableFailure),
- "PERMANENT_FAILURE" => Some(Self::PermanentFailure),
- "PENDING" => Some(Self::Pending),
- "RUNNING" => Some(Self::Running),
- "SUCCEEDED" => Some(Self::Succeeded),
- _ => None,
- }
- }
-}
-/// Namespace within a project commonly used to differentiate between different service instances.
-/// e.g. "production", "development", etc.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Domain {
- /// Globally unique domain name.
- #[prost(string, tag = "1")]
- pub id: ::prost::alloc::string::String,
- /// Display name.
- #[prost(string, tag = "2")]
- pub name: ::prost::alloc::string::String,
-}
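-// Editor's example sketch (not part of the original generated file): the
-// as_str_name/from_str_name pair round-trips the wire names shown above.
-#[allow(dead_code)]
-fn example_state_round_trip() {
-    assert_eq!(State::Running.as_str_name(), "RUNNING");
-    assert_eq!(State::from_str_name("RUNNING"), Some(State::Running));
-    // Unrecognized names yield None rather than a default variant.
-    assert_eq!(State::from_str_name("UNKNOWN"), None);
-}
-/// Top-level namespace used to classify different entities like workflows and executions.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Project {
- /// Globally unique project name.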
- #[prost(string, tag = "1")] - pub id: ::prost::alloc::string::String, - /// Display name. - #[prost(string, tag = "2")] - pub name: ::prost::alloc::string::String, - #[prost(message, repeated, tag = "3")] - pub domains: ::prost::alloc::vec::Vec, - #[prost(string, tag = "4")] - pub description: ::prost::alloc::string::String, - /// Leverage Labels from flyteidl.admin.common.proto to - /// tag projects with ownership information. - #[prost(message, optional, tag = "5")] - pub labels: ::core::option::Option, - #[prost(enumeration = "project::ProjectState", tag = "6")] - pub state: i32, - /// Optional, org key applied to the resource. - #[prost(string, tag = "7")] - pub org: ::prost::alloc::string::String, -} -/// Nested message and enum types in `Project`. -pub mod project { - /// The state of the project is used to control its visibility in the UI and validity. - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum ProjectState { - /// By default, all projects are considered active. - Active = 0, - /// Archived projects are no longer visible in the UI and no longer valid. - Archived = 1, - /// System generated projects that aren't explicitly created or managed by a user. - SystemGenerated = 2, - } - impl ProjectState { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - ProjectState::Active => "ACTIVE", - ProjectState::Archived => "ARCHIVED", - ProjectState::SystemGenerated => "SYSTEM_GENERATED", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "ACTIVE" => Some(Self::Active), - "ARCHIVED" => Some(Self::Archived), - "SYSTEM_GENERATED" => Some(Self::SystemGenerated), - _ => None, - } - } - } -} -/// Represents a list of projects. -/// See :ref:`ref_flyteidl.admin.Project` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Projects { - #[prost(message, repeated, tag = "1")] - pub projects: ::prost::alloc::vec::Vec, - /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page - /// in a query. If there are no more results, this value will be empty. - #[prost(string, tag = "2")] - pub token: ::prost::alloc::string::String, -} -/// Request to retrieve a list of projects matching specified filters. -/// See :ref:`ref_flyteidl.admin.Project` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectListRequest { - /// Indicates the number of projects to be returned. - /// +required - #[prost(uint32, tag = "1")] - pub limit: u32, - /// In the case of multiple pages of results, this server-provided token can be used to fetch the next page - /// in a query. - /// +optional - #[prost(string, tag = "2")] - pub token: ::prost::alloc::string::String, - /// Indicates a list of filters passed as string. - /// More info on constructing filters : - /// +optional - #[prost(string, tag = "3")] - pub filters: ::prost::alloc::string::String, - /// Sort ordering. 
- /// +optional - #[prost(message, optional, tag = "4")] - pub sort_by: ::core::option::Option, - /// Optional, org filter applied to list project requests. - #[prost(string, tag = "5")] - pub org: ::prost::alloc::string::String, -} -/// Adds a new user-project within the Flyte deployment. -/// See :ref:`ref_flyteidl.admin.Project` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectRegisterRequest { - /// +required - #[prost(message, optional, tag = "1")] - pub project: ::core::option::Option, -} -/// Purposefully empty, may be updated in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectRegisterResponse {} -/// Purposefully empty, may be updated in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectUpdateResponse {} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectGetRequest { - /// Indicates a unique project. - /// +required - #[prost(string, tag = "1")] - pub id: ::prost::alloc::string::String, - /// Optional, org key applied to the resource. - #[prost(string, tag = "2")] - pub org: ::prost::alloc::string::String, -} -/// Encapsulates specifications for routing an execution onto a specific cluster. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ClusterAssignment { - #[prost(string, tag = "3")] - pub cluster_pool_name: ::prost::alloc::string::String, -} -/// Defines a set of overridable task resource attributes set during task registration. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskResourceSpec { - #[prost(string, tag = "1")] - pub cpu: ::prost::alloc::string::String, - #[prost(string, tag = "2")] - pub gpu: ::prost::alloc::string::String, - #[prost(string, tag = "3")] - pub memory: ::prost::alloc::string::String, - #[prost(string, tag = "4")] - pub storage: ::prost::alloc::string::String, - #[prost(string, tag = "5")] - pub ephemeral_storage: ::prost::alloc::string::String, -} -/// Defines task resource defaults and limits that will be applied at task registration. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskResourceAttributes { - #[prost(message, optional, tag = "1")] - pub defaults: ::core::option::Option, - #[prost(message, optional, tag = "2")] - pub limits: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ClusterResourceAttributes { - /// Custom resource attributes which will be applied in cluster resource creation (e.g. quotas). - /// Map keys are the *case-sensitive* names of variables in templatized resource files. - /// Map values should be the custom values which get substituted during resource creation. - #[prost(map = "string, string", tag = "1")] - pub attributes: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecutionQueueAttributes { - /// Tags used for assigning execution queues for tasks defined within this project. 
- #[prost(string, repeated, tag = "1")]
- pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ExecutionClusterLabel {
- /// Label value to determine where the execution will be run
- #[prost(string, tag = "1")]
- pub value: ::prost::alloc::string::String,
-}
-/// This MatchableAttribute configures selecting alternate plugin implementations for a given task type.
-/// In addition to an override implementation a selection of fallbacks can be provided or other modes
-/// for handling cases where the desired plugin override is not enabled in a given Flyte deployment.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct PluginOverride {
- /// A predefined yet extensible Task type identifier.
- #[prost(string, tag = "1")]
- pub task_type: ::prost::alloc::string::String,
- /// A set of plugin ids which should handle tasks of this type instead of the default registered plugin. The list will be tried in order until a plugin is found with that id.
- #[prost(string, repeated, tag = "2")]
- pub plugin_id: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
- /// Defines the behavior when no plugin from the plugin_id list is found.
- #[prost(enumeration = "plugin_override::MissingPluginBehavior", tag = "4")]
- pub missing_plugin_behavior: i32,
-}
-/// Nested message and enum types in `PluginOverride`.
-pub mod plugin_override {
- #[derive(
- Clone,
- Copy,
- Debug,
- PartialEq,
- Eq,
- Hash,
- PartialOrd,
- Ord,
- ::prost::Enumeration
- )]
- #[repr(i32)]
- pub enum MissingPluginBehavior {
- /// By default, if this plugin is not enabled for a Flyte deployment then execution will fail.
- Fail = 0,
- /// Uses the system-configured default implementation.
- UseDefault = 1,
- }
- impl MissingPluginBehavior {
- /// String value of the enum field names used in the ProtoBuf definition.
- ///
- /// The values are not transformed in any way and thus are considered stable
- /// (if the ProtoBuf definition does not change) and safe for programmatic use.
- pub fn as_str_name(&self) -> &'static str {
- match self {
- MissingPluginBehavior::Fail => "FAIL",
- MissingPluginBehavior::UseDefault => "USE_DEFAULT",
- }
- }
- /// Creates an enum from field names used in the ProtoBuf definition.
- pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
- match value {
- "FAIL" => Some(Self::Fail),
- "USE_DEFAULT" => Some(Self::UseDefault),
- _ => None,
- }
- }
- }
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct PluginOverrides {
- #[prost(message, repeated, tag = "1")]
- pub overrides: ::prost::alloc::vec::Vec<PluginOverride>,
-}
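-// Editor's example sketch (not part of the original generated file):
-// overriding the plugin used for a hypothetical "spark" task type, falling
-// back to the system default when neither plugin id is enabled.
-#[allow(dead_code)]
-fn example_plugin_override() -> PluginOverride {
-    PluginOverride {
-        task_type: "spark".to_string(),
-        // Tried in order until an enabled plugin id is found.
-        plugin_id: vec!["spark-v2".to_string(), "spark".to_string()],
-        missing_plugin_behavior: plugin_override::MissingPluginBehavior::UseDefault as i32,
-    }
-}
-/// Adds defaults for customizable workflow-execution specifications and overrides.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowExecutionConfig {
- /// Can be used to control the number of parallel nodes to run within the workflow. This is useful to achieve fairness.
- #[prost(int32, tag = "1")]
- pub max_parallelism: i32,
- /// Indicates security context permissions for executions triggered with this matchable attribute.
- #[prost(message, optional, tag = "2")]
- pub security_context: ::core::option::Option<super::core::SecurityContext>,
- /// Encapsulates user settings pertaining to offloaded data (i.e. Blobs, Schema, query data, etc.).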
- #[prost(message, optional, tag = "3")] - pub raw_output_data_config: ::core::option::Option, - /// Custom labels to be applied to a triggered execution resource. - #[prost(message, optional, tag = "4")] - pub labels: ::core::option::Option, - /// Custom annotations to be applied to a triggered execution resource. - #[prost(message, optional, tag = "5")] - pub annotations: ::core::option::Option, - /// Allows for the interruptible flag of a workflow to be overwritten for a single execution. - /// Omitting this field uses the workflow's value as a default. - /// As we need to distinguish between the field not being provided and its default value false, we have to use a wrapper - /// around the bool field. - #[prost(message, optional, tag = "6")] - pub interruptible: ::core::option::Option, - /// Allows for all cached values of a workflow and its tasks to be overwritten for a single execution. - /// If enabled, all calculations are performed even if cached results would be available, overwriting the stored - /// data once execution finishes successfully. - #[prost(bool, tag = "7")] - pub overwrite_cache: bool, - /// Environment variables to be set for the execution. - #[prost(message, optional, tag = "8")] - pub envs: ::core::option::Option, -} -/// Generic container for encapsulating all types of the above attributes messages. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MatchingAttributes { - #[prost(oneof = "matching_attributes::Target", tags = "1, 2, 3, 4, 5, 6, 7, 8")] - pub target: ::core::option::Option, -} -/// Nested message and enum types in `MatchingAttributes`. -pub mod matching_attributes { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Target { - #[prost(message, tag = "1")] - TaskResourceAttributes(super::TaskResourceAttributes), - #[prost(message, tag = "2")] - ClusterResourceAttributes(super::ClusterResourceAttributes), - #[prost(message, tag = "3")] - ExecutionQueueAttributes(super::ExecutionQueueAttributes), - #[prost(message, tag = "4")] - ExecutionClusterLabel(super::ExecutionClusterLabel), - #[prost(message, tag = "5")] - QualityOfService(super::super::core::QualityOfService), - #[prost(message, tag = "6")] - PluginOverrides(super::PluginOverrides), - #[prost(message, tag = "7")] - WorkflowExecutionConfig(super::WorkflowExecutionConfig), - #[prost(message, tag = "8")] - ClusterAssignment(super::ClusterAssignment), - } -} -/// Represents a custom set of attributes applied for either a domain (and optional org); a domain and project (and optional org); -/// or domain, project and workflow name (and optional org). -/// These are used to override system level defaults for kubernetes cluster resource management, -/// default execution values, and more all across different levels of specificity. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct MatchableAttributesConfiguration { - #[prost(message, optional, tag = "1")] - pub attributes: ::core::option::Option, - #[prost(string, tag = "2")] - pub domain: ::prost::alloc::string::String, - #[prost(string, tag = "3")] - pub project: ::prost::alloc::string::String, - #[prost(string, tag = "4")] - pub workflow: ::prost::alloc::string::String, - #[prost(string, tag = "5")] - pub launch_plan: ::prost::alloc::string::String, - /// Optional, org key applied to the resource. 
- #[prost(string, tag = "6")] - pub org: ::prost::alloc::string::String, -} -/// Request all matching resource attributes for a resource type. -/// See :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListMatchableAttributesRequest { - /// +required - #[prost(enumeration = "MatchableResource", tag = "1")] - pub resource_type: i32, - /// Optional, org filter applied to list project requests. - #[prost(string, tag = "2")] - pub org: ::prost::alloc::string::String, -} -/// Response for a request for all matching resource attributes for a resource type. -/// See :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ListMatchableAttributesResponse { - #[prost(message, repeated, tag = "1")] - pub configurations: ::prost::alloc::vec::Vec, -} -/// Defines a resource that can be configured by customizable Project-, ProjectDomain- or WorkflowAttributes -/// based on matching tags. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum MatchableResource { - /// Applies to customizable task resource requests and limits. - TaskResource = 0, - /// Applies to configuring templated kubernetes cluster resources. - ClusterResource = 1, - /// Configures task and dynamic task execution queue assignment. - ExecutionQueue = 2, - /// Configures the K8s cluster label to be used for execution to be run - ExecutionClusterLabel = 3, - /// Configures default quality of service when undefined in an execution spec. - QualityOfServiceSpecification = 4, - /// Selects configurable plugin implementation behavior for a given task type. - PluginOverride = 5, - /// Adds defaults for customizable workflow-execution specifications and overrides. - WorkflowExecutionConfig = 6, - /// Controls how to select an available cluster on which this execution should run. - ClusterAssignment = 7, -} -impl MatchableResource { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - MatchableResource::TaskResource => "TASK_RESOURCE", - MatchableResource::ClusterResource => "CLUSTER_RESOURCE", - MatchableResource::ExecutionQueue => "EXECUTION_QUEUE", - MatchableResource::ExecutionClusterLabel => "EXECUTION_CLUSTER_LABEL", - MatchableResource::QualityOfServiceSpecification => { - "QUALITY_OF_SERVICE_SPECIFICATION" - } - MatchableResource::PluginOverride => "PLUGIN_OVERRIDE", - MatchableResource::WorkflowExecutionConfig => "WORKFLOW_EXECUTION_CONFIG", - MatchableResource::ClusterAssignment => "CLUSTER_ASSIGNMENT", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "TASK_RESOURCE" => Some(Self::TaskResource), - "CLUSTER_RESOURCE" => Some(Self::ClusterResource), - "EXECUTION_QUEUE" => Some(Self::ExecutionQueue), - "EXECUTION_CLUSTER_LABEL" => Some(Self::ExecutionClusterLabel), - "QUALITY_OF_SERVICE_SPECIFICATION" => { - Some(Self::QualityOfServiceSpecification) - } - "PLUGIN_OVERRIDE" => Some(Self::PluginOverride), - "WORKFLOW_EXECUTION_CONFIG" => Some(Self::WorkflowExecutionConfig), - "CLUSTER_ASSIGNMENT" => Some(Self::ClusterAssignment), - _ => None, - } - } -} -/// Defines a set of custom matching attributes which defines resource defaults for a project and domain. -/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectDomainAttributes { - /// Unique project id for which this set of attributes will be applied. - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - /// Unique domain id for which this set of attributes will be applied. - #[prost(string, tag = "2")] - pub domain: ::prost::alloc::string::String, - #[prost(message, optional, tag = "3")] - pub matching_attributes: ::core::option::Option, - /// Optional, org key applied to the attributes. - #[prost(string, tag = "4")] - pub org: ::prost::alloc::string::String, -} -/// Sets custom attributes for a project-domain combination. -/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectDomainAttributesUpdateRequest { - /// +required - #[prost(message, optional, tag = "1")] - pub attributes: ::core::option::Option, -} -/// Purposefully empty, may be populated in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectDomainAttributesUpdateResponse {} -/// Request to get an individual project domain attribute override. -/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectDomainAttributesGetRequest { - /// Unique project id which this set of attributes references. - /// +required - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - /// Unique domain id which this set of attributes references. - /// +required - #[prost(string, tag = "2")] - pub domain: ::prost::alloc::string::String, - /// Which type of matchable attributes to return. - /// +required - #[prost(enumeration = "MatchableResource", tag = "3")] - pub resource_type: i32, - /// Optional, org key applied to the attributes. - #[prost(string, tag = "4")] - pub org: ::prost::alloc::string::String, -} -/// Response to get an individual project domain attribute override. -/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectDomainAttributesGetResponse { - #[prost(message, optional, tag = "1")] - pub attributes: ::core::option::Option, -} -/// Request to delete a set matchable project domain attribute override. 
-/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectDomainAttributesDeleteRequest { - /// Unique project id which this set of attributes references. - /// +required - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - /// Unique domain id which this set of attributes references. - /// +required - #[prost(string, tag = "2")] - pub domain: ::prost::alloc::string::String, - /// Which type of matchable attributes to delete. - /// +required - #[prost(enumeration = "MatchableResource", tag = "3")] - pub resource_type: i32, - /// Optional, org key applied to the attributes. - #[prost(string, tag = "4")] - pub org: ::prost::alloc::string::String, -} -/// Purposefully empty, may be populated in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectDomainAttributesDeleteResponse {} -/// Defines a set of custom matching attributes at the project level. -/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectAttributes { - /// Unique project id for which this set of attributes will be applied. - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - #[prost(message, optional, tag = "2")] - pub matching_attributes: ::core::option::Option, - /// Optional, org key applied to the project. - #[prost(string, tag = "3")] - pub org: ::prost::alloc::string::String, -} -/// Sets custom attributes for a project -/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectAttributesUpdateRequest { - /// +required - #[prost(message, optional, tag = "1")] - pub attributes: ::core::option::Option, -} -/// Purposefully empty, may be populated in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectAttributesUpdateResponse {} -/// Request to get an individual project level attribute override. -/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectAttributesGetRequest { - /// Unique project id which this set of attributes references. - /// +required - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - /// Which type of matchable attributes to return. - /// +required - #[prost(enumeration = "MatchableResource", tag = "2")] - pub resource_type: i32, - /// Optional, org key applied to the project. - #[prost(string, tag = "3")] - pub org: ::prost::alloc::string::String, -} -/// Response to get an individual project level attribute override. -/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectAttributesGetResponse { - #[prost(message, optional, tag = "1")] - pub attributes: ::core::option::Option, -} -/// Request to delete a set matchable project level attribute override. 
-/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectAttributesDeleteRequest { - /// Unique project id which this set of attributes references. - /// +required - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - /// Which type of matchable attributes to delete. - /// +required - #[prost(enumeration = "MatchableResource", tag = "2")] - pub resource_type: i32, - /// Optional, org key applied to the project. - #[prost(string, tag = "3")] - pub org: ::prost::alloc::string::String, -} -/// Purposefully empty, may be populated in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ProjectAttributesDeleteResponse {} -/// DescriptionEntity contains detailed description for the task/workflow. -/// Documentation could provide insight into the algorithms, business use case, etc. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DescriptionEntity { - /// id represents the unique identifier of the description entity. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// One-liner overview of the entity. - #[prost(string, tag = "2")] - pub short_description: ::prost::alloc::string::String, - /// Full user description with formatting preserved. - #[prost(message, optional, tag = "3")] - pub long_description: ::core::option::Option, - /// Optional link to source code used to define this entity. - #[prost(message, optional, tag = "4")] - pub source_code: ::core::option::Option, - /// User-specified tags. These are arbitrary and can be used for searching - /// filtering and discovering tasks. - #[prost(string, repeated, tag = "5")] - pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -/// Full user description with formatting preserved. This can be rendered -/// by clients, such as the console or command line tools with in-tact -/// formatting. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Description { - /// Format of the long description - #[prost(enumeration = "DescriptionFormat", tag = "3")] - pub format: i32, - /// Optional link to an icon for the entity - #[prost(string, tag = "4")] - pub icon_link: ::prost::alloc::string::String, - #[prost(oneof = "description::Content", tags = "1, 2")] - pub content: ::core::option::Option, -} -/// Nested message and enum types in `Description`. -pub mod description { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Content { - /// long description - no more than 4KB - #[prost(string, tag = "1")] - Value(::prost::alloc::string::String), - /// if the description sizes exceed some threshold we can offload the entire - /// description proto altogether to an external data store, like S3 rather than store inline in the db - #[prost(string, tag = "2")] - Uri(::prost::alloc::string::String), - } -} -/// Link to source code used to define this entity -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SourceCode { - #[prost(string, tag = "1")] - pub link: ::prost::alloc::string::String, -} -/// Represents a list of DescriptionEntities returned from the admin. 
-/// See :ref:`ref_flyteidl.admin.DescriptionEntity` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DescriptionEntityList { - /// A list of DescriptionEntities returned based on the request. - #[prost(message, repeated, tag = "1")] - pub description_entities: ::prost::alloc::vec::Vec, - /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page - /// in a query. If there are no more results, this value will be empty. - #[prost(string, tag = "2")] - pub token: ::prost::alloc::string::String, -} -/// Represents a request structure to retrieve a list of DescriptionEntities. -/// See :ref:`ref_flyteidl.admin.DescriptionEntity` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DescriptionEntityListRequest { - /// Identifies the specific type of resource that this identifier corresponds to. - #[prost(enumeration = "super::core::ResourceType", tag = "1")] - pub resource_type: i32, - /// The identifier for the description entity. - /// +required - #[prost(message, optional, tag = "2")] - pub id: ::core::option::Option, - /// Indicates the number of resources to be returned. - /// +required - #[prost(uint32, tag = "3")] - pub limit: u32, - /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page - /// in a query. - /// +optional - #[prost(string, tag = "4")] - pub token: ::prost::alloc::string::String, - /// Indicates a list of filters passed as string. - /// More info on constructing filters : - /// +optional - #[prost(string, tag = "5")] - pub filters: ::prost::alloc::string::String, - /// Sort ordering for returned list. - /// +optional - #[prost(message, optional, tag = "6")] - pub sort_by: ::core::option::Option, -} -/// The format of the long description -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum DescriptionFormat { - Unknown = 0, - Markdown = 1, - Html = 2, - /// python default documentation - comments is rst - Rst = 3, -} -impl DescriptionFormat { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - DescriptionFormat::Unknown => "DESCRIPTION_FORMAT_UNKNOWN", - DescriptionFormat::Markdown => "DESCRIPTION_FORMAT_MARKDOWN", - DescriptionFormat::Html => "DESCRIPTION_FORMAT_HTML", - DescriptionFormat::Rst => "DESCRIPTION_FORMAT_RST", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "DESCRIPTION_FORMAT_UNKNOWN" => Some(Self::Unknown), - "DESCRIPTION_FORMAT_MARKDOWN" => Some(Self::Markdown), - "DESCRIPTION_FORMAT_HTML" => Some(Self::Html), - "DESCRIPTION_FORMAT_RST" => Some(Self::Rst), - _ => None, - } - } -} -/// Represents a request structure to create a revision of a task. -/// See :ref:`ref_flyteidl.admin.Task` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskCreateRequest { - /// id represents the unique identifier of the task. 
- /// +required
- #[prost(message, optional, tag = "1")]
- pub id: ::core::option::Option<super::core::Identifier>,
- /// Represents the specification for task.
- /// +required
- #[prost(message, optional, tag = "2")]
- pub spec: ::core::option::Option<TaskSpec>,
-}
-/// Represents a response structure if task creation succeeds.
-///
-/// Purposefully empty, may be populated in the future.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct TaskCreateResponse {}
-/// Flyte workflows are composed of many ordered tasks; that is, small, reusable, self-contained logical blocks
-/// arranged to process workflow inputs and produce a deterministic set of outputs.
-/// Tasks can come in many varieties tuned for specialized behavior.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Task {
- /// id represents the unique identifier of the task.
- #[prost(message, optional, tag = "1")]
- pub id: ::core::option::Option<super::core::Identifier>,
- /// closure encapsulates all the fields that map to a compiled version of the task.
- #[prost(message, optional, tag = "2")]
- pub closure: ::core::option::Option<TaskClosure>,
- /// One-liner overview of the entity.
- #[prost(string, tag = "3")]
- pub short_description: ::prost::alloc::string::String,
-}
-/// Represents a list of tasks returned from the admin.
-/// See :ref:`ref_flyteidl.admin.Task` for more details
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct TaskList {
- /// A list of tasks returned based on the request.
- #[prost(message, repeated, tag = "1")]
- pub tasks: ::prost::alloc::vec::Vec<Task>,
- /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
- /// in a query. If there are no more results, this value will be empty.
- #[prost(string, tag = "2")]
- pub token: ::prost::alloc::string::String,
-}
-/// Represents a structure that encapsulates the user-configured specification of the task.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct TaskSpec {
- /// Template of the task that encapsulates all the metadata of the task.
- #[prost(message, optional, tag = "1")]
- pub template: ::core::option::Option<super::core::TaskTemplate>,
- /// Represents the specification for description entity.
- #[prost(message, optional, tag = "2")]
- pub description: ::core::option::Option<DescriptionEntity>,
-}
-/// Compute task attributes which include values derived from the TaskSpec, as well as plugin-specific data
-/// and task metadata.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct TaskClosure {
- /// Represents the compiled representation of the task from the specification provided.
- #[prost(message, optional, tag = "1")]
- pub compiled_task: ::core::option::Option<super::core::CompiledTask>,
- /// Time at which the task was created.
- #[prost(message, optional, tag = "2")]
- pub created_at: ::core::option::Option<::prost_types::Timestamp>,
-}
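-// Editor's example sketch (not part of the original generated file):
-// registering a new task revision. The project/domain/name/version values
-// are placeholders, and the Identifier field set is assumed from the core
-// module rather than confirmed by this patch.
-#[allow(dead_code)]
-fn example_task_create_request() -> TaskCreateRequest {
-    TaskCreateRequest {
-        id: Some(super::core::Identifier {
-            resource_type: super::core::ResourceType::Task as i32,
-            project: "flytesnacks".to_string(),
-            domain: "development".to_string(),
-            name: "my.tasks.example_task".to_string(),
-            version: "v1".to_string(),
-            ..Default::default()
-        }),
-        // A real request would carry a TaskSpec with a populated TaskTemplate.
-        spec: Some(TaskSpec::default()),
-    }
-}
-/// Represents a request structure to create a revision of a workflow.
-/// See :ref:`ref_flyteidl.admin.Workflow` for more details
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowCreateRequest {
- /// id represents the unique identifier of the workflow.
- /// +required
- #[prost(message, optional, tag = "1")]
- pub id: ::core::option::Option<super::core::Identifier>,
- /// Represents the specification for workflow.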
- /// +required
- #[prost(message, optional, tag = "2")]
- pub spec: ::core::option::Option<WorkflowSpec>,
-}
-/// Purposefully empty, may be populated in the future.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowCreateResponse {}
-/// Represents the workflow structure stored in the Admin.
-/// A workflow is created by ordering tasks and associating outputs to inputs
-/// in order to produce a directed-acyclic execution graph.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Workflow {
- /// id represents the unique identifier of the workflow.
- #[prost(message, optional, tag = "1")]
- pub id: ::core::option::Option<super::core::Identifier>,
- /// closure encapsulates all the fields that map to a compiled version of the workflow.
- #[prost(message, optional, tag = "2")]
- pub closure: ::core::option::Option<WorkflowClosure>,
- /// One-liner overview of the entity.
- #[prost(string, tag = "3")]
- pub short_description: ::prost::alloc::string::String,
-}
-/// Represents a list of workflows returned from the admin.
-/// See :ref:`ref_flyteidl.admin.Workflow` for more details
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowList {
- /// A list of workflows returned based on the request.
- #[prost(message, repeated, tag = "1")]
- pub workflows: ::prost::alloc::vec::Vec<Workflow>,
- /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
- /// in a query. If there are no more results, this value will be empty.
- #[prost(string, tag = "2")]
- pub token: ::prost::alloc::string::String,
-}
-/// Represents a structure that encapsulates the specification of the workflow.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowSpec {
- /// Template of the workflow that encapsulates all the metadata of the workflow.
- #[prost(message, optional, tag = "1")]
- pub template: ::core::option::Option<super::core::WorkflowTemplate>,
- /// Workflows that are embedded into other workflows need to be passed alongside the parent workflow to the
- /// propeller compiler (since the compiler doesn't have any knowledge of other workflows - i.e., it doesn't reach out
- /// to Admin to see other registered workflows). In fact, subworkflows do not even need to be registered.
- #[prost(message, repeated, tag = "2")]
- pub sub_workflows: ::prost::alloc::vec::Vec<super::core::WorkflowTemplate>,
- /// Represents the specification for description entity.
- #[prost(message, optional, tag = "3")]
- pub description: ::core::option::Option<DescriptionEntity>,
-}
-/// A container holding the compiled workflow produced from the WorkflowSpec and additional metadata.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowClosure {
- /// Represents the compiled representation of the workflow from the specification provided.
- #[prost(message, optional, tag = "1")]
- pub compiled_workflow: ::core::option::Option<super::core::CompiledWorkflowClosure>,
- /// Time at which the workflow was created.
- #[prost(message, optional, tag = "2")]
- pub created_at: ::core::option::Option<::prost_types::Timestamp>,
-}
-/// The workflow id is already used and the structure is different.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowErrorExistsDifferentStructure {
- #[prost(message, optional, tag = "1")]
- pub id: ::core::option::Option<super::core::Identifier>,
-}
-/// The workflow id is already used with an identical structure.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowErrorExistsIdenticalStructure {
- #[prost(message, optional, tag = "1")]
- pub id: ::core::option::Option<super::core::Identifier>,
-}
-/// When a CreateWorkflowRequest fails due to a matching id.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct CreateWorkflowFailureReason {
- #[prost(oneof = "create_workflow_failure_reason::Reason", tags = "1, 2")]
- pub reason: ::core::option::Option<create_workflow_failure_reason::Reason>,
-}
-/// Nested message and enum types in `CreateWorkflowFailureReason`.
-pub mod create_workflow_failure_reason {
- #[allow(clippy::derive_partial_eq_without_eq)]
- #[derive(Clone, PartialEq, ::prost::Oneof)]
- pub enum Reason {
- #[prost(message, tag = "1")]
- ExistsDifferentStructure(super::WorkflowErrorExistsDifferentStructure),
- #[prost(message, tag = "2")]
- ExistsIdenticalStructure(super::WorkflowErrorExistsIdenticalStructure),
- }
-}
-/// Defines a set of custom matching attributes which defines resource defaults for a project, domain and workflow.
-/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowAttributes {
- /// Unique project id for which this set of attributes will be applied.
- #[prost(string, tag = "1")]
- pub project: ::prost::alloc::string::String,
- /// Unique domain id for which this set of attributes will be applied.
- #[prost(string, tag = "2")]
- pub domain: ::prost::alloc::string::String,
- /// Workflow name for which this set of attributes will be applied.
- #[prost(string, tag = "3")]
- pub workflow: ::prost::alloc::string::String,
- #[prost(message, optional, tag = "4")]
- pub matching_attributes: ::core::option::Option<MatchingAttributes>,
- /// Optional, org key applied to the attributes.
- #[prost(string, tag = "5")]
- pub org: ::prost::alloc::string::String,
-}
-/// Sets custom attributes for a project, domain and workflow combination.
-/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowAttributesUpdateRequest {
- #[prost(message, optional, tag = "1")]
- pub attributes: ::core::option::Option<WorkflowAttributes>,
-}
-/// Purposefully empty, may be populated in the future.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowAttributesUpdateResponse {}
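-// Editor's example sketch (not part of the original generated file): pinning
-// a single workflow to a specific execution cluster label via the matchable
-// attributes defined earlier in this module. All names are placeholders.
-#[allow(dead_code)]
-fn example_workflow_attributes_update() -> WorkflowAttributesUpdateRequest {
-    WorkflowAttributesUpdateRequest {
-        attributes: Some(WorkflowAttributes {
-            project: "flytesnacks".to_string(),
-            domain: "development".to_string(),
-            workflow: "my.workflows.example_wf".to_string(),
-            matching_attributes: Some(MatchingAttributes {
-                target: Some(matching_attributes::Target::ExecutionClusterLabel(
-                    ExecutionClusterLabel { value: "gpu-cluster".to_string() },
-                )),
-            }),
-            org: String::new(),
-        }),
-    }
-}
-/// Request to get an individual workflow attribute override.
-/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration`
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowAttributesGetRequest {
- /// Unique project id which this set of attributes references.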
- /// +required - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - /// Unique domain id which this set of attributes references. - /// +required - #[prost(string, tag = "2")] - pub domain: ::prost::alloc::string::String, - /// Workflow name which this set of attributes references. - /// +required - #[prost(string, tag = "3")] - pub workflow: ::prost::alloc::string::String, - /// Which type of matchable attributes to return. - /// +required - #[prost(enumeration = "MatchableResource", tag = "4")] - pub resource_type: i32, - /// Optional, org key applied to the attributes. - #[prost(string, tag = "5")] - pub org: ::prost::alloc::string::String, -} -/// Response to get an individual workflow attribute override. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowAttributesGetResponse { - #[prost(message, optional, tag = "1")] - pub attributes: ::core::option::Option, -} -/// Request to delete a set matchable workflow attribute override. -/// For more info on matchable attributes, see :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowAttributesDeleteRequest { - /// Unique project id which this set of attributes references. - /// +required - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - /// Unique domain id which this set of attributes references. - /// +required - #[prost(string, tag = "2")] - pub domain: ::prost::alloc::string::String, - /// Workflow name which this set of attributes references. - /// +required - #[prost(string, tag = "3")] - pub workflow: ::prost::alloc::string::String, - /// Which type of matchable attributes to delete. - /// +required - #[prost(enumeration = "MatchableResource", tag = "4")] - pub resource_type: i32, - /// Optional, org key applied to the attributes. - #[prost(string, tag = "5")] - pub org: ::prost::alloc::string::String, -} -/// Purposefully empty, may be populated in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowAttributesDeleteResponse {} -/// Option for schedules run at a certain frequency e.g. every 2 minutes. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct FixedRate { - #[prost(uint32, tag = "1")] - pub value: u32, - #[prost(enumeration = "FixedRateUnit", tag = "2")] - pub unit: i32, -} -/// Options for schedules to run according to a cron expression. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CronSchedule { - /// Standard/default cron implementation as described by - /// Also supports nonstandard predefined scheduling definitions - /// as described by - /// except @reboot - #[prost(string, tag = "1")] - pub schedule: ::prost::alloc::string::String, - /// ISO 8601 duration as described by - #[prost(string, tag = "2")] - pub offset: ::prost::alloc::string::String, -} -/// Defines complete set of information required to trigger an execution on a schedule. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Schedule { - /// Name of the input variable that the kickoff time will be supplied to when the workflow is kicked off. 
- #[prost(string, tag = "3")] - pub kickoff_time_input_arg: ::prost::alloc::string::String, - #[prost(oneof = "schedule::ScheduleExpression", tags = "1, 2, 4")] - pub schedule_expression: ::core::option::Option, -} -/// Nested message and enum types in `Schedule`. -pub mod schedule { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum ScheduleExpression { - /// Uses AWS syntax: Minutes Hours Day-of-month Month Day-of-week Year - /// e.g. for a schedule that runs every 15 minutes: 0/15 * * * ? * - #[prost(string, tag = "1")] - CronExpression(::prost::alloc::string::String), - #[prost(message, tag = "2")] - Rate(super::FixedRate), - #[prost(message, tag = "4")] - CronSchedule(super::CronSchedule), - } -} -/// Represents a frequency at which to run a schedule. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum FixedRateUnit { - Minute = 0, - Hour = 1, - Day = 2, -} -impl FixedRateUnit { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - FixedRateUnit::Minute => "MINUTE", - FixedRateUnit::Hour => "HOUR", - FixedRateUnit::Day => "DAY", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "MINUTE" => Some(Self::Minute), - "HOUR" => Some(Self::Hour), - "DAY" => Some(Self::Day), - _ => None, - } - } -} -/// Request to register a launch plan. The included LaunchPlanSpec may have a complete or incomplete set of inputs required -/// to launch a workflow execution. By default all launch plans are registered in state INACTIVE. If you wish to -/// set the state to ACTIVE, you must submit a LaunchPlanUpdateRequest, after you have successfully created a launch plan. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LaunchPlanCreateRequest { - /// Uniquely identifies a launch plan entity. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// User-provided launch plan details, including reference workflow, inputs and other metadata. - #[prost(message, optional, tag = "2")] - pub spec: ::core::option::Option, -} -/// Purposefully empty, may be populated in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LaunchPlanCreateResponse {} -/// A LaunchPlan provides the capability to templatize workflow executions. -/// Launch plans simplify associating one or more schedules, inputs and notifications with your workflows. -/// Launch plans can be shared and used to trigger executions with predefined inputs even when a workflow -/// definition doesn't necessarily have a default value for said input. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LaunchPlan { - /// Uniquely identifies a launch plan entity. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// User-provided launch plan details, including reference workflow, inputs and other metadata. - #[prost(message, optional, tag = "2")] - pub spec: ::core::option::Option, - /// Values computed by the flyte platform after launch plan registration. 
- #[prost(message, optional, tag = "3")] - pub closure: ::core::option::Option, -} -/// Response object for list launch plan requests. -/// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LaunchPlanList { - #[prost(message, repeated, tag = "1")] - pub launch_plans: ::prost::alloc::vec::Vec, - /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page - /// in a query. If there are no more results, this value will be empty. - #[prost(string, tag = "2")] - pub token: ::prost::alloc::string::String, -} -/// Defines permissions associated with executions created by this launch plan spec. -/// Use either of these roles when they have permissions required by your workflow execution. -/// Deprecated. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Auth { - /// Defines an optional iam role which will be used for tasks run in executions created with this launch plan. - #[prost(string, tag = "1")] - pub assumable_iam_role: ::prost::alloc::string::String, - /// Defines an optional kubernetes service account which will be used for tasks run in executions created with this launch plan. - #[prost(string, tag = "2")] - pub kubernetes_service_account: ::prost::alloc::string::String, -} -/// User-provided launch plan definition and configuration values. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LaunchPlanSpec { - /// Reference to the Workflow template that the launch plan references - #[prost(message, optional, tag = "1")] - pub workflow_id: ::core::option::Option, - /// Metadata for the Launch Plan - #[prost(message, optional, tag = "2")] - pub entity_metadata: ::core::option::Option, - /// Input values to be passed for the execution. - /// These can be overridden when an execution is created with this launch plan. - #[prost(message, optional, tag = "3")] - pub default_inputs: ::core::option::Option, - /// Fixed, non-overridable inputs for the Launch Plan. - /// These can not be overridden when an execution is created with this launch plan. - #[prost(message, optional, tag = "4")] - pub fixed_inputs: ::core::option::Option, - /// String to indicate the role to use to execute the workflow underneath - #[deprecated] - #[prost(string, tag = "5")] - pub role: ::prost::alloc::string::String, - /// Custom labels to be applied to the execution resource. - #[prost(message, optional, tag = "6")] - pub labels: ::core::option::Option, - /// Custom annotations to be applied to the execution resource. - #[prost(message, optional, tag = "7")] - pub annotations: ::core::option::Option, - /// Indicates the permission associated with workflow executions triggered with this launch plan. - #[deprecated] - #[prost(message, optional, tag = "8")] - pub auth: ::core::option::Option, - #[deprecated] - #[prost(message, optional, tag = "9")] - pub auth_role: ::core::option::Option, - /// Indicates security context for permissions triggered with this launch plan - #[prost(message, optional, tag = "10")] - pub security_context: ::core::option::Option, - /// Indicates the runtime priority of the execution. - #[prost(message, optional, tag = "16")] - pub quality_of_service: ::core::option::Option, - /// Encapsulates user settings pertaining to offloaded data (i.e. Blobs, Schema, query data, etc.). 
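// Editorial sketch: a minimal LaunchPlanSpec built via Default, since prost derives Default
// for messages and most fields above are optional. The `flyteidl::{admin, core}` paths are
// assumptions; the helper itself is hypothetical.
use flyteidl::{admin, core};

fn minimal_launch_plan(
    lp_id: core::Identifier,
    workflow_id: core::Identifier,
) -> admin::LaunchPlanCreateRequest {
    let spec = admin::LaunchPlanSpec {
        workflow_id: Some(workflow_id),
        ..Default::default() // schedules, notifications, auth, etc. left unset
    };
    // Newly registered launch plans start INACTIVE; activation is a separate
    // LaunchPlanUpdateRequest (see below).
    admin::LaunchPlanCreateRequest {
        id: Some(lp_id),
        spec: Some(spec),
    }
}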
- #[prost(message, optional, tag = "17")] - pub raw_output_data_config: ::core::option::Option, - /// Controls the maximum number of tasknodes that can be run in parallel for the entire workflow. - /// This is useful to achieve fairness. Note: MapTasks are regarded as one unit, - /// and parallelism/concurrency of MapTasks is independent from this. - #[prost(int32, tag = "18")] - pub max_parallelism: i32, - /// Allows for the interruptible flag of a workflow to be overwritten for a single execution. - /// Omitting this field uses the workflow's value as a default. - /// As we need to distinguish between the field not being provided and its default value false, we have to use a wrapper - /// around the bool field. - #[prost(message, optional, tag = "19")] - pub interruptible: ::core::option::Option, - /// Allows for all cached values of a workflow and its tasks to be overwritten for a single execution. - /// If enabled, all calculations are performed even if cached results would be available, overwriting the stored - /// data once execution finishes successfully. - #[prost(bool, tag = "20")] - pub overwrite_cache: bool, - /// Environment variables to be set for the execution. - #[prost(message, optional, tag = "21")] - pub envs: ::core::option::Option, -} -/// Values computed by the flyte platform after launch plan registration. -/// These include expected_inputs required to be present in a CreateExecutionRequest -/// to launch the reference workflow as well timestamp values associated with the launch plan. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LaunchPlanClosure { - /// Indicate the Launch plan state. - #[prost(enumeration = "LaunchPlanState", tag = "1")] - pub state: i32, - /// Indicates the set of inputs expected when creating an execution with the Launch plan - #[prost(message, optional, tag = "2")] - pub expected_inputs: ::core::option::Option, - /// Indicates the set of outputs expected to be produced by creating an execution with the Launch plan - #[prost(message, optional, tag = "3")] - pub expected_outputs: ::core::option::Option, - /// Time at which the launch plan was created. - #[prost(message, optional, tag = "4")] - pub created_at: ::core::option::Option<::prost_types::Timestamp>, - /// Time at which the launch plan was last updated. - #[prost(message, optional, tag = "5")] - pub updated_at: ::core::option::Option<::prost_types::Timestamp>, -} -/// Additional launch plan attributes included in the LaunchPlanSpec not strictly required to launch -/// the reference workflow. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LaunchPlanMetadata { - /// Schedule to execute the Launch Plan - #[prost(message, optional, tag = "1")] - pub schedule: ::core::option::Option, - /// List of notifications based on Execution status transitions - #[prost(message, repeated, tag = "2")] - pub notifications: ::prost::alloc::vec::Vec, - /// Additional metadata for how to launch the launch plan - #[prost(message, optional, tag = "3")] - pub launch_conditions: ::core::option::Option<::prost_types::Any>, -} -/// Request to set the referenced launch plan state to the configured value. -/// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LaunchPlanUpdateRequest { - /// Identifier of launch plan for which to change state. - /// +required. 
- #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// Desired state to apply to the launch plan. - /// +required. - #[prost(enumeration = "LaunchPlanState", tag = "2")] - pub state: i32, -} -/// Purposefully empty, may be populated in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LaunchPlanUpdateResponse {} -/// Represents a request struct for finding an active launch plan for a given NamedEntityIdentifier -/// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ActiveLaunchPlanRequest { - /// +required. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, -} -/// Represents a request structure to list active launch plans within a project/domain and optional org. -/// See :ref:`ref_flyteidl.admin.LaunchPlan` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ActiveLaunchPlanListRequest { - /// Name of the project that contains the identifiers. - /// +required. - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - /// Name of the domain the identifiers belongs to within the project. - /// +required. - #[prost(string, tag = "2")] - pub domain: ::prost::alloc::string::String, - /// Indicates the number of resources to be returned. - /// +required. - #[prost(uint32, tag = "3")] - pub limit: u32, - /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page - /// in a query. - /// +optional - #[prost(string, tag = "4")] - pub token: ::prost::alloc::string::String, - /// Sort ordering. - /// +optional - #[prost(message, optional, tag = "5")] - pub sort_by: ::core::option::Option, - /// Optional, org key applied to the resource. - #[prost(string, tag = "6")] - pub org: ::prost::alloc::string::String, -} -/// By default any launch plan regardless of state can be used to launch a workflow execution. -/// However, at most one version of a launch plan -/// (e.g. a NamedEntityIdentifier set of shared project, domain and name values) can be -/// active at a time in regards to *schedules*. That is, at most one schedule in a NamedEntityIdentifier -/// group will be observed and trigger executions at a defined cadence. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum LaunchPlanState { - Inactive = 0, - Active = 1, -} -impl LaunchPlanState { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - LaunchPlanState::Inactive => "INACTIVE", - LaunchPlanState::Active => "ACTIVE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "INACTIVE" => Some(Self::Inactive), - "ACTIVE" => Some(Self::Active), - _ => None, - } - } -} -/// Indicates that a sent event was not used to update execution state due to -/// the referenced execution already being terminated (and therefore ineligible -/// for further state transitions). 
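// Editorial sketch: activating a launch plan with the update request above. Enum-typed
// fields are plain i32 in prost, so the generated enum is cast at the call site. Module
// paths `flyteidl::{admin, core}` are assumptions.
use flyteidl::{admin, core};

fn activate(id: core::Identifier) -> admin::LaunchPlanUpdateRequest {
    admin::LaunchPlanUpdateRequest {
        id: Some(id),
        // At most one version of a named launch plan may be ACTIVE for scheduling purposes.
        state: admin::LaunchPlanState::Active as i32,
    }
}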
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EventErrorAlreadyInTerminalState { - /// +required - #[prost(string, tag = "1")] - pub current_phase: ::prost::alloc::string::String, -} -/// Indicates an event was rejected because it came from a different cluster than -/// is on record as running the execution. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EventErrorIncompatibleCluster { - /// The cluster which has been recorded as processing the execution. - /// +required - #[prost(string, tag = "1")] - pub cluster: ::prost::alloc::string::String, -} -/// Indicates why a sent event was not used to update execution. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EventFailureReason { - /// +required - #[prost(oneof = "event_failure_reason::Reason", tags = "1, 2")] - pub reason: ::core::option::Option, -} -/// Nested message and enum types in `EventFailureReason`. -pub mod event_failure_reason { - /// +required - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Reason { - #[prost(message, tag = "1")] - AlreadyInTerminalState(super::EventErrorAlreadyInTerminalState), - #[prost(message, tag = "2")] - IncompatibleCluster(super::EventErrorIncompatibleCluster), - } -} -/// Request to send a notification that a workflow execution event has occurred. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowExecutionEventRequest { - /// Unique ID for this request that can be traced between services - #[prost(string, tag = "1")] - pub request_id: ::prost::alloc::string::String, - /// Details about the event that occurred. - #[prost(message, optional, tag = "2")] - pub event: ::core::option::Option, -} -/// Purposefully empty, may be populated in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowExecutionEventResponse {} -/// Request to send a notification that a node execution event has occurred. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NodeExecutionEventRequest { - /// Unique ID for this request that can be traced between services - #[prost(string, tag = "1")] - pub request_id: ::prost::alloc::string::String, - /// Details about the event that occurred. - #[prost(message, optional, tag = "2")] - pub event: ::core::option::Option, -} -/// Purposefully empty, may be populated in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NodeExecutionEventResponse {} -/// Request to send a notification that a task execution event has occurred. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskExecutionEventRequest { - /// Unique ID for this request that can be traced between services - #[prost(string, tag = "1")] - pub request_id: ::prost::alloc::string::String, - /// Details about the event that occurred. - #[prost(message, optional, tag = "2")] - pub event: ::core::option::Option, -} -/// Purposefully empty, may be populated in the future. 
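// Editorial sketch: consuming the EventFailureReason oneof defined above. Module path
// `flyteidl::admin` is an assumption.
use flyteidl::admin::event_failure_reason::Reason;

fn describe(reason: &Reason) -> String {
    match reason {
        Reason::AlreadyInTerminalState(e) => {
            format!("execution already terminal (phase {})", e.current_phase)
        }
        Reason::IncompatibleCluster(e) => {
            format!("event came from a cluster other than the recorded one ({})", e.cluster)
        }
    }
}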
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskExecutionEventResponse {} -/// Request to launch an execution with the given project, domain and optionally-assigned name. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecutionCreateRequest { - /// Name of the project the execution belongs to. - /// +required - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - /// Name of the domain the execution belongs to. - /// A domain can be considered as a subset within a specific project. - /// +required - #[prost(string, tag = "2")] - pub domain: ::prost::alloc::string::String, - /// User provided value for the resource. - /// If none is provided the system will generate a unique string. - /// +optional - #[prost(string, tag = "3")] - pub name: ::prost::alloc::string::String, - /// Additional fields necessary to launch the execution. - /// +optional - #[prost(message, optional, tag = "4")] - pub spec: ::core::option::Option, - /// The inputs required to start the execution. All required inputs must be - /// included in this map. If not required and not provided, defaults apply. - /// +optional - #[prost(message, optional, tag = "5")] - pub inputs: ::core::option::Option, - /// Optional, org key applied to the resource. - #[prost(string, tag = "6")] - pub org: ::prost::alloc::string::String, -} -/// Request to relaunch the referenced execution. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecutionRelaunchRequest { - /// Identifier of the workflow execution to relaunch. - /// +required - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// User provided value for the relaunched execution. - /// If none is provided the system will generate a unique string. - /// +optional - #[prost(string, tag = "3")] - pub name: ::prost::alloc::string::String, - /// Allows for all cached values of a workflow and its tasks to be overwritten for a single execution. - /// If enabled, all calculations are performed even if cached results would be available, overwriting the stored - /// data once execution finishes successfully. - #[prost(bool, tag = "4")] - pub overwrite_cache: bool, -} -/// Request to recover the referenced execution. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecutionRecoverRequest { - /// Identifier of the workflow execution to recover. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// User provided value for the recovered execution. - /// If none is provided the system will generate a unique string. - /// +optional - #[prost(string, tag = "2")] - pub name: ::prost::alloc::string::String, - /// Additional metadata which will be used to overwrite any metadata in the reference execution when triggering a recovery execution. - #[prost(message, optional, tag = "3")] - pub metadata: ::core::option::Option, -} -/// The unique identifier for a successfully created execution. -/// If the name was *not* specified in the create request, this identifier will include a generated name. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecutionCreateResponse { - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, -} -/// A message used to fetch a single workflow execution entity. 
-/// See :ref:`ref_flyteidl.admin.Execution` for more details
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowExecutionGetRequest {
-    /// Uniquely identifies an individual workflow execution.
-    #[prost(message, optional, tag = "1")]
-    pub id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
-}
-/// A workflow execution represents an instantiated workflow, including all inputs and additional
-/// metadata as well as computed results including state, outputs, and duration-based attributes.
-/// Used as a response object in Get and List execution requests.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Execution {
-    /// Unique identifier of the workflow execution.
-    #[prost(message, optional, tag = "1")]
-    pub id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
-    /// User-provided configuration and inputs for launching the execution.
-    #[prost(message, optional, tag = "2")]
-    pub spec: ::core::option::Option<ExecutionSpec>,
-    /// Execution results.
-    #[prost(message, optional, tag = "3")]
-    pub closure: ::core::option::Option<ExecutionClosure>,
-}
-/// Used as a response for request to list executions.
-/// See :ref:`ref_flyteidl.admin.Execution` for more details
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ExecutionList {
-    #[prost(message, repeated, tag = "1")]
-    pub executions: ::prost::alloc::vec::Vec<Execution>,
-    /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
-    /// in a query. If there are no more results, this value will be empty.
-    #[prost(string, tag = "2")]
-    pub token: ::prost::alloc::string::String,
-}
-/// Input/output data can be represented by actual values or a link to where values are stored
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct LiteralMapBlob {
-    #[prost(oneof = "literal_map_blob::Data", tags = "1, 2")]
-    pub data: ::core::option::Option<literal_map_blob::Data>,
-}
-/// Nested message and enum types in `LiteralMapBlob`.
-pub mod literal_map_blob {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum Data {
-        /// Data in LiteralMap format
-        #[prost(message, tag = "1")]
-        Values(super::super::core::LiteralMap),
-        /// In the event that the map is too large, we return a uri to the data
-        #[prost(string, tag = "2")]
-        Uri(::prost::alloc::string::String),
-    }
-}
-/// Specifies metadata around an aborted workflow execution.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct AbortMetadata {
-    /// In the case of a user-specified abort, this will pass along the user-supplied cause.
-    #[prost(string, tag = "1")]
-    pub cause: ::prost::alloc::string::String,
-    /// Identifies the entity (if any) responsible for terminating the execution
-    #[prost(string, tag = "2")]
-    pub principal: ::prost::alloc::string::String,
-}
-/// Encapsulates the results of the Execution
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ExecutionClosure {
-    /// Inputs computed and passed for execution.
-    /// computed_inputs depends on inputs in ExecutionSpec, fixed and default inputs in launch plan
-    #[deprecated]
-    #[prost(message, optional, tag = "3")]
-    pub computed_inputs: ::core::option::Option<super::core::LiteralMap>,
-    /// Most recent recorded phase for the execution.
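// Editorial sketch of the paging contract shared by the list responses above: resubmit the
// returned token until it comes back empty. `fetch_page` stands in for an Admin list RPC and
// the `flyteidl::admin` path is an assumption.
use flyteidl::admin::{Execution, ExecutionList};

fn collect_all(mut fetch_page: impl FnMut(&str) -> ExecutionList) -> Vec<Execution> {
    let mut all = Vec::new();
    let mut token = String::new();
    loop {
        let page = fetch_page(&token);
        all.extend(page.executions);
        if page.token.is_empty() {
            break; // an empty token signals the final page
        }
        token = page.token;
    }
    all
}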
- #[prost(enumeration = "super::core::workflow_execution::Phase", tag = "4")] - pub phase: i32, - /// Reported time at which the execution began running. - #[prost(message, optional, tag = "5")] - pub started_at: ::core::option::Option<::prost_types::Timestamp>, - /// The amount of time the execution spent running. - #[prost(message, optional, tag = "6")] - pub duration: ::core::option::Option<::prost_types::Duration>, - /// Reported time at which the execution was created. - #[prost(message, optional, tag = "7")] - pub created_at: ::core::option::Option<::prost_types::Timestamp>, - /// Reported time at which the execution was last updated. - #[prost(message, optional, tag = "8")] - pub updated_at: ::core::option::Option<::prost_types::Timestamp>, - /// The notification settings to use after merging the CreateExecutionRequest and the launch plan - /// notification settings. An execution launched with notifications will always prefer that definition - /// to notifications defined statically in a launch plan. - #[prost(message, repeated, tag = "9")] - pub notifications: ::prost::alloc::vec::Vec, - /// Identifies the workflow definition for this execution. - #[prost(message, optional, tag = "11")] - pub workflow_id: ::core::option::Option, - /// Provides the details of the last stage change - #[prost(message, optional, tag = "14")] - pub state_change_details: ::core::option::Option, - /// A result produced by a terminated execution. - /// A pending (non-terminal) execution will not have any output result. - #[prost(oneof = "execution_closure::OutputResult", tags = "1, 2, 10, 12, 13")] - pub output_result: ::core::option::Option, -} -/// Nested message and enum types in `ExecutionClosure`. -pub mod execution_closure { - /// A result produced by a terminated execution. - /// A pending (non-terminal) execution will not have any output result. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum OutputResult { - /// Output URI in the case of a successful execution. - /// DEPRECATED. Use GetExecutionData to fetch output data instead. - #[prost(message, tag = "1")] - Outputs(super::LiteralMapBlob), - /// Error information in the case of a failed execution. - #[prost(message, tag = "2")] - Error(super::super::core::ExecutionError), - /// In the case of a user-specified abort, this will pass along the user-supplied cause. - #[prost(string, tag = "10")] - AbortCause(::prost::alloc::string::String), - /// In the case of a user-specified abort, this will pass along the user and their supplied cause. - #[prost(message, tag = "12")] - AbortMetadata(super::AbortMetadata), - /// Raw output data produced by this execution. - /// DEPRECATED. Use GetExecutionData to fetch output data instead. - #[prost(message, tag = "13")] - OutputData(super::super::core::LiteralMap), - } -} -/// Represents system, rather than user-facing, metadata about an execution. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SystemMetadata { - /// Which execution cluster this execution ran on. - #[prost(string, tag = "1")] - pub execution_cluster: ::prost::alloc::string::String, - /// Which kubernetes namespace the execution ran under. - #[prost(string, tag = "2")] - pub namespace: ::prost::alloc::string::String, -} -/// Represents attributes about an execution which are not required to launch the execution but are useful to record. -/// These attributes are assigned at launch time and do not change. 
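// Editorial sketch: classifying an execution by its OutputResult oneof. A non-terminal
// execution carries no output_result at all. Module path `flyteidl::admin` assumed.
use flyteidl::admin::{execution_closure::OutputResult, ExecutionClosure};

fn outcome(closure: &ExecutionClosure) -> &'static str {
    match &closure.output_result {
        Some(OutputResult::Outputs(_)) | Some(OutputResult::OutputData(_)) => "succeeded",
        Some(OutputResult::Error(_)) => "failed",
        Some(OutputResult::AbortCause(_)) | Some(OutputResult::AbortMetadata(_)) => "aborted",
        None => "still running",
    }
}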
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ExecutionMetadata {
-    #[prost(enumeration = "execution_metadata::ExecutionMode", tag = "1")]
-    pub mode: i32,
-    /// Identifier of the entity that triggered this execution.
-    /// For systems using back-end authentication any value set here will be discarded in favor of the
-    /// authenticated user context.
-    #[prost(string, tag = "2")]
-    pub principal: ::prost::alloc::string::String,
-    /// Indicates the nestedness of this execution.
-    /// If a user launches a workflow execution, the default nesting is 0.
-    /// If this execution further launches a workflow (child workflow), its nesting level is incremented, i.e. 0 => 1.
-    /// Generally, if a workflow at nesting level k launches a workflow then the child workflow will have
-    /// nesting = k + 1.
-    #[prost(uint32, tag = "3")]
-    pub nesting: u32,
-    /// For scheduled executions, the requested time for execution for this specific schedule invocation.
-    #[prost(message, optional, tag = "4")]
-    pub scheduled_at: ::core::option::Option<::prost_types::Timestamp>,
-    /// Which subworkflow node (if any) launched this execution
-    #[prost(message, optional, tag = "5")]
-    pub parent_node_execution: ::core::option::Option<
-        super::core::NodeExecutionIdentifier,
-    >,
-    /// Optional, a reference workflow execution related to this execution.
-    /// In the case of a relaunch, this references the original workflow execution.
-    #[prost(message, optional, tag = "16")]
-    pub reference_execution: ::core::option::Option<
-        super::core::WorkflowExecutionIdentifier,
-    >,
-    /// Optional, platform-specific metadata about the execution.
-    /// In the future this may be gated behind an ACL or some sort of authorization.
-    #[prost(message, optional, tag = "17")]
-    pub system_metadata: ::core::option::Option<SystemMetadata>,
-    /// Save a list of the artifacts used in this execution for now. This is a list only rather than a mapping
-    /// since we don't have a structure to handle nested ones anyway.
-    #[prost(message, repeated, tag = "18")]
-    pub artifact_ids: ::prost::alloc::vec::Vec<super::core::ArtifactId>,
-}
-/// Nested message and enum types in `ExecutionMetadata`.
-pub mod execution_metadata {
-    /// The method by which this execution was launched.
-    #[derive(
-        Clone,
-        Copy,
-        Debug,
-        PartialEq,
-        Eq,
-        Hash,
-        PartialOrd,
-        Ord,
-        ::prost::Enumeration
-    )]
-    #[repr(i32)]
-    pub enum ExecutionMode {
-        /// The default execution mode, MANUAL implies that an execution was launched by an individual.
-        Manual = 0,
-        /// A schedule triggered this execution launch.
-        Scheduled = 1,
-        /// A system process was responsible for launching this execution rather than an individual.
-        System = 2,
-        /// This execution was launched with identical inputs as a previous execution.
-        Relaunch = 3,
-        /// This execution was triggered by another execution.
-        ChildWorkflow = 4,
-        /// This execution was recovered from another execution.
-        Recovered = 5,
-        /// Execution was kicked off by the artifact trigger system
-        Trigger = 6,
-    }
-    impl ExecutionMode {
-        /// String value of the enum field names used in the ProtoBuf definition.
-        ///
-        /// The values are not transformed in any way and thus are considered stable
-        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
- pub fn as_str_name(&self) -> &'static str { - match self { - ExecutionMode::Manual => "MANUAL", - ExecutionMode::Scheduled => "SCHEDULED", - ExecutionMode::System => "SYSTEM", - ExecutionMode::Relaunch => "RELAUNCH", - ExecutionMode::ChildWorkflow => "CHILD_WORKFLOW", - ExecutionMode::Recovered => "RECOVERED", - ExecutionMode::Trigger => "TRIGGER", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "MANUAL" => Some(Self::Manual), - "SCHEDULED" => Some(Self::Scheduled), - "SYSTEM" => Some(Self::System), - "RELAUNCH" => Some(Self::Relaunch), - "CHILD_WORKFLOW" => Some(Self::ChildWorkflow), - "RECOVERED" => Some(Self::Recovered), - "TRIGGER" => Some(Self::Trigger), - _ => None, - } - } - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NotificationList { - #[prost(message, repeated, tag = "1")] - pub notifications: ::prost::alloc::vec::Vec, -} -/// An ExecutionSpec encompasses all data used to launch this execution. The Spec does not change over the lifetime -/// of an execution as it progresses across phase changes. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecutionSpec { - /// Launch plan to be executed - #[prost(message, optional, tag = "1")] - pub launch_plan: ::core::option::Option, - /// Input values to be passed for the execution - #[deprecated] - #[prost(message, optional, tag = "2")] - pub inputs: ::core::option::Option, - /// Metadata for the execution - #[prost(message, optional, tag = "3")] - pub metadata: ::core::option::Option, - /// Labels to apply to the execution resource. - #[prost(message, optional, tag = "7")] - pub labels: ::core::option::Option, - /// Annotations to apply to the execution resource. - #[prost(message, optional, tag = "8")] - pub annotations: ::core::option::Option, - /// Optional: security context override to apply this execution. - #[prost(message, optional, tag = "10")] - pub security_context: ::core::option::Option, - /// Optional: auth override to apply this execution. - #[deprecated] - #[prost(message, optional, tag = "16")] - pub auth_role: ::core::option::Option, - /// Indicates the runtime priority of the execution. - #[prost(message, optional, tag = "17")] - pub quality_of_service: ::core::option::Option, - /// Controls the maximum number of task nodes that can be run in parallel for the entire workflow. - /// This is useful to achieve fairness. Note: MapTasks are regarded as one unit, - /// and parallelism/concurrency of MapTasks is independent from this. - #[prost(int32, tag = "18")] - pub max_parallelism: i32, - /// User setting to configure where to store offloaded data (i.e. Blobs, structured datasets, query data, etc.). - /// This should be a prefix like s3://my-bucket/my-data - #[prost(message, optional, tag = "19")] - pub raw_output_data_config: ::core::option::Option, - /// Controls how to select an available cluster on which this execution should run. - #[prost(message, optional, tag = "20")] - pub cluster_assignment: ::core::option::Option, - /// Allows for the interruptible flag of a workflow to be overwritten for a single execution. - /// Omitting this field uses the workflow's value as a default. - /// As we need to distinguish between the field not being provided and its default value false, we have to use a wrapper - /// around the bool field. 
- #[prost(message, optional, tag = "21")] - pub interruptible: ::core::option::Option, - /// Allows for all cached values of a workflow and its tasks to be overwritten for a single execution. - /// If enabled, all calculations are performed even if cached results would be available, overwriting the stored - /// data once execution finishes successfully. - #[prost(bool, tag = "22")] - pub overwrite_cache: bool, - /// Environment variables to be set for the execution. - #[prost(message, optional, tag = "23")] - pub envs: ::core::option::Option, - /// Tags to be set for the execution. - #[prost(string, repeated, tag = "24")] - pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Execution cluster label to be set for the execution. - #[prost(message, optional, tag = "25")] - pub execution_cluster_label: ::core::option::Option, - #[prost(oneof = "execution_spec::NotificationOverrides", tags = "5, 6")] - pub notification_overrides: ::core::option::Option< - execution_spec::NotificationOverrides, - >, -} -/// Nested message and enum types in `ExecutionSpec`. -pub mod execution_spec { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum NotificationOverrides { - /// List of notifications based on Execution status transitions - /// When this list is not empty it is used rather than any notifications defined in the referenced launch plan. - /// When this list is empty, the notifications defined for the launch plan will be applied. - #[prost(message, tag = "5")] - Notifications(super::NotificationList), - /// This should be set to true if all notifications are intended to be disabled for this execution. - #[prost(bool, tag = "6")] - DisableAll(bool), - } -} -/// Request to terminate an in-progress execution. This action is irreversible. -/// If an execution is already terminated, this request will simply be a no-op. -/// This request will fail if it references a non-existent execution. -/// If the request succeeds the phase "ABORTED" will be recorded for the termination -/// with the optional cause added to the output_result. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecutionTerminateRequest { - /// Uniquely identifies the individual workflow execution to be terminated. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// Optional reason for aborting. - #[prost(string, tag = "2")] - pub cause: ::prost::alloc::string::String, -} -/// Purposefully empty, may be populated in the future. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecutionTerminateResponse {} -/// Request structure to fetch inputs, output and other data produced by an execution. -/// By default this data is not returned inline in :ref:`ref_flyteidl.admin.WorkflowExecutionGetRequest` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowExecutionGetDataRequest { - /// The identifier of the execution for which to fetch inputs and outputs. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, -} -/// Response structure for WorkflowExecutionGetDataRequest which contains inputs and outputs for an execution. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowExecutionGetDataResponse { - /// Signed url to fetch a core.LiteralMap of execution outputs. 
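// Editorial sketch: an abort request built from the types above. Per the doc comment,
// terminating an already-terminated execution is a no-op, while an unknown execution is an
// error. Module paths `flyteidl::{admin, core}` are assumptions.
use flyteidl::{admin, core};

fn terminate(
    id: core::WorkflowExecutionIdentifier,
    cause: &str,
) -> admin::ExecutionTerminateRequest {
    admin::ExecutionTerminateRequest {
        id: Some(id),
        cause: cause.to_string(), // recorded alongside the ABORTED phase
    }
}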
- /// Deprecated: Please use full_outputs instead. - #[deprecated] - #[prost(message, optional, tag = "1")] - pub outputs: ::core::option::Option, - /// Signed url to fetch a core.LiteralMap of execution inputs. - /// Deprecated: Please use full_inputs instead. - #[deprecated] - #[prost(message, optional, tag = "2")] - pub inputs: ::core::option::Option, - /// Full_inputs will only be populated if they are under a configured size threshold. - #[prost(message, optional, tag = "3")] - pub full_inputs: ::core::option::Option, - /// Full_outputs will only be populated if they are under a configured size threshold. - #[prost(message, optional, tag = "4")] - pub full_outputs: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecutionUpdateRequest { - /// Identifier of the execution to update - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// State to set as the new value active/archive - #[prost(enumeration = "ExecutionState", tag = "2")] - pub state: i32, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecutionStateChangeDetails { - /// The state of the execution is used to control its visibility in the UI/CLI. - #[prost(enumeration = "ExecutionState", tag = "1")] - pub state: i32, - /// This timestamp represents when the state changed. - #[prost(message, optional, tag = "2")] - pub occurred_at: ::core::option::Option<::prost_types::Timestamp>, - /// Identifies the entity (if any) responsible for causing the state change of the execution - #[prost(string, tag = "3")] - pub principal: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecutionUpdateResponse {} -/// WorkflowExecutionGetMetricsRequest represents a request to retrieve metrics for the specified workflow execution. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowExecutionGetMetricsRequest { - /// id defines the workflow execution to query for. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// depth defines the number of Flyte entity levels to traverse when breaking down execution details. - #[prost(int32, tag = "2")] - pub depth: i32, -} -/// WorkflowExecutionGetMetricsResponse represents the response containing metrics for the specified workflow execution. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowExecutionGetMetricsResponse { - /// Span defines the top-level breakdown of the workflows execution. More precise information is nested in a - /// hierarchical structure using Flyte entity references. - #[prost(message, optional, tag = "1")] - pub span: ::core::option::Option, -} -/// The state of the execution is used to control its visibility in the UI/CLI. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ExecutionState { - /// By default, all executions are considered active. - ExecutionActive = 0, - /// Archived executions are no longer visible in the UI. - ExecutionArchived = 1, -} -impl ExecutionState { - /// String value of the enum field names used in the ProtoBuf definition. 
-    ///
-    /// The values are not transformed in any way and thus are considered stable
-    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-    pub fn as_str_name(&self) -> &'static str {
-        match self {
-            ExecutionState::ExecutionActive => "EXECUTION_ACTIVE",
-            ExecutionState::ExecutionArchived => "EXECUTION_ARCHIVED",
-        }
-    }
-    /// Creates an enum from field names used in the ProtoBuf definition.
-    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-        match value {
-            "EXECUTION_ACTIVE" => Some(Self::ExecutionActive),
-            "EXECUTION_ARCHIVED" => Some(Self::ExecutionArchived),
-            _ => None,
-        }
-    }
-}
-/// A message used to fetch a single node execution entity.
-/// See :ref:`ref_flyteidl.admin.NodeExecution` for more details
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct NodeExecutionGetRequest {
-    /// Uniquely identifies an individual node execution.
-    /// +required
-    #[prost(message, optional, tag = "1")]
-    pub id: ::core::option::Option<super::core::NodeExecutionIdentifier>,
-}
-/// Represents a request structure to retrieve a list of node execution entities.
-/// See :ref:`ref_flyteidl.admin.NodeExecution` for more details
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct NodeExecutionListRequest {
-    /// Indicates the workflow execution to filter by.
-    /// +required
-    #[prost(message, optional, tag = "1")]
-    pub workflow_execution_id: ::core::option::Option<
-        super::core::WorkflowExecutionIdentifier,
-    >,
-    /// Indicates the number of resources to be returned.
-    /// +required
-    #[prost(uint32, tag = "2")]
-    pub limit: u32,
-    #[prost(string, tag = "3")]
-    pub token: ::prost::alloc::string::String,
-    /// Indicates a list of filters passed as string.
-    /// More info on constructing filters :
-    /// +optional
-    #[prost(string, tag = "4")]
-    pub filters: ::prost::alloc::string::String,
-    /// Sort ordering.
-    /// +optional
-    #[prost(message, optional, tag = "5")]
-    pub sort_by: ::core::option::Option<Sort>,
-    /// Unique identifier of the parent node in the execution
-    /// +optional
-    #[prost(string, tag = "6")]
-    pub unique_parent_id: ::prost::alloc::string::String,
-}
-/// Represents a request structure to retrieve a list of node execution entities launched by a specific task.
-/// This can arise when a task yields a subworkflow.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct NodeExecutionForTaskListRequest {
-    /// Indicates the node execution to filter by.
-    /// +required
-    #[prost(message, optional, tag = "1")]
-    pub task_execution_id: ::core::option::Option<super::core::TaskExecutionIdentifier>,
-    /// Indicates the number of resources to be returned.
-    /// +required
-    #[prost(uint32, tag = "2")]
-    pub limit: u32,
-    /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
-    /// in a query.
-    /// +optional
-    #[prost(string, tag = "3")]
-    pub token: ::prost::alloc::string::String,
-    /// Indicates a list of filters passed as string.
-    /// More info on constructing filters :
-    /// +optional
-    #[prost(string, tag = "4")]
-    pub filters: ::prost::alloc::string::String,
-    /// Sort ordering.
-    /// +optional
-    #[prost(message, optional, tag = "5")]
-    pub sort_by: ::core::option::Option<Sort>,
-}
-/// Encapsulates all details for a single node execution entity.
-/// A node represents a component in the overall workflow graph. A node can launch a task, multiple tasks, an entire nested
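// Editorial sketch: a first-page node-execution listing for one workflow execution, using
// the request message above. Module paths `flyteidl::{admin, core}` are assumptions.
use flyteidl::{admin, core};

fn first_page(exec_id: core::WorkflowExecutionIdentifier) -> admin::NodeExecutionListRequest {
    admin::NodeExecutionListRequest {
        workflow_execution_id: Some(exec_id),
        limit: 100,           // required by the API
        token: String::new(), // an empty token requests the first page
        filters: String::new(),
        sort_by: None,
        unique_parent_id: String::new(),
    }
}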
-/// sub-workflow, or even a separate child-workflow execution.
-/// The same task can be called repeatedly in a single workflow but each node is unique.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct NodeExecution {
-    /// Uniquely identifies an individual node execution.
-    #[prost(message, optional, tag = "1")]
-    pub id: ::core::option::Option<super::core::NodeExecutionIdentifier>,
-    /// Path to remote data store where input blob is stored.
-    #[prost(string, tag = "2")]
-    pub input_uri: ::prost::alloc::string::String,
-    /// Computed results associated with this node execution.
-    #[prost(message, optional, tag = "3")]
-    pub closure: ::core::option::Option<NodeExecutionClosure>,
-    /// Metadata for Node Execution
-    #[prost(message, optional, tag = "4")]
-    pub metadata: ::core::option::Option<NodeExecutionMetaData>,
-}
-/// Represents additional attributes related to a Node Execution
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct NodeExecutionMetaData {
-    /// Node executions are grouped depending on retries of the parent.
-    /// Retry group is unique within the context of a parent node.
-    #[prost(string, tag = "1")]
-    pub retry_group: ::prost::alloc::string::String,
-    /// Boolean flag indicating if the node has child nodes under it.
-    /// This can be true when a node contains a dynamic workflow which then produces
-    /// child nodes.
-    #[prost(bool, tag = "2")]
-    pub is_parent_node: bool,
-    /// Node id of the node in the original workflow
-    /// This maps to value of WorkflowTemplate.nodes\[X\].id
-    #[prost(string, tag = "3")]
-    pub spec_node_id: ::prost::alloc::string::String,
-    /// Boolean flag indicating if the node contains a dynamic workflow which then produces child nodes.
-    /// This is to distinguish between subworkflows and dynamic workflows which can both have is_parent_node as true.
-    #[prost(bool, tag = "4")]
-    pub is_dynamic: bool,
-    /// Boolean flag indicating if the node is an array node. This is intended to uniquely identify
-    /// array nodes from other nodes which can have is_parent_node as true.
-    #[prost(bool, tag = "5")]
-    pub is_array: bool,
-}
-/// Response structure for a request to retrieve a list of node execution entities.
-/// See :ref:`ref_flyteidl.admin.NodeExecution` for more details
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct NodeExecutionList {
-    #[prost(message, repeated, tag = "1")]
-    pub node_executions: ::prost::alloc::vec::Vec<NodeExecution>,
-    /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page
-    /// in a query. If there are no more results, this value will be empty.
-    #[prost(string, tag = "2")]
-    pub token: ::prost::alloc::string::String,
-}
-/// Container for node execution details and results.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct NodeExecutionClosure {
-    /// The last recorded phase for this node execution.
-    #[prost(enumeration = "super::core::node_execution::Phase", tag = "3")]
-    pub phase: i32,
-    /// Time at which the node execution began running.
-    #[prost(message, optional, tag = "4")]
-    pub started_at: ::core::option::Option<::prost_types::Timestamp>,
-    /// The amount of time the node execution spent running.
-    #[prost(message, optional, tag = "5")]
-    pub duration: ::core::option::Option<::prost_types::Duration>,
-    /// Time at which the node execution was created.
- #[prost(message, optional, tag = "6")] - pub created_at: ::core::option::Option<::prost_types::Timestamp>, - /// Time at which the node execution was last updated. - #[prost(message, optional, tag = "7")] - pub updated_at: ::core::option::Option<::prost_types::Timestamp>, - /// String location uniquely identifying where the deck HTML file is. - /// NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar) - #[prost(string, tag = "11")] - pub deck_uri: ::prost::alloc::string::String, - /// dynamic_job_spec_uri is the location of the DynamicJobSpec proto message for a DynamicWorkflow. This is required - /// to correctly recover partially completed executions where the subworkflow has already been compiled. - #[prost(string, tag = "12")] - pub dynamic_job_spec_uri: ::prost::alloc::string::String, - /// Only a node in a terminal state will have a non-empty output_result. - #[prost(oneof = "node_execution_closure::OutputResult", tags = "1, 2, 10")] - pub output_result: ::core::option::Option, - /// Store metadata for what the node launched. - /// for ex: if this is a workflow node, we store information for the launched workflow. - #[prost(oneof = "node_execution_closure::TargetMetadata", tags = "8, 9")] - pub target_metadata: ::core::option::Option, -} -/// Nested message and enum types in `NodeExecutionClosure`. -pub mod node_execution_closure { - /// Only a node in a terminal state will have a non-empty output_result. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum OutputResult { - /// Links to a remotely stored, serialized core.LiteralMap of node execution outputs. - /// DEPRECATED. Use GetNodeExecutionData to fetch output data instead. - #[prost(string, tag = "1")] - OutputUri(::prost::alloc::string::String), - /// Error information for the Node - #[prost(message, tag = "2")] - Error(super::super::core::ExecutionError), - /// Raw output data produced by this node execution. - /// DEPRECATED. Use GetNodeExecutionData to fetch output data instead. - #[prost(message, tag = "10")] - OutputData(super::super::core::LiteralMap), - } - /// Store metadata for what the node launched. - /// for ex: if this is a workflow node, we store information for the launched workflow. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum TargetMetadata { - #[prost(message, tag = "8")] - WorkflowNodeMetadata(super::WorkflowNodeMetadata), - #[prost(message, tag = "9")] - TaskNodeMetadata(super::TaskNodeMetadata), - } -} -/// Metadata for a WorkflowNode -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowNodeMetadata { - /// The identifier for a workflow execution launched by a node. - #[prost(message, optional, tag = "1")] - pub execution_id: ::core::option::Option, -} -/// Metadata for the case in which the node is a TaskNode -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskNodeMetadata { - /// Captures the status of caching for this execution. 
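// Editorial sketch: distinguishing what a node launched via the TargetMetadata oneof above.
// Module path `flyteidl::admin` is an assumption.
use flyteidl::admin::{node_execution_closure::TargetMetadata, NodeExecutionClosure};

fn launched(closure: &NodeExecutionClosure) -> &'static str {
    match &closure.target_metadata {
        Some(TargetMetadata::WorkflowNodeMetadata(_)) => "a child workflow execution",
        Some(TargetMetadata::TaskNodeMetadata(_)) => "a task (check cache_status for hits)",
        None => "nothing recorded",
    }
}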
- #[prost(enumeration = "super::core::CatalogCacheStatus", tag = "1")] - pub cache_status: i32, - /// This structure carries the catalog artifact information - #[prost(message, optional, tag = "2")] - pub catalog_key: ::core::option::Option, - /// The latest checkpoint location - #[prost(string, tag = "4")] - pub checkpoint_uri: ::prost::alloc::string::String, -} -/// For dynamic workflow nodes we capture information about the dynamic workflow definition that gets generated. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DynamicWorkflowNodeMetadata { - /// id represents the unique identifier of the workflow. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// Represents the compiled representation of the embedded dynamic workflow. - #[prost(message, optional, tag = "2")] - pub compiled_workflow: ::core::option::Option, - /// dynamic_job_spec_uri is the location of the DynamicJobSpec proto message for this DynamicWorkflow. This is - /// required to correctly recover partially completed executions where the subworkflow has already been compiled. - #[prost(string, tag = "3")] - pub dynamic_job_spec_uri: ::prost::alloc::string::String, -} -/// Request structure to fetch inputs and output for a node execution. -/// By default, these are not returned in :ref:`ref_flyteidl.admin.NodeExecutionGetRequest` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NodeExecutionGetDataRequest { - /// The identifier of the node execution for which to fetch inputs and outputs. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, -} -/// Response structure for NodeExecutionGetDataRequest which contains inputs and outputs for a node execution. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NodeExecutionGetDataResponse { - /// Signed url to fetch a core.LiteralMap of node execution inputs. - /// Deprecated: Please use full_inputs instead. - #[deprecated] - #[prost(message, optional, tag = "1")] - pub inputs: ::core::option::Option, - /// Signed url to fetch a core.LiteralMap of node execution outputs. - /// Deprecated: Please use full_outputs instead. - #[deprecated] - #[prost(message, optional, tag = "2")] - pub outputs: ::core::option::Option, - /// Full_inputs will only be populated if they are under a configured size threshold. - #[prost(message, optional, tag = "3")] - pub full_inputs: ::core::option::Option, - /// Full_outputs will only be populated if they are under a configured size threshold. - #[prost(message, optional, tag = "4")] - pub full_outputs: ::core::option::Option, - /// Optional Workflow closure for a dynamically generated workflow, in the case this node yields a dynamic workflow we return its structure here. 
- #[prost(message, optional, tag = "16")] - pub dynamic_workflow: ::core::option::Option, - #[prost(message, optional, tag = "17")] - pub flyte_urls: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetDynamicNodeWorkflowRequest { - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DynamicNodeWorkflowResponse { - #[prost(message, optional, tag = "1")] - pub compiled_workflow: ::core::option::Option, -} -/// A message used to fetch a single task execution entity. -/// See :ref:`ref_flyteidl.admin.TaskExecution` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskExecutionGetRequest { - /// Unique identifier for the task execution. - /// +required - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, -} -/// Represents a request structure to retrieve a list of task execution entities yielded by a specific node execution. -/// See :ref:`ref_flyteidl.admin.TaskExecution` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskExecutionListRequest { - /// Indicates the node execution to filter by. - /// +required - #[prost(message, optional, tag = "1")] - pub node_execution_id: ::core::option::Option, - /// Indicates the number of resources to be returned. - /// +required - #[prost(uint32, tag = "2")] - pub limit: u32, - /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page - /// in a query. - /// +optional - #[prost(string, tag = "3")] - pub token: ::prost::alloc::string::String, - /// Indicates a list of filters passed as string. - /// More info on constructing filters : - /// +optional - #[prost(string, tag = "4")] - pub filters: ::prost::alloc::string::String, - /// Sort ordering for returned list. - /// +optional - #[prost(message, optional, tag = "5")] - pub sort_by: ::core::option::Option, -} -/// Encapsulates all details for a single task execution entity. -/// A task execution represents an instantiated task, including all inputs and additional -/// metadata as well as computed results included state, outputs, and duration-based attributes. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskExecution { - /// Unique identifier for the task execution. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// Path to remote data store where input blob is stored. - #[prost(string, tag = "2")] - pub input_uri: ::prost::alloc::string::String, - /// Task execution details and results. - #[prost(message, optional, tag = "3")] - pub closure: ::core::option::Option, - /// Whether this task spawned nodes. - #[prost(bool, tag = "4")] - pub is_parent: bool, -} -/// Response structure for a query to list of task execution entities. -/// See :ref:`ref_flyteidl.admin.TaskExecution` for more details -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskExecutionList { - #[prost(message, repeated, tag = "1")] - pub task_executions: ::prost::alloc::vec::Vec, - /// In the case of multiple pages of results, the server-provided token can be used to fetch the next page - /// in a query. 
If there are no more results, this value will be empty.
-    #[prost(string, tag = "2")]
-    pub token: ::prost::alloc::string::String,
-}
-/// Container for task execution details and results.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct TaskExecutionClosure {
-    /// The last recorded phase for this task execution.
-    #[prost(enumeration = "super::core::task_execution::Phase", tag = "3")]
-    pub phase: i32,
-    /// Detailed log information output by the task execution.
-    #[prost(message, repeated, tag = "4")]
-    pub logs: ::prost::alloc::vec::Vec<super::core::TaskLog>,
-    /// Time at which the task execution began running.
-    #[prost(message, optional, tag = "5")]
-    pub started_at: ::core::option::Option<::prost_types::Timestamp>,
-    /// The amount of time the task execution spent running.
-    #[prost(message, optional, tag = "6")]
-    pub duration: ::core::option::Option<::prost_types::Duration>,
-    /// Time at which the task execution was created.
-    #[prost(message, optional, tag = "7")]
-    pub created_at: ::core::option::Option<::prost_types::Timestamp>,
-    /// Time at which the task execution was last updated.
-    #[prost(message, optional, tag = "8")]
-    pub updated_at: ::core::option::Option<::prost_types::Timestamp>,
-    /// Custom data specific to the task plugin.
-    #[prost(message, optional, tag = "9")]
-    pub custom_info: ::core::option::Option<::prost_types::Struct>,
-    /// If there is an explanation for the most recent phase transition, the reason will capture it.
-    #[prost(string, tag = "10")]
-    pub reason: ::prost::alloc::string::String,
-    /// A predefined yet extensible Task type identifier.
-    #[prost(string, tag = "11")]
-    pub task_type: ::prost::alloc::string::String,
-    /// Metadata around how a task was executed.
-    #[prost(message, optional, tag = "16")]
-    pub metadata: ::core::option::Option<super::event::TaskExecutionMetadata>,
-    /// The event version is used to indicate versioned changes in how data is maintained using this
-    /// proto message. For example, event_version > 0 means that map task logs use the
-    /// TaskExecutionMetadata ExternalResourceInfo fields for each subtask rather than the TaskLog
-    /// in this message.
-    #[prost(int32, tag = "17")]
-    pub event_version: i32,
-    /// A time-series of the phase transition or update explanations. This, when compared to storing a singular reason
-    /// as previously done, is much more valuable in visualizing and understanding historical evaluations.
-    #[prost(message, repeated, tag = "18")]
-    pub reasons: ::prost::alloc::vec::Vec<Reason>,
-    #[prost(oneof = "task_execution_closure::OutputResult", tags = "1, 2, 12")]
-    pub output_result: ::core::option::Option<task_execution_closure::OutputResult>,
-}
-/// Nested message and enum types in `TaskExecutionClosure`.
-pub mod task_execution_closure {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum OutputResult {
-        /// Path to remote data store where output blob is stored if the execution succeeded (and produced outputs).
-        /// DEPRECATED. Use GetTaskExecutionData to fetch output data instead.
-        #[prost(string, tag = "1")]
-        OutputUri(::prost::alloc::string::String),
-        /// Error information for the task execution. Populated if the execution failed.
-        #[prost(message, tag = "2")]
-        Error(super::super::core::ExecutionError),
-        /// Raw output data produced by this task execution.
-        /// DEPRECATED. Use GetTaskExecutionData to fetch output data instead.
- #[prost(message, tag = "12")] - OutputData(super::super::core::LiteralMap), - } -} -/// Reason is a single message annotated with a timestamp to indicate the instant the reason occurred. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Reason { - /// occurred_at is the timestamp indicating the instant that this reason happened. - #[prost(message, optional, tag = "1")] - pub occurred_at: ::core::option::Option<::prost_types::Timestamp>, - /// message is the explanation for the most recent phase transition or status update. - #[prost(string, tag = "2")] - pub message: ::prost::alloc::string::String, -} -/// Request structure to fetch inputs and output for a task execution. -/// By default this data is not returned inline in :ref:`ref_flyteidl.admin.TaskExecutionGetRequest` -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskExecutionGetDataRequest { - /// The identifier of the task execution for which to fetch inputs and outputs. - /// +required - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, -} -/// Response structure for TaskExecutionGetDataRequest which contains inputs and outputs for a task execution. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskExecutionGetDataResponse { - /// Signed url to fetch a core.LiteralMap of task execution inputs. - /// Deprecated: Please use full_inputs instead. - #[deprecated] - #[prost(message, optional, tag = "1")] - pub inputs: ::core::option::Option, - /// Signed url to fetch a core.LiteralMap of task execution outputs. - /// Deprecated: Please use full_outputs instead. - #[deprecated] - #[prost(message, optional, tag = "2")] - pub outputs: ::core::option::Option, - /// Full_inputs will only be populated if they are under a configured size threshold. - #[prost(message, optional, tag = "3")] - pub full_inputs: ::core::option::Option, - /// Full_outputs will only be populated if they are under a configured size threshold. - #[prost(message, optional, tag = "4")] - pub full_outputs: ::core::option::Option, - /// flyte tiny url to fetch a core.LiteralMap of task execution's IO - /// Deck will be empty for task - #[prost(message, optional, tag = "5")] - pub flyte_urls: ::core::option::Option, -} -/// Response for the GetVersion API -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetVersionResponse { - /// The control plane version information. 
FlyteAdmin and related components - /// form the control plane of Flyte - #[prost(message, optional, tag = "1")] - pub control_plane_version: ::core::option::Option, -} -/// Provides Version information for a component -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Version { - /// Specifies the GIT sha of the build - #[prost(string, tag = "1")] - pub build: ::prost::alloc::string::String, - /// Version for the build, should follow a semver - #[prost(string, tag = "2")] - pub version: ::prost::alloc::string::String, - /// Build timestamp - #[prost(string, tag = "3")] - pub build_time: ::prost::alloc::string::String, -} -/// Empty request for GetVersion -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetVersionRequest {} diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.cacheservice.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.cacheservice.rs deleted file mode 100644 index 54ee6985b9..0000000000 --- a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.cacheservice.rs +++ /dev/null @@ -1,399 +0,0 @@ -/// -/// Additional metadata as key-value pairs -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct KeyMapMetadata { - /// Additional metadata as key-value pairs - #[prost(map = "string, string", tag = "1")] - pub values: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, -} -/// -/// Metadata for cached outputs, including the source identifier and timestamps. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Metadata { - /// Source task or workflow identifier - #[prost(message, optional, tag = "1")] - pub source_identifier: ::core::option::Option, - /// Additional metadata as key-value pairs - #[prost(message, optional, tag = "2")] - pub key_map: ::core::option::Option, - /// Creation timestamp - #[prost(message, optional, tag = "3")] - pub created_at: ::core::option::Option<::prost_types::Timestamp>, - /// Last update timestamp - #[prost(message, optional, tag = "4")] - pub last_updated_at: ::core::option::Option<::prost_types::Timestamp>, -} -/// -/// Represents cached output, either as literals or an URI, with associated metadata. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CachedOutput { - /// Associated metadata - #[prost(message, optional, tag = "3")] - pub metadata: ::core::option::Option, - #[prost(oneof = "cached_output::Output", tags = "1, 2")] - pub output: ::core::option::Option, -} -/// Nested message and enum types in `CachedOutput`. -pub mod cached_output { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Output { - /// Output literals - #[prost(message, tag = "1")] - OutputLiterals(super::super::core::LiteralMap), - /// URI to output data - #[prost(string, tag = "2")] - OutputUri(::prost::alloc::string::String), - } -} -/// -/// Request to retrieve cached data by key. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetCacheRequest { - /// Cache key - #[prost(string, tag = "1")] - pub key: ::prost::alloc::string::String, -} -/// -/// Response with cached data for a given key. 
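// Editor's sketch (not part of this diff): how the generated admin messages above
// compose for a paginated listing call. The `flyteidl::admin` import path and all
// field values are illustrative assumptions, not APIs confirmed by this patch.
use flyteidl::admin::TaskExecutionListRequest;

fn next_page(token: String) -> TaskExecutionListRequest {
    TaskExecutionListRequest {
        node_execution_id: None, // Some(NodeExecutionIdentifier { .. }) in real use (+required)
        limit: 100,              // page size (+required)
        token,                   // empty on the first request; echo the server token afterwards
        filters: String::new(),  // +optional filter expression string
        sort_by: None,           // +optional Sort message
    }
}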
diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.cacheservice.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.cacheservice.rs
deleted file mode 100644
index 54ee6985b9..0000000000
--- a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.cacheservice.rs
+++ /dev/null
@@ -1,399 +0,0 @@
-///
-/// Additional metadata as key-value pairs
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct KeyMapMetadata {
-    /// Additional metadata as key-value pairs
-    #[prost(map = "string, string", tag = "1")]
-    pub values: ::std::collections::HashMap<
-        ::prost::alloc::string::String,
-        ::prost::alloc::string::String,
-    >,
-}
-///
-/// Metadata for cached outputs, including the source identifier and timestamps.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Metadata {
-    /// Source task or workflow identifier
-    #[prost(message, optional, tag = "1")]
-    pub source_identifier: ::core::option::Option<super::core::Identifier>,
-    /// Additional metadata as key-value pairs
-    #[prost(message, optional, tag = "2")]
-    pub key_map: ::core::option::Option<KeyMapMetadata>,
-    /// Creation timestamp
-    #[prost(message, optional, tag = "3")]
-    pub created_at: ::core::option::Option<::prost_types::Timestamp>,
-    /// Last update timestamp
-    #[prost(message, optional, tag = "4")]
-    pub last_updated_at: ::core::option::Option<::prost_types::Timestamp>,
-}
-///
-/// Represents cached output, either as literals or a URI, with associated metadata.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct CachedOutput {
-    /// Associated metadata
-    #[prost(message, optional, tag = "3")]
-    pub metadata: ::core::option::Option<Metadata>,
-    #[prost(oneof = "cached_output::Output", tags = "1, 2")]
-    pub output: ::core::option::Option<cached_output::Output>,
-}
-/// Nested message and enum types in `CachedOutput`.
-pub mod cached_output {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum Output {
-        /// Output literals
-        #[prost(message, tag = "1")]
-        OutputLiterals(super::super::core::LiteralMap),
-        /// URI to output data
-        #[prost(string, tag = "2")]
-        OutputUri(::prost::alloc::string::String),
-    }
-}
-///
-/// Request to retrieve cached data by key.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetCacheRequest {
-    /// Cache key
-    #[prost(string, tag = "1")]
-    pub key: ::prost::alloc::string::String,
-}
-///
-/// Response with cached data for a given key.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetCacheResponse {
-    /// Cached output
-    #[prost(message, optional, tag = "1")]
-    pub output: ::core::option::Option<CachedOutput>,
-}
-///
-/// Request to store/update cached data by key.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct PutCacheRequest {
-    /// Cache key
-    #[prost(string, tag = "1")]
-    pub key: ::prost::alloc::string::String,
-    /// Output to cache
-    #[prost(message, optional, tag = "2")]
-    pub output: ::core::option::Option<CachedOutput>,
-    /// Overwrite flag
-    #[prost(bool, tag = "3")]
-    pub overwrite: bool,
-}
-///
-/// Response message of cache store/update operation.
-///
-/// Empty, success indicated by no errors
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct PutCacheResponse {}
-///
-/// Request to delete cached data by key.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct DeleteCacheRequest {
-    /// Cache key
-    #[prost(string, tag = "1")]
-    pub key: ::prost::alloc::string::String,
-}
-///
-/// Response message of cache deletion operation.
-///
-/// Empty, success indicated by no errors
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct DeleteCacheResponse {}
-/// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Reservation {
-    /// The unique ID for the reservation - same as the cache key
-    #[prost(string, tag = "1")]
-    pub key: ::prost::alloc::string::String,
-    /// The unique ID of the owner for the reservation
-    #[prost(string, tag = "2")]
-    pub owner_id: ::prost::alloc::string::String,
-    /// Requested reservation extension heartbeat interval
-    #[prost(message, optional, tag = "3")]
-    pub heartbeat_interval: ::core::option::Option<::prost_types::Duration>,
-    /// Expiration timestamp of this reservation
-    #[prost(message, optional, tag = "4")]
-    pub expires_at: ::core::option::Option<::prost_types::Timestamp>,
-}
-///
-/// Request to get or extend a reservation for a cache key
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetOrExtendReservationRequest {
-    /// The unique ID for the reservation - same as the cache key
-    #[prost(string, tag = "1")]
-    pub key: ::prost::alloc::string::String,
-    /// The unique ID of the owner for the reservation
-    #[prost(string, tag = "2")]
-    pub owner_id: ::prost::alloc::string::String,
-    /// Requested reservation extension heartbeat interval
-    #[prost(message, optional, tag = "3")]
-    pub heartbeat_interval: ::core::option::Option<::prost_types::Duration>,
-}
-///
-/// Response to a request to get or extend a reservation for a cache key
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct GetOrExtendReservationResponse {
-    /// The reservation that was created or extended
-    #[prost(message, optional, tag = "1")]
-    pub reservation: ::core::option::Option<Reservation>,
-}
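// Editor's sketch (not part of this diff): the reservation messages above are designed
// for an acquire -> work -> release flow. Key, owner, and interval values are
// illustrative assumptions only.
fn acquire_request(key: &str, owner: &str) -> GetOrExtendReservationRequest {
    GetOrExtendReservationRequest {
        key: key.to_string(),
        owner_id: owner.to_string(),
        // Ask the server to expect a heartbeat roughly every 10 seconds.
        heartbeat_interval: Some(::prost_types::Duration { seconds: 10, nanos: 0 }),
    }
}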
-///
-/// Request to release the reservation for a cache key
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ReleaseReservationRequest {
-    /// The unique ID for the reservation - same as the cache key
-    #[prost(string, tag = "1")]
-    pub key: ::prost::alloc::string::String,
-    /// The unique ID of the owner for the reservation
-    #[prost(string, tag = "2")]
-    pub owner_id: ::prost::alloc::string::String,
-}
-///
-/// Response message of release reservation operation.
-///
-/// Empty, success indicated by no errors
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ReleaseReservationResponse {}
-/// Generated client implementations.
-pub mod cache_service_client {
-    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
-    use tonic::codegen::*;
-    use tonic::codegen::http::Uri;
-    ///
-    /// CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs.
-    #[derive(Debug, Clone)]
-    pub struct CacheServiceClient<T> {
-        inner: tonic::client::Grpc<T>,
-    }
-    impl CacheServiceClient<tonic::transport::Channel> {
-        /// Attempt to create a new client by connecting to a given endpoint.
-        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
-        where
-            D: TryInto<tonic::transport::Endpoint>,
-            D::Error: Into<StdError>,
-        {
-            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
-            Ok(Self::new(conn))
-        }
-    }
-    impl<T> CacheServiceClient<T>
-    where
-        T: tonic::client::GrpcService<tonic::body::BoxBody>,
-        T::Error: Into<StdError>,
-        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
-        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
-    {
-        pub fn new(inner: T) -> Self {
-            let inner = tonic::client::Grpc::new(inner);
-            Self { inner }
-        }
-        pub fn with_origin(inner: T, origin: Uri) -> Self {
-            let inner = tonic::client::Grpc::with_origin(inner, origin);
-            Self { inner }
-        }
-        pub fn with_interceptor<F>(
-            inner: T,
-            interceptor: F,
-        ) -> CacheServiceClient<InterceptedService<T, F>>
-        where
-            F: tonic::service::Interceptor,
-            T::ResponseBody: Default,
-            T: tonic::codegen::Service<
-                http::Request<tonic::body::BoxBody>,
-                Response = http::Response<
-                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
-                >,
-            >,
-            <T as tonic::codegen::Service<
-                http::Request<tonic::body::BoxBody>,
-            >>::Error: Into<StdError> + Send + Sync,
-        {
-            CacheServiceClient::new(InterceptedService::new(inner, interceptor))
-        }
-        /// Compress requests with the given encoding.
-        ///
-        /// This requires the server to support it otherwise it might respond with an
-        /// error.
-        #[must_use]
-        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
-            self.inner = self.inner.send_compressed(encoding);
-            self
-        }
-        /// Enable decompressing responses.
-        #[must_use]
-        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
-            self.inner = self.inner.accept_compressed(encoding);
-            self
-        }
-        /// Limits the maximum size of a decoded message.
-        ///
-        /// Default: `4MB`
-        #[must_use]
-        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
-            self.inner = self.inner.max_decoding_message_size(limit);
-            self
-        }
-        /// Limits the maximum size of an encoded message.
-        ///
-        /// Default: `usize::MAX`
-        #[must_use]
-        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
-            self.inner = self.inner.max_encoding_message_size(limit);
-            self
-        }
-        /// Retrieves cached data by key.
-        pub async fn get(
-            &mut self,
-            request: impl tonic::IntoRequest<super::GetCacheRequest>,
-        ) -> std::result::Result<
-            tonic::Response<super::GetCacheResponse>,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.cacheservice.CacheService/Get",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(GrpcMethod::new("flyteidl.cacheservice.CacheService", "Get"));
-            self.inner.unary(req, path, codec).await
-        }
-        /// Stores or updates cached data by key.
-        pub async fn put(
-            &mut self,
-            request: impl tonic::IntoRequest<super::PutCacheRequest>,
-        ) -> std::result::Result<
-            tonic::Response<super::PutCacheResponse>,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.cacheservice.CacheService/Put",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(GrpcMethod::new("flyteidl.cacheservice.CacheService", "Put"));
-            self.inner.unary(req, path, codec).await
-        }
-        /// Deletes cached data by key.
-        pub async fn delete(
-            &mut self,
-            request: impl tonic::IntoRequest<super::DeleteCacheRequest>,
-        ) -> std::result::Result<
-            tonic::Response<super::DeleteCacheResponse>,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.cacheservice.CacheService/Delete",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(GrpcMethod::new("flyteidl.cacheservice.CacheService", "Delete"));
-            self.inner.unary(req, path, codec).await
-        }
-        /// Get or extend a reservation for a cache key
-        pub async fn get_or_extend_reservation(
-            &mut self,
-            request: impl tonic::IntoRequest<super::GetOrExtendReservationRequest>,
-        ) -> std::result::Result<
-            tonic::Response<super::GetOrExtendReservationResponse>,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.cacheservice.CacheService/GetOrExtendReservation",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(
-                    GrpcMethod::new(
-                        "flyteidl.cacheservice.CacheService",
-                        "GetOrExtendReservation",
-                    ),
-                );
-            self.inner.unary(req, path, codec).await
-        }
-        /// Release the reservation for a cache key
-        pub async fn release_reservation(
-            &mut self,
-            request: impl tonic::IntoRequest<super::ReleaseReservationRequest>,
-        ) -> std::result::Result<
-            tonic::Response<super::ReleaseReservationResponse>,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.cacheservice.CacheService/ReleaseReservation",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(
-                    GrpcMethod::new(
-                        "flyteidl.cacheservice.CacheService",
-                        "ReleaseReservation",
-                    ),
-                );
-            self.inner.unary(req, path, codec).await
-        }
-    }
-}
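// Editor's sketch (not part of this diff): driving the generated CacheServiceClient
// above end to end. The `flyteidl::cacheservice` module path, endpoint URL, and cache
// key are assumptions; tokio and tonic's `transport` feature are required for connect().
use flyteidl::cacheservice::cache_service_client::CacheServiceClient;
use flyteidl::cacheservice::GetCacheRequest;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = CacheServiceClient::connect("http://localhost:8089").await?;
    let resp = client
        .get(GetCacheRequest { key: "demo-key".to_string() })
        .await?        // Err(tonic::Status) on RPC failure
        .into_inner(); // unwrap tonic::Response<GetCacheResponse>
    println!("cache hit: {}", resp.output.is_some());
    Ok(())
}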
diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.core.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.core.rs
deleted file mode 100644
index 8aa1741309..0000000000
--- a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.core.rs
+++ /dev/null
@@ -1,3162 +0,0 @@
-/// Indicates various phases of Workflow Execution
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowExecution {}
-/// Nested message and enum types in `WorkflowExecution`.
-pub mod workflow_execution {
-    #[derive(
-        Clone,
-        Copy,
-        Debug,
-        PartialEq,
-        Eq,
-        Hash,
-        PartialOrd,
-        Ord,
-        ::prost::Enumeration
-    )]
-    #[repr(i32)]
-    pub enum Phase {
-        Undefined = 0,
-        Queued = 1,
-        Running = 2,
-        Succeeding = 3,
-        Succeeded = 4,
-        Failing = 5,
-        Failed = 6,
-        Aborted = 7,
-        TimedOut = 8,
-        Aborting = 9,
-    }
-    impl Phase {
-        /// String value of the enum field names used in the ProtoBuf definition.
-        ///
-        /// The values are not transformed in any way and thus are considered stable
-        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-        pub fn as_str_name(&self) -> &'static str {
-            match self {
-                Phase::Undefined => "UNDEFINED",
-                Phase::Queued => "QUEUED",
-                Phase::Running => "RUNNING",
-                Phase::Succeeding => "SUCCEEDING",
-                Phase::Succeeded => "SUCCEEDED",
-                Phase::Failing => "FAILING",
-                Phase::Failed => "FAILED",
-                Phase::Aborted => "ABORTED",
-                Phase::TimedOut => "TIMED_OUT",
-                Phase::Aborting => "ABORTING",
-            }
-        }
-        /// Creates an enum from field names used in the ProtoBuf definition.
-        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-            match value {
-                "UNDEFINED" => Some(Self::Undefined),
-                "QUEUED" => Some(Self::Queued),
-                "RUNNING" => Some(Self::Running),
-                "SUCCEEDING" => Some(Self::Succeeding),
-                "SUCCEEDED" => Some(Self::Succeeded),
-                "FAILING" => Some(Self::Failing),
-                "FAILED" => Some(Self::Failed),
-                "ABORTED" => Some(Self::Aborted),
-                "TIMED_OUT" => Some(Self::TimedOut),
-                "ABORTING" => Some(Self::Aborting),
-                _ => None,
-            }
-        }
-    }
-}
-/// Indicates various phases of Node Execution that only include the time spent to run the nodes/workflows
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct NodeExecution {}
-/// Nested message and enum types in `NodeExecution`.
-pub mod node_execution {
-    #[derive(
-        Clone,
-        Copy,
-        Debug,
-        PartialEq,
-        Eq,
-        Hash,
-        PartialOrd,
-        Ord,
-        ::prost::Enumeration
-    )]
-    #[repr(i32)]
-    pub enum Phase {
-        Undefined = 0,
-        Queued = 1,
-        Running = 2,
-        Succeeded = 3,
-        Failing = 4,
-        Failed = 5,
-        Aborted = 6,
-        Skipped = 7,
-        TimedOut = 8,
-        DynamicRunning = 9,
-        Recovered = 10,
-    }
-    impl Phase {
-        /// String value of the enum field names used in the ProtoBuf definition.
-        ///
-        /// The values are not transformed in any way and thus are considered stable
-        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-        pub fn as_str_name(&self) -> &'static str {
-            match self {
-                Phase::Undefined => "UNDEFINED",
-                Phase::Queued => "QUEUED",
-                Phase::Running => "RUNNING",
-                Phase::Succeeded => "SUCCEEDED",
-                Phase::Failing => "FAILING",
-                Phase::Failed => "FAILED",
-                Phase::Aborted => "ABORTED",
-                Phase::Skipped => "SKIPPED",
-                Phase::TimedOut => "TIMED_OUT",
-                Phase::DynamicRunning => "DYNAMIC_RUNNING",
-                Phase::Recovered => "RECOVERED",
-            }
-        }
-        /// Creates an enum from field names used in the ProtoBuf definition.
-        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-            match value {
-                "UNDEFINED" => Some(Self::Undefined),
-                "QUEUED" => Some(Self::Queued),
-                "RUNNING" => Some(Self::Running),
-                "SUCCEEDED" => Some(Self::Succeeded),
-                "FAILING" => Some(Self::Failing),
-                "FAILED" => Some(Self::Failed),
-                "ABORTED" => Some(Self::Aborted),
-                "SKIPPED" => Some(Self::Skipped),
-                "TIMED_OUT" => Some(Self::TimedOut),
-                "DYNAMIC_RUNNING" => Some(Self::DynamicRunning),
-                "RECOVERED" => Some(Self::Recovered),
-                _ => None,
-            }
-        }
-    }
-}
-/// Phases that task plugins can go through. Not all phases may be applicable to a specific plugin task,
-/// but this is the cumulative list that customers may want to know about for their task.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct TaskExecution {}
-/// Nested message and enum types in `TaskExecution`.
-pub mod task_execution {
-    #[derive(
-        Clone,
-        Copy,
-        Debug,
-        PartialEq,
-        Eq,
-        Hash,
-        PartialOrd,
-        Ord,
-        ::prost::Enumeration
-    )]
-    #[repr(i32)]
-    pub enum Phase {
-        Undefined = 0,
-        Queued = 1,
-        Running = 2,
-        Succeeded = 3,
-        Aborted = 4,
-        Failed = 5,
-        /// To indicate cases where task is initializing, like: ErrImagePull, ContainerCreating, PodInitializing
-        Initializing = 6,
-        /// To address cases, where underlying resource is not available: Backoff error, Resource quota exceeded
-        WaitingForResources = 7,
-    }
-    impl Phase {
-        /// String value of the enum field names used in the ProtoBuf definition.
-        ///
-        /// The values are not transformed in any way and thus are considered stable
-        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-        pub fn as_str_name(&self) -> &'static str {
-            match self {
-                Phase::Undefined => "UNDEFINED",
-                Phase::Queued => "QUEUED",
-                Phase::Running => "RUNNING",
-                Phase::Succeeded => "SUCCEEDED",
-                Phase::Aborted => "ABORTED",
-                Phase::Failed => "FAILED",
-                Phase::Initializing => "INITIALIZING",
-                Phase::WaitingForResources => "WAITING_FOR_RESOURCES",
-            }
-        }
-        /// Creates an enum from field names used in the ProtoBuf definition.
-        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-            match value {
-                "UNDEFINED" => Some(Self::Undefined),
-                "QUEUED" => Some(Self::Queued),
-                "RUNNING" => Some(Self::Running),
-                "SUCCEEDED" => Some(Self::Succeeded),
-                "ABORTED" => Some(Self::Aborted),
-                "FAILED" => Some(Self::Failed),
-                "INITIALIZING" => Some(Self::Initializing),
-                "WAITING_FOR_RESOURCES" => Some(Self::WaitingForResources),
-                _ => None,
-            }
-        }
-    }
-}
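// Editor's sketch (not part of this diff): phase fields travel on the wire as raw i32;
// with the prost `Enumeration` derive (prost >= 0.12 assumed) they decode via
// TryFrom<i32> and render via as_str_name().
fn task_phase_name(raw: i32) -> &'static str {
    task_execution::Phase::try_from(raw)
        .map(|p| p.as_str_name())
        .unwrap_or("UNDEFINED")
}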
-/// Represents the error message from the execution.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ExecutionError {
-    /// Error code indicates a grouping of a type of error.
-    /// More Info:
-    #[prost(string, tag = "1")]
-    pub code: ::prost::alloc::string::String,
-    /// Detailed description of the error - including stack trace.
-    #[prost(string, tag = "2")]
-    pub message: ::prost::alloc::string::String,
-    /// Full error contents accessible via a URI
-    #[prost(string, tag = "3")]
-    pub error_uri: ::prost::alloc::string::String,
-    #[prost(enumeration = "execution_error::ErrorKind", tag = "4")]
-    pub kind: i32,
-}
-/// Nested message and enum types in `ExecutionError`.
-pub mod execution_error {
-    /// Error type: System or User
-    #[derive(
-        Clone,
-        Copy,
-        Debug,
-        PartialEq,
-        Eq,
-        Hash,
-        PartialOrd,
-        Ord,
-        ::prost::Enumeration
-    )]
-    #[repr(i32)]
-    pub enum ErrorKind {
-        Unknown = 0,
-        User = 1,
-        System = 2,
-    }
-    impl ErrorKind {
-        /// String value of the enum field names used in the ProtoBuf definition.
-        ///
-        /// The values are not transformed in any way and thus are considered stable
-        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-        pub fn as_str_name(&self) -> &'static str {
-            match self {
-                ErrorKind::Unknown => "UNKNOWN",
-                ErrorKind::User => "USER",
-                ErrorKind::System => "SYSTEM",
-            }
-        }
-        /// Creates an enum from field names used in the ProtoBuf definition.
-        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-            match value {
-                "UNKNOWN" => Some(Self::Unknown),
-                "USER" => Some(Self::User),
-                "SYSTEM" => Some(Self::System),
-                _ => None,
-            }
-        }
-    }
-}
-/// Log information for the task that is specific to a log sink
-/// When our log story is flushed out, we may have more metadata here like log link expiry
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct TaskLog {
-    #[prost(string, tag = "1")]
-    pub uri: ::prost::alloc::string::String,
-    #[prost(string, tag = "2")]
-    pub name: ::prost::alloc::string::String,
-    #[prost(enumeration = "task_log::MessageFormat", tag = "3")]
-    pub message_format: i32,
-    #[prost(message, optional, tag = "4")]
-    pub ttl: ::core::option::Option<::prost_types::Duration>,
-}
-/// Nested message and enum types in `TaskLog`.
-pub mod task_log {
-    #[derive(
-        Clone,
-        Copy,
-        Debug,
-        PartialEq,
-        Eq,
-        Hash,
-        PartialOrd,
-        Ord,
-        ::prost::Enumeration
-    )]
-    #[repr(i32)]
-    pub enum MessageFormat {
-        Unknown = 0,
-        Csv = 1,
-        Json = 2,
-    }
-    impl MessageFormat {
-        /// String value of the enum field names used in the ProtoBuf definition.
-        ///
-        /// The values are not transformed in any way and thus are considered stable
-        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-        pub fn as_str_name(&self) -> &'static str {
-            match self {
-                MessageFormat::Unknown => "UNKNOWN",
-                MessageFormat::Csv => "CSV",
-                MessageFormat::Json => "JSON",
-            }
-        }
-        /// Creates an enum from field names used in the ProtoBuf definition.
-        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-            match value {
-                "UNKNOWN" => Some(Self::Unknown),
-                "CSV" => Some(Self::Csv),
-                "JSON" => Some(Self::Json),
-                _ => None,
-            }
-        }
-    }
-}
-/// Represents customized execution run-time attributes.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct QualityOfServiceSpec {
-    /// Indicates how much queueing delay an execution can tolerate.
-    #[prost(message, optional, tag = "1")]
-    pub queueing_budget: ::core::option::Option<::prost_types::Duration>,
-}
-/// Indicates the priority of an execution.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct QualityOfService {
-    #[prost(oneof = "quality_of_service::Designation", tags = "1, 2")]
-    pub designation: ::core::option::Option<quality_of_service::Designation>,
-}
-/// Nested message and enum types in `QualityOfService`.
-pub mod quality_of_service {
-    #[derive(
-        Clone,
-        Copy,
-        Debug,
-        PartialEq,
-        Eq,
-        Hash,
-        PartialOrd,
-        Ord,
-        ::prost::Enumeration
-    )]
-    #[repr(i32)]
-    pub enum Tier {
-        /// Default: no quality of service specified.
-        Undefined = 0,
-        High = 1,
-        Medium = 2,
-        Low = 3,
-    }
-    impl Tier {
-        /// String value of the enum field names used in the ProtoBuf definition.
-        ///
-        /// The values are not transformed in any way and thus are considered stable
-        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-        pub fn as_str_name(&self) -> &'static str {
-            match self {
-                Tier::Undefined => "UNDEFINED",
-                Tier::High => "HIGH",
-                Tier::Medium => "MEDIUM",
-                Tier::Low => "LOW",
-            }
-        }
-        /// Creates an enum from field names used in the ProtoBuf definition.
-        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-            match value {
-                "UNDEFINED" => Some(Self::Undefined),
-                "HIGH" => Some(Self::High),
-                "MEDIUM" => Some(Self::Medium),
-                "LOW" => Some(Self::Low),
-                _ => None,
-            }
-        }
-    }
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum Designation {
-        #[prost(enumeration = "Tier", tag = "1")]
-        Tier(i32),
-        #[prost(message, tag = "2")]
-        Spec(super::QualityOfServiceSpec),
-    }
-}
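// Editor's sketch (not part of this diff): the QualityOfService oneof is wrapped in an
// Option, and tier values are carried as i32, so construction looks like this.
fn high_priority() -> QualityOfService {
    QualityOfService {
        designation: Some(quality_of_service::Designation::Tier(
            quality_of_service::Tier::High as i32,
        )),
    }
}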
-/// Encapsulation of fields that uniquely identifies a Flyte resource.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Identifier {
-    /// Identifies the specific type of resource that this identifier corresponds to.
-    #[prost(enumeration = "ResourceType", tag = "1")]
-    pub resource_type: i32,
-    /// Name of the project the resource belongs to.
-    #[prost(string, tag = "2")]
-    pub project: ::prost::alloc::string::String,
-    /// Name of the domain the resource belongs to.
-    /// A domain can be considered as a subset within a specific project.
-    #[prost(string, tag = "3")]
-    pub domain: ::prost::alloc::string::String,
-    /// User provided value for the resource.
-    #[prost(string, tag = "4")]
-    pub name: ::prost::alloc::string::String,
-    /// Specific version of the resource.
-    #[prost(string, tag = "5")]
-    pub version: ::prost::alloc::string::String,
-    /// Optional, org key applied to the resource.
-    #[prost(string, tag = "6")]
-    pub org: ::prost::alloc::string::String,
-}
-/// Encapsulation of fields that uniquely identifies a Flyte workflow execution
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowExecutionIdentifier {
-    /// Name of the project the resource belongs to.
-    #[prost(string, tag = "1")]
-    pub project: ::prost::alloc::string::String,
-    /// Name of the domain the resource belongs to.
-    /// A domain can be considered as a subset within a specific project.
-    #[prost(string, tag = "2")]
-    pub domain: ::prost::alloc::string::String,
-    /// User or system provided value for the resource.
-    #[prost(string, tag = "4")]
-    pub name: ::prost::alloc::string::String,
-    /// Optional, org key applied to the resource.
-    #[prost(string, tag = "5")]
-    pub org: ::prost::alloc::string::String,
-}
-/// Encapsulation of fields that identify a Flyte node execution entity.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct NodeExecutionIdentifier {
-    #[prost(string, tag = "1")]
-    pub node_id: ::prost::alloc::string::String,
-    #[prost(message, optional, tag = "2")]
-    pub execution_id: ::core::option::Option<WorkflowExecutionIdentifier>,
-}
-/// Encapsulation of fields that identify a Flyte task execution entity.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct TaskExecutionIdentifier {
-    #[prost(message, optional, tag = "1")]
-    pub task_id: ::core::option::Option<Identifier>,
-    #[prost(message, optional, tag = "2")]
-    pub node_execution_id: ::core::option::Option<NodeExecutionIdentifier>,
-    #[prost(uint32, tag = "3")]
-    pub retry_attempt: u32,
-}
-/// Encapsulation of fields that uniquely identify a signal.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct SignalIdentifier {
-    /// Unique identifier for a signal.
-    #[prost(string, tag = "1")]
-    pub signal_id: ::prost::alloc::string::String,
-    /// Identifies the Flyte workflow execution this signal belongs to.
-    #[prost(message, optional, tag = "2")]
-    pub execution_id: ::core::option::Option<WorkflowExecutionIdentifier>,
-}
-/// Indicates a resource type within Flyte.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
-#[repr(i32)]
-pub enum ResourceType {
-    Unspecified = 0,
-    Task = 1,
-    Workflow = 2,
-    LaunchPlan = 3,
-    /// A dataset represents an entity modeled in Flyte DataCatalog. A Dataset is also a versioned entity and can be a compilation of multiple individual objects.
-    /// Eventually all Catalog objects should be modeled similar to Flyte Objects. The Dataset entities make it possible for the UI and CLI to act on the objects
-    /// in a similar manner to other Flyte objects
-    Dataset = 4,
-}
-impl ResourceType {
-    /// String value of the enum field names used in the ProtoBuf definition.
-    ///
-    /// The values are not transformed in any way and thus are considered stable
-    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-    pub fn as_str_name(&self) -> &'static str {
-        match self {
-            ResourceType::Unspecified => "UNSPECIFIED",
-            ResourceType::Task => "TASK",
-            ResourceType::Workflow => "WORKFLOW",
-            ResourceType::LaunchPlan => "LAUNCH_PLAN",
-            ResourceType::Dataset => "DATASET",
-        }
-    }
-    /// Creates an enum from field names used in the ProtoBuf definition.
-    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-        match value {
-            "UNSPECIFIED" => Some(Self::Unspecified),
-            "TASK" => Some(Self::Task),
-            "WORKFLOW" => Some(Self::Workflow),
-            "LAUNCH_PLAN" => Some(Self::LaunchPlan),
-            "DATASET" => Some(Self::Dataset),
-            _ => None,
-        }
-    }
-}
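// Editor's sketch (not part of this diff): the identifier messages above nest
// workflow execution -> node -> task attempt; project/domain/name values are
// illustrative assumptions.
fn example_task_execution_id() -> TaskExecutionIdentifier {
    TaskExecutionIdentifier {
        task_id: Some(Identifier {
            resource_type: ResourceType::Task as i32,
            project: "flytesnacks".to_string(),
            domain: "development".to_string(),
            name: "my_task".to_string(),
            version: "v1".to_string(),
            org: String::new(),
        }),
        node_execution_id: Some(NodeExecutionIdentifier {
            node_id: "n0".to_string(),
            execution_id: Some(WorkflowExecutionIdentifier {
                project: "flytesnacks".to_string(),
                domain: "development".to_string(),
                name: "abc123".to_string(),
                org: String::new(),
            }),
        }),
        retry_attempt: 0,
    }
}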
-/// Defines schema columns and types to strongly type-validate schemas interoperability.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct SchemaType {
-    /// A list of ordered columns this schema comprises.
-    #[prost(message, repeated, tag = "3")]
-    pub columns: ::prost::alloc::vec::Vec<schema_type::SchemaColumn>,
-}
-/// Nested message and enum types in `SchemaType`.
-pub mod schema_type {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Message)]
-    pub struct SchemaColumn {
-        /// A unique name, within the schema type, for the column
-        #[prost(string, tag = "1")]
-        pub name: ::prost::alloc::string::String,
-        /// The column type. This allows a limited set of types currently.
-        #[prost(enumeration = "schema_column::SchemaColumnType", tag = "2")]
-        pub r#type: i32,
-    }
-    /// Nested message and enum types in `SchemaColumn`.
-    pub mod schema_column {
-        #[derive(
-            Clone,
-            Copy,
-            Debug,
-            PartialEq,
-            Eq,
-            Hash,
-            PartialOrd,
-            Ord,
-            ::prost::Enumeration
-        )]
-        #[repr(i32)]
-        pub enum SchemaColumnType {
-            Integer = 0,
-            Float = 1,
-            String = 2,
-            Boolean = 3,
-            Datetime = 4,
-            Duration = 5,
-        }
-        impl SchemaColumnType {
-            /// String value of the enum field names used in the ProtoBuf definition.
-            ///
-            /// The values are not transformed in any way and thus are considered stable
-            /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-            pub fn as_str_name(&self) -> &'static str {
-                match self {
-                    SchemaColumnType::Integer => "INTEGER",
-                    SchemaColumnType::Float => "FLOAT",
-                    SchemaColumnType::String => "STRING",
-                    SchemaColumnType::Boolean => "BOOLEAN",
-                    SchemaColumnType::Datetime => "DATETIME",
-                    SchemaColumnType::Duration => "DURATION",
-                }
-            }
-            /// Creates an enum from field names used in the ProtoBuf definition.
-            pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-                match value {
-                    "INTEGER" => Some(Self::Integer),
-                    "FLOAT" => Some(Self::Float),
-                    "STRING" => Some(Self::String),
-                    "BOOLEAN" => Some(Self::Boolean),
-                    "DATETIME" => Some(Self::Datetime),
-                    "DURATION" => Some(Self::Duration),
-                    _ => None,
-                }
-            }
-        }
-    }
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct StructuredDatasetType {
-    /// A list of ordered columns this schema comprises.
-    #[prost(message, repeated, tag = "1")]
-    pub columns: ::prost::alloc::vec::Vec<structured_dataset_type::DatasetColumn>,
-    /// This is the storage format, the format of the bits at rest
-    /// parquet, feather, csv, etc.
-    /// For two types to be compatible, the format will need to be an exact match.
-    #[prost(string, tag = "2")]
-    pub format: ::prost::alloc::string::String,
-    /// This is a string representing the type that the bytes in external_schema_bytes are formatted in.
-    /// This is an optional field that will not be used for type checking.
-    #[prost(string, tag = "3")]
-    pub external_schema_type: ::prost::alloc::string::String,
-    /// The serialized bytes of a third-party schema library like Arrow.
-    /// This is an optional field that will not be used for type checking.
-    #[prost(bytes = "vec", tag = "4")]
-    pub external_schema_bytes: ::prost::alloc::vec::Vec<u8>,
-}
-/// Nested message and enum types in `StructuredDatasetType`.
-pub mod structured_dataset_type {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Message)]
-    pub struct DatasetColumn {
-        /// A unique name within the schema type for the column.
-        #[prost(string, tag = "1")]
-        pub name: ::prost::alloc::string::String,
-        /// The column type.
-        #[prost(message, optional, tag = "2")]
-        pub literal_type: ::core::option::Option<super::LiteralType>,
-    }
-}
-/// Defines type behavior for blob objects
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct BlobType {
-    /// Format can be a free form string understood by SDK/UI etc like
-    /// csv, parquet etc
-    #[prost(string, tag = "1")]
-    pub format: ::prost::alloc::string::String,
-    #[prost(enumeration = "blob_type::BlobDimensionality", tag = "2")]
-    pub dimensionality: i32,
-}
-/// Nested message and enum types in `BlobType`.
-pub mod blob_type {
-    #[derive(
-        Clone,
-        Copy,
-        Debug,
-        PartialEq,
-        Eq,
-        Hash,
-        PartialOrd,
-        Ord,
-        ::prost::Enumeration
-    )]
-    #[repr(i32)]
-    pub enum BlobDimensionality {
-        Single = 0,
-        Multipart = 1,
-    }
-    impl BlobDimensionality {
-        /// String value of the enum field names used in the ProtoBuf definition.
-        ///
-        /// The values are not transformed in any way and thus are considered stable
-        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-        pub fn as_str_name(&self) -> &'static str {
-            match self {
-                BlobDimensionality::Single => "SINGLE",
-                BlobDimensionality::Multipart => "MULTIPART",
-            }
-        }
-        /// Creates an enum from field names used in the ProtoBuf definition.
-        pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-            match value {
-                "SINGLE" => Some(Self::Single),
-                "MULTIPART" => Some(Self::Multipart),
-                _ => None,
-            }
-        }
-    }
-}
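// Editor's sketch (not part of this diff): a single-part CSV blob type as encoded by
// the messages above (the dimensionality enum is carried as i32).
fn csv_blob_type() -> BlobType {
    BlobType {
        format: "csv".to_string(),
        dimensionality: blob_type::BlobDimensionality::Single as i32,
    }
}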
-/// Enables declaring enum types, with predefined string values
-/// For len(values) > 0, the first value in the ordered list is regarded as the default value. If you wish
-/// to provide no defaults, make the first value undefined.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct EnumType {
-    /// Predefined set of enum values.
-    #[prost(string, repeated, tag = "1")]
-    pub values: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
-}
-/// Defines a tagged union type, also known as a variant (and formally as a sum type).
-///
-/// A sum type S is defined by a sequence of types (A, B, C, ...), each tagged by a string tag.
-/// A value of type S is constructed from a value of any of the variant types. The specific choice of type is recorded by
-/// storing the variant's tag with the literal value and can be examined at runtime.
-///
-/// Type S is typically written as
-/// S := Apple A | Banana B | Cantaloupe C | ...
-///
-/// Notably, a nullable (optional) type is a sum type between some type X and the singleton type representing a null-value:
-/// Optional X := X | Null
-///
-/// See also:
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct UnionType {
-    /// Predefined set of variants in union.
-    #[prost(message, repeated, tag = "1")]
-    pub variants: ::prost::alloc::vec::Vec<LiteralType>,
-}
-/// Hints to improve type matching
-/// e.g. allows distinguishing output from custom type transformers
-/// even if the underlying IDL serialization matches.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct TypeStructure {
-    /// Must exactly match for types to be castable
-    #[prost(string, tag = "1")]
-    pub tag: ::prost::alloc::string::String,
-    /// dataclass_type only exists for dataclasses.
-    /// This is used to resolve the type of the fields of dataclass
-    /// The key is the field name, and the value is the literal type of the field
-    /// e.g. For dataclass Foo, with fields a, and a is a string
-    /// Foo.a will be resolved as a literal type of string from dataclass_type
-    #[prost(map = "string, message", tag = "2")]
-    pub dataclass_type: ::std::collections::HashMap<
-        ::prost::alloc::string::String,
-        LiteralType,
-    >,
-}
-/// TypeAnnotation encapsulates registration time information about a type. This can be used for various control-plane operations. TypeAnnotation will not be available at runtime when a task runs.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct TypeAnnotation {
-    /// An arbitrary JSON payload to describe a type.
-    #[prost(message, optional, tag = "1")]
-    pub annotations: ::core::option::Option<::prost_types::Struct>,
-}
-/// Defines a strong type to allow type checking between interfaces.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct LiteralType {
-    /// This field contains type metadata that is descriptive of the type, but is NOT considered in type-checking. This might be used by
-    /// consumers to identify special behavior or display extended information for the type.
-    #[prost(message, optional, tag = "6")]
-    pub metadata: ::core::option::Option<::prost_types::Struct>,
-    /// This field contains arbitrary data that might have special semantic
-    /// meaning for the client but does not affect internal flyte behavior.
-    #[prost(message, optional, tag = "9")]
-    pub annotation: ::core::option::Option<TypeAnnotation>,
-    /// Hints to improve type matching.
-    #[prost(message, optional, tag = "11")]
-    pub structure: ::core::option::Option<TypeStructure>,
-    #[prost(oneof = "literal_type::Type", tags = "1, 2, 3, 4, 5, 7, 8, 10")]
-    pub r#type: ::core::option::Option<literal_type::Type>,
-}
-/// Nested message and enum types in `LiteralType`.
-pub mod literal_type {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum Type {
-        /// A simple type that can be compared one-to-one with another.
-        #[prost(enumeration = "super::SimpleType", tag = "1")]
-        Simple(i32),
-        /// A complex type that requires matching of inner fields.
-        #[prost(message, tag = "2")]
-        Schema(super::SchemaType),
-        /// Defines the type of the value of a collection. Only homogeneous collections are allowed.
-        #[prost(message, tag = "3")]
-        CollectionType(::prost::alloc::boxed::Box<super::LiteralType>),
-        /// Defines the type of the value of a map type. The type of the key is always a string.
-        #[prost(message, tag = "4")]
-        MapValueType(::prost::alloc::boxed::Box<super::LiteralType>),
-        /// A blob might have specialized implementation details depending on associated metadata.
-        #[prost(message, tag = "5")]
-        Blob(super::BlobType),
-        /// Defines an enum with pre-defined string values.
-        #[prost(message, tag = "7")]
-        EnumType(super::EnumType),
-        /// Generalized schema support
-        #[prost(message, tag = "8")]
-        StructuredDatasetType(super::StructuredDatasetType),
-        /// Defines a union type with pre-defined LiteralTypes.
-        #[prost(message, tag = "10")]
-        UnionType(super::UnionType),
-    }
-}
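// Editor's sketch (not part of this diff): LiteralType composes recursively, so a
// list-of-strings type boxes its element type exactly as the oneof above requires.
fn string_list_type() -> LiteralType {
    let element = LiteralType {
        metadata: None,
        annotation: None,
        structure: None,
        r#type: Some(literal_type::Type::Simple(SimpleType::String as i32)),
    };
    LiteralType {
        metadata: None,
        annotation: None,
        structure: None,
        r#type: Some(literal_type::Type::CollectionType(Box::new(element))),
    }
}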
-/// A reference to an output produced by a node. The type can be retrieved -and validated- from
-/// the underlying interface of the node.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct OutputReference {
-    /// Node id must exist at the graph layer.
-    #[prost(string, tag = "1")]
-    pub node_id: ::prost::alloc::string::String,
-    /// Variable name must refer to an output variable for the node.
-    #[prost(string, tag = "2")]
-    pub var: ::prost::alloc::string::String,
-    #[prost(message, repeated, tag = "3")]
-    pub attr_path: ::prost::alloc::vec::Vec<PromiseAttribute>,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct PromiseAttribute {
-    #[prost(oneof = "promise_attribute::Value", tags = "1, 2")]
-    pub value: ::core::option::Option<promise_attribute::Value>,
-}
-/// Nested message and enum types in `PromiseAttribute`.
-pub mod promise_attribute {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum Value {
-        #[prost(string, tag = "1")]
-        StringValue(::prost::alloc::string::String),
-        #[prost(int32, tag = "2")]
-        IntValue(i32),
-    }
-}
-/// Represents an error thrown from a node.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Error {
-    /// The node id that threw the error.
-    #[prost(string, tag = "1")]
-    pub failed_node_id: ::prost::alloc::string::String,
-    /// Error message thrown.
-    #[prost(string, tag = "2")]
-    pub message: ::prost::alloc::string::String,
-}
-/// Define a set of simple types.
-#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
-#[repr(i32)]
-pub enum SimpleType {
-    None = 0,
-    Integer = 1,
-    Float = 2,
-    String = 3,
-    Boolean = 4,
-    Datetime = 5,
-    Duration = 6,
-    Binary = 7,
-    Error = 8,
-    Struct = 9,
-}
-impl SimpleType {
-    /// String value of the enum field names used in the ProtoBuf definition.
-    ///
-    /// The values are not transformed in any way and thus are considered stable
-    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-    pub fn as_str_name(&self) -> &'static str {
-        match self {
-            SimpleType::None => "NONE",
-            SimpleType::Integer => "INTEGER",
-            SimpleType::Float => "FLOAT",
-            SimpleType::String => "STRING",
-            SimpleType::Boolean => "BOOLEAN",
-            SimpleType::Datetime => "DATETIME",
-            SimpleType::Duration => "DURATION",
-            SimpleType::Binary => "BINARY",
-            SimpleType::Error => "ERROR",
-            SimpleType::Struct => "STRUCT",
-        }
-    }
-    /// Creates an enum from field names used in the ProtoBuf definition.
-    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
-        match value {
-            "NONE" => Some(Self::None),
-            "INTEGER" => Some(Self::Integer),
-            "FLOAT" => Some(Self::Float),
-            "STRING" => Some(Self::String),
-            "BOOLEAN" => Some(Self::Boolean),
-            "DATETIME" => Some(Self::Datetime),
-            "DURATION" => Some(Self::Duration),
-            "BINARY" => Some(Self::Binary),
-            "ERROR" => Some(Self::Error),
-            "STRUCT" => Some(Self::Struct),
-            _ => None,
-        }
-    }
-}
-/// Primitive Types
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Primitive {
-    /// Defines one of simple primitive types. These types will get translated into different programming languages as
-    /// described in
-    #[prost(oneof = "primitive::Value", tags = "1, 2, 3, 4, 5, 6")]
-    pub value: ::core::option::Option<primitive::Value>,
-}
-/// Nested message and enum types in `Primitive`.
-pub mod primitive {
-    /// Defines one of simple primitive types. These types will get translated into different programming languages as
-    /// described in
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum Value {
-        #[prost(int64, tag = "1")]
-        Integer(i64),
-        #[prost(double, tag = "2")]
-        FloatValue(f64),
-        #[prost(string, tag = "3")]
-        StringValue(::prost::alloc::string::String),
-        #[prost(bool, tag = "4")]
-        Boolean(bool),
-        #[prost(message, tag = "5")]
-        Datetime(::prost_types::Timestamp),
-        #[prost(message, tag = "6")]
-        Duration(::prost_types::Duration),
-    }
-}
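// Editor's sketch (not part of this diff): primitives wrap their oneof in an Option,
// mirroring the pattern above.
fn int_primitive(v: i64) -> Primitive {
    Primitive {
        value: Some(primitive::Value::Integer(v)),
    }
}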
-/// Used to denote a nil/null/None assignment to a scalar value. The underlying LiteralType for Void is intentionally
-/// undefined since it can be assigned to a scalar of any LiteralType.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Void {}
-/// Refers to an offloaded set of files. It encapsulates the type of the store and a unique uri for where the data is.
-/// There are no restrictions on how the uri is formatted since it will depend on how to interact with the store.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Blob {
-    #[prost(message, optional, tag = "1")]
-    pub metadata: ::core::option::Option<BlobMetadata>,
-    #[prost(string, tag = "3")]
-    pub uri: ::prost::alloc::string::String,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct BlobMetadata {
-    #[prost(message, optional, tag = "1")]
-    pub r#type: ::core::option::Option<BlobType>,
-}
-/// A simple byte array with a tag to help different parts of the system communicate about what is in the byte array.
-/// It's strongly advisable that consumers of this type define a unique tag and validate the tag before parsing the data.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Binary {
-    #[prost(bytes = "vec", tag = "1")]
-    pub value: ::prost::alloc::vec::Vec<u8>,
-    #[prost(string, tag = "2")]
-    pub tag: ::prost::alloc::string::String,
-}
-/// A strongly typed schema that defines the interface of data retrieved from the underlying storage medium.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Schema {
-    #[prost(string, tag = "1")]
-    pub uri: ::prost::alloc::string::String,
-    #[prost(message, optional, tag = "3")]
-    pub r#type: ::core::option::Option<SchemaType>,
-}
-/// The runtime representation of a tagged union value. See `UnionType` for more details.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Union {
-    #[prost(message, optional, boxed, tag = "1")]
-    pub value: ::core::option::Option<::prost::alloc::boxed::Box<Literal>>,
-    #[prost(message, optional, tag = "2")]
-    pub r#type: ::core::option::Option<LiteralType>,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct StructuredDatasetMetadata {
-    /// Bundle the type information along with the literal.
-    /// This is here because StructuredDatasets can often be more defined at run time than at compile time.
-    /// That is, at compile time you might only declare a task to return a pandas dataframe or a StructuredDataset,
-    /// without any column information, but at run time, you might have that column information.
-    /// flytekit python will copy this type information into the literal, from the type information, if not provided by
-    /// the various plugins (encoders).
-    /// Since this field is run time generated, it's not used for any type checking.
-    #[prost(message, optional, tag = "1")]
-    pub structured_dataset_type: ::core::option::Option<StructuredDatasetType>,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct StructuredDataset {
-    /// String location uniquely identifying where the data is.
-    /// Should start with the storage location (e.g. s3://, gs://, bq://, etc.)
-    #[prost(string, tag = "1")]
-    pub uri: ::prost::alloc::string::String,
-    #[prost(message, optional, tag = "2")]
-    pub metadata: ::core::option::Option<StructuredDatasetMetadata>,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Scalar {
-    #[prost(oneof = "scalar::Value", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9")]
-    pub value: ::core::option::Option<scalar::Value>,
-}
-/// Nested message and enum types in `Scalar`.
-pub mod scalar {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum Value {
-        #[prost(message, tag = "1")]
-        Primitive(super::Primitive),
-        #[prost(message, tag = "2")]
-        Blob(super::Blob),
-        #[prost(message, tag = "3")]
-        Binary(super::Binary),
-        #[prost(message, tag = "4")]
-        Schema(super::Schema),
-        #[prost(message, tag = "5")]
-        NoneType(super::Void),
-        #[prost(message, tag = "6")]
-        Error(super::Error),
-        #[prost(message, tag = "7")]
-        Generic(::prost_types::Struct),
-        #[prost(message, tag = "8")]
-        StructuredDataset(super::StructuredDataset),
-        #[prost(message, tag = "9")]
-        Union(::prost::alloc::boxed::Box<super::Union>),
-    }
-}
-/// A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Literal {
-    /// A hash representing this literal.
-    /// This is used for caching purposes. For more details refer to RFC 1893
-    /// ()
-    #[prost(string, tag = "4")]
-    pub hash: ::prost::alloc::string::String,
-    /// Additional metadata for literals.
-    #[prost(map = "string, string", tag = "5")]
-    pub metadata: ::std::collections::HashMap<
-        ::prost::alloc::string::String,
-        ::prost::alloc::string::String,
-    >,
-    #[prost(oneof = "literal::Value", tags = "1, 2, 3")]
-    pub value: ::core::option::Option<literal::Value>,
-}
-/// Nested message and enum types in `Literal`.
-pub mod literal {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum Value {
-        /// A simple value.
-        #[prost(message, tag = "1")]
-        Scalar(::prost::alloc::boxed::Box<super::Scalar>),
-        /// A collection of literals to allow nesting.
-        #[prost(message, tag = "2")]
-        Collection(super::LiteralCollection),
-        /// A map of strings to literals.
-        #[prost(message, tag = "3")]
-        Map(super::LiteralMap),
-    }
-}
-/// A collection of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct LiteralCollection {
-    #[prost(message, repeated, tag = "1")]
-    pub literals: ::prost::alloc::vec::Vec<Literal>,
-}
-/// A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct LiteralMap {
-    #[prost(map = "string, message", tag = "1")]
-    pub literals: ::std::collections::HashMap<::prost::alloc::string::String, Literal>,
-}
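// Editor's sketch (not part of this diff): a one-entry input map showing how
// Literal / Scalar / Primitive nest; note the boxed Scalar required by the oneof.
fn single_string_input(name: &str, s: &str) -> LiteralMap {
    let literal = Literal {
        hash: String::new(),
        metadata: ::std::collections::HashMap::new(),
        value: Some(literal::Value::Scalar(Box::new(Scalar {
            value: Some(scalar::Value::Primitive(Primitive {
                value: Some(primitive::Value::StringValue(s.to_string())),
            })),
        }))),
    };
    LiteralMap {
        literals: ::std::collections::HashMap::from([(name.to_string(), literal)]),
    }
}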
-/// A collection of BindingData items.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct BindingDataCollection {
-    #[prost(message, repeated, tag = "1")]
-    pub bindings: ::prost::alloc::vec::Vec<BindingData>,
-}
-/// A map of BindingData items.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct BindingDataMap {
-    #[prost(map = "string, message", tag = "1")]
-    pub bindings: ::std::collections::HashMap<
-        ::prost::alloc::string::String,
-        BindingData,
-    >,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct UnionInfo {
-    #[prost(message, optional, tag = "1")]
-    pub target_type: ::core::option::Option<LiteralType>,
-}
-/// Specifies either a simple value or a reference to another output.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct BindingData {
-    #[prost(message, optional, tag = "5")]
-    pub union: ::core::option::Option<UnionInfo>,
-    #[prost(oneof = "binding_data::Value", tags = "1, 2, 3, 4")]
-    pub value: ::core::option::Option<binding_data::Value>,
-}
-/// Nested message and enum types in `BindingData`.
-pub mod binding_data {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum Value {
-        /// A simple scalar value.
-        #[prost(message, tag = "1")]
-        Scalar(super::Scalar),
-        /// A collection of binding data. This allows nesting of binding data to any number
-        /// of levels.
-        #[prost(message, tag = "2")]
-        Collection(super::BindingDataCollection),
-        /// References an output promised by another node.
-        #[prost(message, tag = "3")]
-        Promise(super::OutputReference),
-        /// A map of bindings. The key is always a string.
-        #[prost(message, tag = "4")]
-        Map(super::BindingDataMap),
-    }
-}
-/// An input/output binding of a variable to either static value or a node output.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Binding {
-    /// Variable name must match an input/output variable of the node.
-    #[prost(string, tag = "1")]
-    pub var: ::prost::alloc::string::String,
-    /// Data to use to bind this variable.
-    #[prost(message, optional, tag = "2")]
-    pub binding: ::core::option::Option<BindingData>,
-}
-/// A generic key value pair.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct KeyValuePair {
-    /// required.
-    #[prost(string, tag = "1")]
-    pub key: ::prost::alloc::string::String,
-    /// +optional.
-    #[prost(string, tag = "2")]
-    pub value: ::prost::alloc::string::String,
-}
-/// Retry strategy associated with an executable unit.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct RetryStrategy {
-    /// Number of retries. Retries will be consumed when the job fails with a recoverable error.
-    /// The number of retries must be less than or equal to 10.
-    #[prost(uint32, tag = "5")]
-    pub retries: u32,
-}
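// Editor's sketch (not part of this diff): binding a downstream input to an upstream
// node's output via the Promise variant; variable and node ids are illustrative.
fn bind_to_node_output(var: &str, node_id: &str, output_var: &str) -> Binding {
    Binding {
        var: var.to_string(),
        binding: Some(BindingData {
            union: None,
            value: Some(binding_data::Value::Promise(OutputReference {
                node_id: node_id.to_string(),
                var: output_var.to_string(),
                attr_path: Vec::new(),
            })),
        }),
    }
}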
- #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - #[prost(string, tag = "2")] - pub domain: ::prost::alloc::string::String, - #[prost(string, tag = "3")] - pub name: ::prost::alloc::string::String, - #[prost(string, tag = "4")] - pub org: ::prost::alloc::string::String, -} -/// Only valid for triggers -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ArtifactBindingData { - /// This is only relevant in the time partition case - #[prost(message, optional, tag = "7")] - pub time_transform: ::core::option::Option, - /// These two fields are only relevant in the partition value case - #[prost(oneof = "artifact_binding_data::PartitionData", tags = "5, 6")] - pub partition_data: ::core::option::Option, -} -/// Nested message and enum types in `ArtifactBindingData`. -pub mod artifact_binding_data { - /// These two fields are only relevant in the partition value case - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum PartitionData { - #[prost(string, tag = "5")] - PartitionKey(::prost::alloc::string::String), - #[prost(bool, tag = "6")] - BindToTimePartition(bool), - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TimeTransform { - #[prost(string, tag = "1")] - pub transform: ::prost::alloc::string::String, - #[prost(enumeration = "Operator", tag = "2")] - pub op: i32, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct InputBindingData { - #[prost(string, tag = "1")] - pub var: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct RuntimeBinding {} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LabelValue { - #[prost(oneof = "label_value::Value", tags = "1, 2, 3, 4, 5")] - pub value: ::core::option::Option, -} -/// Nested message and enum types in `LabelValue`. 
-pub mod label_value {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum Value {
-        /// The string static value is for use in the Partitions object
-        #[prost(string, tag = "1")]
-        StaticValue(::prost::alloc::string::String),
-        /// The time value is for use in the TimePartition case
-        #[prost(message, tag = "2")]
-        TimeValue(::prost_types::Timestamp),
-        #[prost(message, tag = "3")]
-        TriggeredBinding(super::ArtifactBindingData),
-        #[prost(message, tag = "4")]
-        InputBinding(super::InputBindingData),
-        #[prost(message, tag = "5")]
-        RuntimeBinding(super::RuntimeBinding),
-    }
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Partitions {
-    #[prost(map = "string, message", tag = "1")]
-    pub value: ::std::collections::HashMap<::prost::alloc::string::String, LabelValue>,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct TimePartition {
-    #[prost(message, optional, tag = "1")]
-    pub value: ::core::option::Option<LabelValue>,
-    #[prost(enumeration = "Granularity", tag = "2")]
-    pub granularity: i32,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ArtifactId {
-    #[prost(message, optional, tag = "1")]
-    pub artifact_key: ::core::option::Option<ArtifactKey>,
-    #[prost(string, tag = "2")]
-    pub version: ::prost::alloc::string::String,
-    /// Think of a partition as a tag on an Artifact, except it's a key-value pair.
-    /// Different partitions naturally have different versions (execution ids).
-    #[prost(message, optional, tag = "3")]
-    pub partitions: ::core::option::Option<Partitions>,
-    /// There is no such thing as an empty time partition - if it's not set, then there is no time partition.
-    #[prost(message, optional, tag = "4")]
-    pub time_partition: ::core::option::Option<TimePartition>,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ArtifactTag {
-    #[prost(message, optional, tag = "1")]
-    pub artifact_key: ::core::option::Option<ArtifactKey>,
-    #[prost(message, optional, tag = "2")]
-    pub value: ::core::option::Option<LabelValue>,
-}
-/// Uniqueness constraints for Artifacts
-///   - project, domain, name, version, partitions
-/// Option 2 (tags are standalone, point to an individual artifact id):
-///   - project, domain, name, alias (points to one partition if partitioned)
-///   - project, domain, name, partition key, partition value
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ArtifactQuery {
-    #[prost(oneof = "artifact_query::Identifier", tags = "1, 2, 3, 4")]
-    pub identifier: ::core::option::Option<artifact_query::Identifier>,
-}
-/// Nested message and enum types in `ArtifactQuery`.
-pub mod artifact_query {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum Identifier {
-        #[prost(message, tag = "1")]
-        ArtifactId(super::ArtifactId),
-        #[prost(message, tag = "2")]
-        ArtifactTag(super::ArtifactTag),
-        #[prost(string, tag = "3")]
-        Uri(::prost::alloc::string::String),
-        /// This is used in the trigger case, where a user specifies a value for an input that is one of the triggering
-        /// artifacts, or a partition value derived from a triggering artifact.
- #[prost(message, tag = "4")] - Binding(super::ArtifactBindingData), - } -} -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum Granularity { - Unset = 0, - Minute = 1, - Hour = 2, - /// default - Day = 3, - Month = 4, -} -impl Granularity { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Granularity::Unset => "UNSET", - Granularity::Minute => "MINUTE", - Granularity::Hour => "HOUR", - Granularity::Day => "DAY", - Granularity::Month => "MONTH", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNSET" => Some(Self::Unset), - "MINUTE" => Some(Self::Minute), - "HOUR" => Some(Self::Hour), - "DAY" => Some(Self::Day), - "MONTH" => Some(Self::Month), - _ => None, - } - } -} -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum Operator { - Minus = 0, - Plus = 1, -} -impl Operator { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Operator::Minus => "MINUS", - Operator::Plus => "PLUS", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "MINUS" => Some(Self::Minus), - "PLUS" => Some(Self::Plus), - _ => None, - } - } -} -/// Defines a strongly typed variable. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Variable { - /// Variable literal type. - #[prost(message, optional, tag = "1")] - pub r#type: ::core::option::Option, - /// +optional string describing input variable - #[prost(string, tag = "2")] - pub description: ::prost::alloc::string::String, - /// +optional This object allows the user to specify how Artifacts are created. - /// name, tag, partitions can be specified. The other fields (version and project/domain) are ignored. - #[prost(message, optional, tag = "3")] - pub artifact_partial_id: ::core::option::Option, - #[prost(message, optional, tag = "4")] - pub artifact_tag: ::core::option::Option, -} -/// A map of Variables -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct VariableMap { - /// Defines a map of variable names to variables. - #[prost(map = "string, message", tag = "1")] - pub variables: ::std::collections::HashMap<::prost::alloc::string::String, Variable>, -} -/// Defines strongly typed inputs and outputs. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TypedInterface { - #[prost(message, optional, tag = "1")] - pub inputs: ::core::option::Option, - #[prost(message, optional, tag = "2")] - pub outputs: ::core::option::Option, -} -/// A parameter is used as input to a launch plan and has -/// the special ability to have a default value or mark itself as required. 
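// Editor's note: a small sketch, not part of the generated file, showing the
// `as_str_name`/`from_str_name` helpers prost emits for every enum, and the `i32`
// representation enum-typed fields use on the wire. Names are illustrative.
fn _example_enum_roundtrip() {
    let g = Granularity::from_str_name("DAY").unwrap_or(Granularity::Unset);
    assert_eq!(g.as_str_name(), "DAY");
    // Enum-typed proto fields are plain i32s, so a cast bridges the two representations.
    let tp = TimePartition { value: None, granularity: Granularity::Day as i32 };
    assert_eq!(tp.granularity, Granularity::Day as i32);
}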
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Parameter { - /// +required Variable. Defines the type of the variable backing this parameter. - #[prost(message, optional, tag = "1")] - pub var: ::core::option::Option, - /// +optional - #[prost(oneof = "parameter::Behavior", tags = "2, 3, 4, 5")] - pub behavior: ::core::option::Option, -} -/// Nested message and enum types in `Parameter`. -pub mod parameter { - /// +optional - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Behavior { - /// Defines a default value that has to match the variable type defined. - #[prost(message, tag = "2")] - Default(super::Literal), - /// +optional, is this value required to be filled. - #[prost(bool, tag = "3")] - Required(bool), - /// This is an execution time search basically that should result in exactly one Artifact with a Type that - /// matches the type of the variable. - #[prost(message, tag = "4")] - ArtifactQuery(super::ArtifactQuery), - #[prost(message, tag = "5")] - ArtifactId(super::ArtifactId), - } -} -/// A map of Parameters. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ParameterMap { - /// Defines a map of parameter names to parameters. - #[prost(map = "string, message", tag = "1")] - pub parameters: ::std::collections::HashMap< - ::prost::alloc::string::String, - Parameter, - >, -} -/// Secret encapsulates information about the secret a task needs to proceed. An environment variable -/// FLYTE_SECRETS_ENV_PREFIX will be passed to indicate the prefix of the environment variables that will be present if -/// secrets are passed through environment variables. -/// FLYTE_SECRETS_DEFAULT_DIR will be passed to indicate the prefix of the path where secrets will be mounted if secrets -/// are passed through file mounts. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Secret { - /// The name of the secret group where to find the key referenced below. For K8s secrets, this should be the name of - /// the v1/secret object. For Confidant, this should be the Credential name. For Vault, this should be the secret name. - /// For AWS Secret Manager, this should be the name of the secret. - /// +required - #[prost(string, tag = "1")] - pub group: ::prost::alloc::string::String, - /// The group version to fetch. This is not supported in all secret management systems. It'll be ignored for the ones - /// that do not support it. - /// +optional - #[prost(string, tag = "2")] - pub group_version: ::prost::alloc::string::String, - /// The name of the secret to mount. This has to match an existing secret in the system. It's up to the implementation - /// of the secret management system to require case sensitivity. For K8s secrets, Confidant and Vault, this should - /// match one of the keys inside the secret. For AWS Secret Manager, it's ignored. - /// +optional - #[prost(string, tag = "3")] - pub key: ::prost::alloc::string::String, - /// mount_requirement is optional. Indicates where the secret has to be mounted. If provided, the execution will fail - /// if the underlying key management system cannot satisfy that requirement. If not provided, the default location - /// will depend on the key management system. - /// +optional - #[prost(enumeration = "secret::MountType", tag = "4")] - pub mount_requirement: i32, -} -/// Nested message and enum types in `Secret`. 
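// Editor's note: a hedged sketch (not part of the generated file) of requesting a secret
// mounted as an environment variable; the group/key values are examples only.
fn _example_secret() -> Secret {
    Secret {
        group: "my-secret-group".to_string(), // e.g. the name of the K8s v1/Secret object
        group_version: String::new(),         // optional; ignored by backends without versioning
        key: "api_token".to_string(),         // key inside the secret object
        mount_requirement: secret::MountType::EnvVar as i32,
    }
}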
-pub mod secret { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum MountType { - /// Default case, indicates the client can tolerate either mounting options. - Any = 0, - /// ENV_VAR indicates the secret needs to be mounted as an environment variable. - EnvVar = 1, - /// FILE indicates the secret needs to be mounted as a file. - File = 2, - } - impl MountType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - MountType::Any => "ANY", - MountType::EnvVar => "ENV_VAR", - MountType::File => "FILE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "ANY" => Some(Self::Any), - "ENV_VAR" => Some(Self::EnvVar), - "FILE" => Some(Self::File), - _ => None, - } - } - } -} -/// OAuth2Client encapsulates OAuth2 Client Credentials to be used when making calls on behalf of that task. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct OAuth2Client { - /// client_id is the public id for the client to use. The system will not perform any pre-auth validation that the - /// secret requested matches the client_id indicated here. - /// +required - #[prost(string, tag = "1")] - pub client_id: ::prost::alloc::string::String, - /// client_secret is a reference to the secret used to authenticate the OAuth2 client. - /// +required - #[prost(message, optional, tag = "2")] - pub client_secret: ::core::option::Option, -} -/// Identity encapsulates the various security identities a task can run as. It's up to the underlying plugin to pick the -/// right identity for the execution environment. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Identity { - /// iam_role references the fully qualified name of Identity & Access Management role to impersonate. - #[prost(string, tag = "1")] - pub iam_role: ::prost::alloc::string::String, - /// k8s_service_account references a kubernetes service account to impersonate. - #[prost(string, tag = "2")] - pub k8s_service_account: ::prost::alloc::string::String, - /// oauth2_client references an oauth2 client. Backend plugins can use this information to impersonate the client when - /// making external calls. - #[prost(message, optional, tag = "3")] - pub oauth2_client: ::core::option::Option, - /// execution_identity references the subject who makes the execution - #[prost(string, tag = "4")] - pub execution_identity: ::prost::alloc::string::String, -} -/// OAuth2TokenRequest encapsulates information needed to request an OAuth2 token. -/// FLYTE_TOKENS_ENV_PREFIX will be passed to indicate the prefix of the environment variables that will be present if -/// tokens are passed through environment variables. -/// FLYTE_TOKENS_PATH_PREFIX will be passed to indicate the prefix of the path where secrets will be mounted if tokens -/// are passed through file mounts. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct OAuth2TokenRequest { - /// name indicates a unique id for the token request within this task token requests. 
It'll be used as a suffix for - /// environment variables and as a filename for mounting tokens as files. - /// +required - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// type indicates the type of the request to make. Defaults to CLIENT_CREDENTIALS. - /// +required - #[prost(enumeration = "o_auth2_token_request::Type", tag = "2")] - pub r#type: i32, - /// client references the client_id/secret to use to request the OAuth2 token. - /// +required - #[prost(message, optional, tag = "3")] - pub client: ::core::option::Option, - /// idp_discovery_endpoint references the discovery endpoint used to retrieve token endpoint and other related - /// information. - /// +optional - #[prost(string, tag = "4")] - pub idp_discovery_endpoint: ::prost::alloc::string::String, - /// token_endpoint references the token issuance endpoint. If idp_discovery_endpoint is not provided, this parameter is - /// mandatory. - /// +optional - #[prost(string, tag = "5")] - pub token_endpoint: ::prost::alloc::string::String, -} -/// Nested message and enum types in `OAuth2TokenRequest`. -pub mod o_auth2_token_request { - /// Type of the token requested. - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Type { - /// CLIENT_CREDENTIALS indicates a 2-legged OAuth token requested using client credentials. - ClientCredentials = 0, - } - impl Type { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Type::ClientCredentials => "CLIENT_CREDENTIALS", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "CLIENT_CREDENTIALS" => Some(Self::ClientCredentials), - _ => None, - } - } - } -} -/// SecurityContext holds security attributes that apply to tasks. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SecurityContext { - /// run_as encapsulates the identity a pod should run as. If the task fills in multiple fields here, it'll be up to the - /// backend plugin to choose the appropriate identity for the execution engine the task will run on. - #[prost(message, optional, tag = "1")] - pub run_as: ::core::option::Option, - /// secrets indicate the list of secrets the task needs in order to proceed. Secrets will be mounted/passed to the - /// pod as it starts. If the plugin responsible for kicking of the task will not run it on a flyte cluster (e.g. AWS - /// Batch), it's the responsibility of the plugin to fetch the secret (which means propeller identity will need access - /// to the secret) and to pass it to the remote execution engine. - #[prost(message, repeated, tag = "2")] - pub secrets: ::prost::alloc::vec::Vec, - /// tokens indicate the list of token requests the task needs in order to proceed. Tokens will be mounted/passed to the - /// pod as it starts. If the plugin responsible for kicking of the task will not run it on a flyte cluster (e.g. AWS - /// Batch), it's the responsibility of the plugin to fetch the secret (which means propeller identity will need access - /// to the secret) and to pass it to the remote execution engine. 
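    // Editor's note (illustrative, not generated code): a SecurityContext is typically
    // assembled field-by-field, e.g.
    //     SecurityContext { run_as: Some(identity), secrets: vec![secret], tokens: vec![] }
    // where `identity` is an Identity and `secret` a Secret from the messages above; all
    // three fields are independent and may be left empty.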
- #[prost(message, repeated, tag = "3")] - pub tokens: ::prost::alloc::vec::Vec, -} -/// A customizable interface to convey resources requested for a container. This can be interpreted differently for different -/// container engines. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Resources { - /// The desired set of resources requested. ResourceNames must be unique within the list. - #[prost(message, repeated, tag = "1")] - pub requests: ::prost::alloc::vec::Vec, - /// Defines a set of bounds (e.g. min/max) within which the task can reliably run. ResourceNames must be unique - /// within the list. - #[prost(message, repeated, tag = "2")] - pub limits: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `Resources`. -pub mod resources { - /// Encapsulates a resource name and value. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct ResourceEntry { - /// Resource name. - #[prost(enumeration = "ResourceName", tag = "1")] - pub name: i32, - /// Value must be a valid k8s quantity. See - /// - #[prost(string, tag = "2")] - pub value: ::prost::alloc::string::String, - } - /// Known resource names. - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum ResourceName { - Unknown = 0, - Cpu = 1, - Gpu = 2, - Memory = 3, - Storage = 4, - /// For Kubernetes-based deployments, pods use ephemeral local storage for scratch space, caching, and for logs. - EphemeralStorage = 5, - } - impl ResourceName { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - ResourceName::Unknown => "UNKNOWN", - ResourceName::Cpu => "CPU", - ResourceName::Gpu => "GPU", - ResourceName::Memory => "MEMORY", - ResourceName::Storage => "STORAGE", - ResourceName::EphemeralStorage => "EPHEMERAL_STORAGE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "CPU" => Some(Self::Cpu), - "GPU" => Some(Self::Gpu), - "MEMORY" => Some(Self::Memory), - "STORAGE" => Some(Self::Storage), - "EPHEMERAL_STORAGE" => Some(Self::EphemeralStorage), - _ => None, - } - } - } -} -/// Metadata associated with the GPU accelerator to allocate to a task. Contains -/// information about device type, and for multi-instance GPUs, the partition size to -/// use. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GpuAccelerator { - /// This can be any arbitrary string, and should be informed by the labels or taints - /// associated with the nodes in question. Default cloud provider labels typically - /// use the following values: `nvidia-tesla-t4`, `nvidia-tesla-a100`, etc. - #[prost(string, tag = "1")] - pub device: ::prost::alloc::string::String, - #[prost(oneof = "gpu_accelerator::PartitionSizeValue", tags = "2, 3")] - pub partition_size_value: ::core::option::Option< - gpu_accelerator::PartitionSizeValue, - >, -} -/// Nested message and enum types in `GPUAccelerator`. 
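// Editor's note: a minimal sketch (not from the patch) of a CPU/memory request built from
// the `Resources` message above; the quantity strings must be valid k8s quantities.
fn _example_resources() -> Resources {
    let entry = |name: resources::ResourceName, value: &str| resources::ResourceEntry {
        name: name as i32,
        value: value.to_string(),
    };
    Resources {
        requests: vec![
            entry(resources::ResourceName::Cpu, "500m"),
            entry(resources::ResourceName::Memory, "1Gi"),
        ],
        limits: vec![entry(resources::ResourceName::Cpu, "1")],
    }
}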
-pub mod gpu_accelerator { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum PartitionSizeValue { - #[prost(bool, tag = "2")] - Unpartitioned(bool), - /// Like `device`, this can be any arbitrary string, and should be informed by - /// the labels or taints associated with the nodes in question. Default cloud - /// provider labels typically use the following values: `1g.5gb`, `2g.10gb`, etc. - #[prost(string, tag = "3")] - PartitionSize(::prost::alloc::string::String), - } -} -/// Encapsulates all non-standard resources, not captured by v1.ResourceRequirements, to -/// allocate to a task. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExtendedResources { - /// GPU accelerator to select for task. Contains information about device type, and - /// for multi-instance GPUs, the partition size to use. - #[prost(message, optional, tag = "1")] - pub gpu_accelerator: ::core::option::Option, -} -/// Runtime information. This is loosely defined to allow for extensibility. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct RuntimeMetadata { - /// Type of runtime. - #[prost(enumeration = "runtime_metadata::RuntimeType", tag = "1")] - pub r#type: i32, - /// Version of the runtime. All versions should be backward compatible. However, certain cases call for version - /// checks to ensure tighter validation or setting expectations. - #[prost(string, tag = "2")] - pub version: ::prost::alloc::string::String, - /// +optional It can be used to provide extra information about the runtime (e.g. python, golang... etc.). - #[prost(string, tag = "3")] - pub flavor: ::prost::alloc::string::String, -} -/// Nested message and enum types in `RuntimeMetadata`. -pub mod runtime_metadata { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum RuntimeType { - Other = 0, - FlyteSdk = 1, - } - impl RuntimeType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - RuntimeType::Other => "OTHER", - RuntimeType::FlyteSdk => "FLYTE_SDK", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "OTHER" => Some(Self::Other), - "FLYTE_SDK" => Some(Self::FlyteSdk), - _ => None, - } - } - } -} -/// Task Metadata -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskMetadata { - /// Indicates whether the system should attempt to lookup this task's output to avoid duplication of work. - #[prost(bool, tag = "1")] - pub discoverable: bool, - /// Runtime information about the task. - #[prost(message, optional, tag = "2")] - pub runtime: ::core::option::Option, - /// The overall timeout of a task including user-triggered retries. - #[prost(message, optional, tag = "4")] - pub timeout: ::core::option::Option<::prost_types::Duration>, - /// Number of retries per task. - #[prost(message, optional, tag = "5")] - pub retries: ::core::option::Option, - /// Indicates a logical version to apply to this task for the purpose of discovery. 
- #[prost(string, tag = "6")] - pub discovery_version: ::prost::alloc::string::String, - /// If set, this indicates that this task is deprecated. This will enable owners of tasks to notify consumers - /// of the ending of support for a given task. - #[prost(string, tag = "7")] - pub deprecated_error_message: ::prost::alloc::string::String, - /// Indicates whether the system should attempt to execute discoverable instances in serial to avoid duplicate work - #[prost(bool, tag = "9")] - pub cache_serializable: bool, - /// Indicates whether the task will generate a Deck URI when it finishes executing. - #[prost(bool, tag = "10")] - pub generates_deck: bool, - /// Arbitrary tags that allow users and the platform to store small but arbitrary labels - #[prost(map = "string, string", tag = "11")] - pub tags: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, - /// pod_template_name is the unique name of a PodTemplate k8s resource to be used as the base configuration if this - /// task creates a k8s Pod. If this value is set, the specified PodTemplate will be used instead of, but applied - /// identically as, the default PodTemplate configured in FlytePropeller. - #[prost(string, tag = "12")] - pub pod_template_name: ::prost::alloc::string::String, - /// cache_ignore_input_vars is the input variables that should not be included when calculating hash for cache. - #[prost(string, repeated, tag = "13")] - pub cache_ignore_input_vars: ::prost::alloc::vec::Vec< - ::prost::alloc::string::String, - >, - /// Identify whether task is interruptible - #[prost(oneof = "task_metadata::InterruptibleValue", tags = "8")] - pub interruptible_value: ::core::option::Option, -} -/// Nested message and enum types in `TaskMetadata`. -pub mod task_metadata { - /// Identify whether task is interruptible - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum InterruptibleValue { - #[prost(bool, tag = "8")] - Interruptible(bool), - } -} -/// A Task structure that uniquely identifies a task in the system -/// Tasks are registered as a first step in the system. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskTemplate { - /// Auto generated taskId by the system. Task Id uniquely identifies this task globally. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// A predefined yet extensible Task type identifier. This can be used to customize any of the components. If no - /// extensions are provided in the system, Flyte will resolve the this task to its TaskCategory and default the - /// implementation registered for the TaskCategory. - #[prost(string, tag = "2")] - pub r#type: ::prost::alloc::string::String, - /// Extra metadata about the task. - #[prost(message, optional, tag = "3")] - pub metadata: ::core::option::Option, - /// A strongly typed interface for the task. This enables others to use this task within a workflow and guarantees - /// compile-time validation of the workflow to avoid costly runtime failures. - #[prost(message, optional, tag = "4")] - pub interface: ::core::option::Option, - /// Custom data about the task. This is extensible to allow various plugins in the system. - #[prost(message, optional, tag = "5")] - pub custom: ::core::option::Option<::prost_types::Struct>, - /// This can be used to customize task handling at execution time for the same task type. 
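    // Editor's note (illustrative only): the execution target of a TaskTemplate is the
    // `target` oneof declared at the bottom of this struct, e.g.
    //     template.target = Some(task_template::Target::Container(container));
    // exactly one of Container / K8sPod / Sql may be set.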
- #[prost(int32, tag = "7")] - pub task_type_version: i32, - /// security_context encapsulates security attributes requested to run this task. - #[prost(message, optional, tag = "8")] - pub security_context: ::core::option::Option, - /// Encapsulates all non-standard resources, not captured by - /// v1.ResourceRequirements, to allocate to a task. - #[prost(message, optional, tag = "9")] - pub extended_resources: ::core::option::Option, - /// Metadata about the custom defined for this task. This is extensible to allow various plugins in the system - /// to use as required. - /// reserve the field numbers 1 through 15 for very frequently occurring message elements - #[prost(map = "string, string", tag = "16")] - pub config: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, - /// Known target types that the system will guarantee plugins for. Custom SDK plugins are allowed to set these if needed. - /// If no corresponding execution-layer plugins are found, the system will default to handling these using built-in - /// handlers. - #[prost(oneof = "task_template::Target", tags = "6, 17, 18")] - pub target: ::core::option::Option, -} -/// Nested message and enum types in `TaskTemplate`. -pub mod task_template { - /// Known target types that the system will guarantee plugins for. Custom SDK plugins are allowed to set these if needed. - /// If no corresponding execution-layer plugins are found, the system will default to handling these using built-in - /// handlers. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Target { - #[prost(message, tag = "6")] - Container(super::Container), - #[prost(message, tag = "17")] - K8sPod(super::K8sPod), - #[prost(message, tag = "18")] - Sql(super::Sql), - } -} -/// Defines port properties for a container. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ContainerPort { - /// Number of port to expose on the pod's IP address. - /// This must be a valid port number, 0 < x < 65536. - #[prost(uint32, tag = "1")] - pub container_port: u32, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Container { - /// Container image url. Eg: docker/redis:latest - #[prost(string, tag = "1")] - pub image: ::prost::alloc::string::String, - /// Command to be executed, if not provided, the default entrypoint in the container image will be used. - #[prost(string, repeated, tag = "2")] - pub command: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// These will default to Flyte given paths. If provided, the system will not append known paths. If the task still - /// needs flyte's inputs and outputs path, add $(FLYTE_INPUT_FILE), $(FLYTE_OUTPUT_FILE) wherever makes sense and the - /// system will populate these before executing the container. - #[prost(string, repeated, tag = "3")] - pub args: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Container resources requirement as specified by the container engine. - #[prost(message, optional, tag = "4")] - pub resources: ::core::option::Option, - /// Environment variables will be set as the container is starting up. - #[prost(message, repeated, tag = "5")] - pub env: ::prost::alloc::vec::Vec, - /// Allows extra configs to be available for the container. - /// TODO: elaborate on how configs will become available. - /// Deprecated, please use TaskTemplate.config instead. 
- #[deprecated] - #[prost(message, repeated, tag = "6")] - pub config: ::prost::alloc::vec::Vec, - /// Ports to open in the container. This feature is not supported by all execution engines. (e.g. supported on K8s but - /// not supported on AWS Batch) - /// Only K8s - #[prost(message, repeated, tag = "7")] - pub ports: ::prost::alloc::vec::Vec, - /// BETA: Optional configuration for DataLoading. If not specified, then default values are used. - /// This makes it possible to to run a completely portable container, that uses inputs and outputs - /// only from the local file-system and without having any reference to flyteidl. This is supported only on K8s at the moment. - /// If data loading is enabled, then data will be mounted in accompanying directories specified in the DataLoadingConfig. If the directories - /// are not specified, inputs will be mounted onto and outputs will be uploaded from a pre-determined file-system path. Refer to the documentation - /// to understand the default paths. - /// Only K8s - #[prost(message, optional, tag = "9")] - pub data_config: ::core::option::Option, - #[prost(enumeration = "container::Architecture", tag = "10")] - pub architecture: i32, -} -/// Nested message and enum types in `Container`. -pub mod container { - /// Architecture-type the container image supports. - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Architecture { - Unknown = 0, - Amd64 = 1, - Arm64 = 2, - ArmV6 = 3, - ArmV7 = 4, - } - impl Architecture { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Architecture::Unknown => "UNKNOWN", - Architecture::Amd64 => "AMD64", - Architecture::Arm64 => "ARM64", - Architecture::ArmV6 => "ARM_V6", - Architecture::ArmV7 => "ARM_V7", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "AMD64" => Some(Self::Amd64), - "ARM64" => Some(Self::Arm64), - "ARM_V6" => Some(Self::ArmV6), - "ARM_V7" => Some(Self::ArmV7), - _ => None, - } - } - } -} -/// Strategy to use when dealing with Blob, Schema, or multipart blob data (large datasets) -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct IoStrategy { - /// Mode to use to manage downloads - #[prost(enumeration = "io_strategy::DownloadMode", tag = "1")] - pub download_mode: i32, - /// Mode to use to manage uploads - #[prost(enumeration = "io_strategy::UploadMode", tag = "2")] - pub upload_mode: i32, -} -/// Nested message and enum types in `IOStrategy`. -pub mod io_strategy { - /// Mode to use for downloading - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum DownloadMode { - /// All data will be downloaded before the main container is executed - DownloadEager = 0, - /// Data will be downloaded as a stream and an End-Of-Stream marker will be written to indicate all data has been downloaded. 
Refer to protocol for details - DownloadStream = 1, - /// Large objects (offloaded) will not be downloaded - DoNotDownload = 2, - } - impl DownloadMode { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - DownloadMode::DownloadEager => "DOWNLOAD_EAGER", - DownloadMode::DownloadStream => "DOWNLOAD_STREAM", - DownloadMode::DoNotDownload => "DO_NOT_DOWNLOAD", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "DOWNLOAD_EAGER" => Some(Self::DownloadEager), - "DOWNLOAD_STREAM" => Some(Self::DownloadStream), - "DO_NOT_DOWNLOAD" => Some(Self::DoNotDownload), - _ => None, - } - } - } - /// Mode to use for uploading - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum UploadMode { - /// All data will be uploaded after the main container exits - UploadOnExit = 0, - /// Data will be uploaded as it appears. Refer to protocol specification for details - UploadEager = 1, - /// Data will not be uploaded, only references will be written - DoNotUpload = 2, - } - impl UploadMode { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - UploadMode::UploadOnExit => "UPLOAD_ON_EXIT", - UploadMode::UploadEager => "UPLOAD_EAGER", - UploadMode::DoNotUpload => "DO_NOT_UPLOAD", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UPLOAD_ON_EXIT" => Some(Self::UploadOnExit), - "UPLOAD_EAGER" => Some(Self::UploadEager), - "DO_NOT_UPLOAD" => Some(Self::DoNotUpload), - _ => None, - } - } - } -} -/// This configuration allows executing raw containers in Flyte using the Flyte CoPilot system. -/// Flyte CoPilot, eliminates the needs of flytekit or sdk inside the container. Any inputs required by the users container are side-loaded in the input_path -/// Any outputs generated by the user container - within output_path are automatically uploaded. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DataLoadingConfig { - /// Flag enables DataLoading Config. If this is not set, data loading will not be used! - #[prost(bool, tag = "1")] - pub enabled: bool, - /// File system path (start at root). This folder will contain all the inputs exploded to a separate file. - /// Example, if the input interface needs (x: int, y: blob, z: multipart_blob) and the input path is '/var/flyte/inputs', then the file system will look like - /// /var/flyte/inputs/inputs. .pb .json .yaml> -> Format as defined previously. The Blob and Multipart blob will reference local filesystem instead of remote locations - /// /var/flyte/inputs/x -> X is a file that contains the value of x (integer) in string format - /// /var/flyte/inputs/y -> Y is a file in Binary format - /// /var/flyte/inputs/z/... 
-> Note Z itself is a directory - /// More information about the protocol - refer to docs #TODO reference docs here - #[prost(string, tag = "2")] - pub input_path: ::prost::alloc::string::String, - /// File system path (start at root). This folder should contain all the outputs for the task as individual files and/or an error text file - #[prost(string, tag = "3")] - pub output_path: ::prost::alloc::string::String, - /// In the inputs folder, there will be an additional summary/metadata file that contains references to all files or inlined primitive values. - /// This format decides the actual encoding for the data. Refer to the encoding to understand the specifics of the contents and the encoding - #[prost(enumeration = "data_loading_config::LiteralMapFormat", tag = "4")] - pub format: i32, - #[prost(message, optional, tag = "5")] - pub io_strategy: ::core::option::Option, -} -/// Nested message and enum types in `DataLoadingConfig`. -pub mod data_loading_config { - /// LiteralMapFormat decides the encoding format in which the input metadata should be made available to the containers. - /// If the user has access to the protocol buffer definitions, it is recommended to use the PROTO format. - /// JSON and YAML do not need any protobuf definitions to read it - /// All remote references in core.LiteralMap are replaced with local filesystem references (the data is downloaded to local filesystem) - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum LiteralMapFormat { - /// JSON / YAML for the metadata (which contains inlined primitive values). The representation is inline with the standard json specification as specified - - Json = 0, - Yaml = 1, - /// Proto is a serialized binary of `core.LiteralMap` defined in flyteidl/core - Proto = 2, - } - impl LiteralMapFormat { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - LiteralMapFormat::Json => "JSON", - LiteralMapFormat::Yaml => "YAML", - LiteralMapFormat::Proto => "PROTO", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "JSON" => Some(Self::Json), - "YAML" => Some(Self::Yaml), - "PROTO" => Some(Self::Proto), - _ => None, - } - } - } -} -/// Defines a pod spec and additional pod metadata that is created when a task is executed. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct K8sPod { - /// Contains additional metadata for building a kubernetes pod. - #[prost(message, optional, tag = "1")] - pub metadata: ::core::option::Option, - /// Defines the primary pod spec created when a task is executed. - /// This should be a JSON-marshalled pod spec, which can be defined in - /// - go, using: - /// - python: using - #[prost(message, optional, tag = "2")] - pub pod_spec: ::core::option::Option<::prost_types::Struct>, - /// BETA: Optional configuration for DataLoading. If not specified, then default values are used. - /// This makes it possible to to run a completely portable container, that uses inputs and outputs - /// only from the local file-system and without having any reference to flytekit. This is supported only on K8s at the moment. 
- /// If data loading is enabled, then data will be mounted in accompanying directories specified in the DataLoadingConfig. If the directories - /// are not specified, inputs will be mounted onto and outputs will be uploaded from a pre-determined file-system path. Refer to the documentation - /// to understand the default paths. - /// Only K8s - #[prost(message, optional, tag = "3")] - pub data_config: ::core::option::Option, -} -/// Metadata for building a kubernetes object when a task is executed. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct K8sObjectMetadata { - /// Optional labels to add to the pod definition. - #[prost(map = "string, string", tag = "1")] - pub labels: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, - /// Optional annotations to add to the pod definition. - #[prost(map = "string, string", tag = "2")] - pub annotations: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, -} -/// Sql represents a generic sql workload with a statement and dialect. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Sql { - /// The actual query to run, the query can have templated parameters. - /// We use Flyte's Golang templating format for Query templating. - /// For example, - /// insert overwrite directory '{{ .rawOutputDataPrefix }}' stored as parquet - /// select * - /// from my_table - /// where ds = '{{ .Inputs.ds }}' - #[prost(string, tag = "1")] - pub statement: ::prost::alloc::string::String, - #[prost(enumeration = "sql::Dialect", tag = "2")] - pub dialect: i32, -} -/// Nested message and enum types in `Sql`. -pub mod sql { - /// The dialect of the SQL statement. This is used to validate and parse SQL statements at compilation time to avoid - /// expensive runtime operations. If set to an unsupported dialect, no validation will be done on the statement. - /// We support the following dialect: ansi, hive. - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Dialect { - Undefined = 0, - Ansi = 1, - Hive = 2, - Other = 3, - } - impl Dialect { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Dialect::Undefined => "UNDEFINED", - Dialect::Ansi => "ANSI", - Dialect::Hive => "HIVE", - Dialect::Other => "OTHER", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNDEFINED" => Some(Self::Undefined), - "ANSI" => Some(Self::Ansi), - "HIVE" => Some(Self::Hive), - "OTHER" => Some(Self::Other), - _ => None, - } - } - } -} -/// Defines a 2-level tree where the root is a comparison operator and Operands are primitives or known variables. -/// Each expression results in a boolean result. 
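// Editor's note: a short sketch (not from the patch) of the `Sql` message defined above;
// the statement is an example and uses Flyte's Golang-style templating.
fn _example_sql() -> Sql {
    Sql {
        statement: "select * from my_table where ds = '{{ .Inputs.ds }}'".to_string(),
        dialect: sql::Dialect::Hive as i32,
    }
}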
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ComparisonExpression { - #[prost(enumeration = "comparison_expression::Operator", tag = "1")] - pub operator: i32, - #[prost(message, optional, tag = "2")] - pub left_value: ::core::option::Option, - #[prost(message, optional, tag = "3")] - pub right_value: ::core::option::Option, -} -/// Nested message and enum types in `ComparisonExpression`. -pub mod comparison_expression { - /// Binary Operator for each expression - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Operator { - Eq = 0, - Neq = 1, - /// Greater Than - Gt = 2, - Gte = 3, - /// Less Than - Lt = 4, - Lte = 5, - } - impl Operator { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Operator::Eq => "EQ", - Operator::Neq => "NEQ", - Operator::Gt => "GT", - Operator::Gte => "GTE", - Operator::Lt => "LT", - Operator::Lte => "LTE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "EQ" => Some(Self::Eq), - "NEQ" => Some(Self::Neq), - "GT" => Some(Self::Gt), - "GTE" => Some(Self::Gte), - "LT" => Some(Self::Lt), - "LTE" => Some(Self::Lte), - _ => None, - } - } - } -} -/// Defines an operand to a comparison expression. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Operand { - #[prost(oneof = "operand::Val", tags = "1, 2, 3")] - pub val: ::core::option::Option, -} -/// Nested message and enum types in `Operand`. -pub mod operand { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Val { - /// Can be a constant - #[prost(message, tag = "1")] - Primitive(super::Primitive), - /// Or one of this node's input variables - #[prost(string, tag = "2")] - Var(::prost::alloc::string::String), - /// Replace the primitive field - #[prost(message, tag = "3")] - Scalar(super::Scalar), - } -} -/// Defines a boolean expression tree. It can be a simple or a conjunction expression. -/// Multiple expressions can be combined using a conjunction or a disjunction to result in a final boolean result. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BooleanExpression { - #[prost(oneof = "boolean_expression::Expr", tags = "1, 2")] - pub expr: ::core::option::Option, -} -/// Nested message and enum types in `BooleanExpression`. -pub mod boolean_expression { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Expr { - #[prost(message, tag = "1")] - Conjunction(::prost::alloc::boxed::Box), - #[prost(message, tag = "2")] - Comparison(super::ComparisonExpression), - } -} -/// Defines a conjunction expression of two boolean expressions. 
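// Editor's note: a hedged sketch (not from the patch) building the expression `x GTE y`
// from the comparison types above; both operands reference node input variables.
fn _example_comparison() -> BooleanExpression {
    let var = |name: &str| Operand { val: Some(operand::Val::Var(name.to_string())) };
    BooleanExpression {
        expr: Some(boolean_expression::Expr::Comparison(ComparisonExpression {
            operator: comparison_expression::Operator::Gte as i32,
            left_value: Some(var("x")),
            right_value: Some(var("y")),
        })),
    }
}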
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ConjunctionExpression { - #[prost(enumeration = "conjunction_expression::LogicalOperator", tag = "1")] - pub operator: i32, - #[prost(message, optional, boxed, tag = "2")] - pub left_expression: ::core::option::Option< - ::prost::alloc::boxed::Box, - >, - #[prost(message, optional, boxed, tag = "3")] - pub right_expression: ::core::option::Option< - ::prost::alloc::boxed::Box, - >, -} -/// Nested message and enum types in `ConjunctionExpression`. -pub mod conjunction_expression { - /// Nested conditions. They can be conjoined using AND / OR - /// Order of evaluation is not important as the operators are Commutative - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum LogicalOperator { - /// Conjunction - And = 0, - Or = 1, - } - impl LogicalOperator { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - LogicalOperator::And => "AND", - LogicalOperator::Or => "OR", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "AND" => Some(Self::And), - "OR" => Some(Self::Or), - _ => None, - } - } - } -} -/// Defines a condition and the execution unit that should be executed if the condition is satisfied. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct IfBlock { - #[prost(message, optional, tag = "1")] - pub condition: ::core::option::Option, - #[prost(message, optional, boxed, tag = "2")] - pub then_node: ::core::option::Option<::prost::alloc::boxed::Box>, -} -/// Defines a series of if/else blocks. The first branch whose condition evaluates to true is the one to execute. -/// If no conditions were satisfied, the else_node or the error will execute. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct IfElseBlock { - /// +required. First condition to evaluate. - #[prost(message, optional, boxed, tag = "1")] - pub case: ::core::option::Option<::prost::alloc::boxed::Box>, - /// +optional. Additional branches to evaluate. - #[prost(message, repeated, tag = "2")] - pub other: ::prost::alloc::vec::Vec, - /// +required. - #[prost(oneof = "if_else_block::Default", tags = "3, 4")] - pub default: ::core::option::Option, -} -/// Nested message and enum types in `IfElseBlock`. -pub mod if_else_block { - /// +required. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Default { - /// The node to execute in case none of the branches were taken. - #[prost(message, tag = "3")] - ElseNode(::prost::alloc::boxed::Box), - /// An error to throw in case none of the branches were taken. - #[prost(message, tag = "4")] - Error(super::Error), - } -} -/// BranchNode is a special node that alter the flow of the workflow graph. It allows the control flow to branch at -/// runtime based on a series of conditions that get evaluated on various parameters (e.g. inputs, primitives). 
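// Editor's note: expressions nest through `Box`es, so a conjunction owns its children on
// the heap. A sketch (not from the patch) combining two prebuilt expressions with AND:
fn _example_and(lhs: BooleanExpression, rhs: BooleanExpression) -> BooleanExpression {
    BooleanExpression {
        expr: Some(boolean_expression::Expr::Conjunction(Box::new(ConjunctionExpression {
            operator: conjunction_expression::LogicalOperator::And as i32,
            left_expression: Some(Box::new(lhs)),
            right_expression: Some(Box::new(rhs)),
        }))),
    }
}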
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BranchNode { - /// +required - #[prost(message, optional, boxed, tag = "1")] - pub if_else: ::core::option::Option<::prost::alloc::boxed::Box>, -} -/// Refers to the task that the Node is to execute. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskNode { - /// Optional overrides applied at task execution time. - #[prost(message, optional, tag = "2")] - pub overrides: ::core::option::Option, - #[prost(oneof = "task_node::Reference", tags = "1")] - pub reference: ::core::option::Option, -} -/// Nested message and enum types in `TaskNode`. -pub mod task_node { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Reference { - /// A globally unique identifier for the task. - #[prost(message, tag = "1")] - ReferenceId(super::Identifier), - } -} -/// Refers to a the workflow the node is to execute. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowNode { - #[prost(oneof = "workflow_node::Reference", tags = "1, 2")] - pub reference: ::core::option::Option, -} -/// Nested message and enum types in `WorkflowNode`. -pub mod workflow_node { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Reference { - /// A globally unique identifier for the launch plan. - #[prost(message, tag = "1")] - LaunchplanRef(super::Identifier), - /// Reference to a subworkflow, that should be defined with the compiler context - #[prost(message, tag = "2")] - SubWorkflowRef(super::Identifier), - } -} -/// ApproveCondition represents a dependency on an external approval. During execution, this will manifest as a boolean -/// signal with the provided signal_id. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ApproveCondition { - /// A unique identifier for the requested boolean signal. - #[prost(string, tag = "1")] - pub signal_id: ::prost::alloc::string::String, -} -/// SignalCondition represents a dependency on an signal. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SignalCondition { - /// A unique identifier for the requested signal. - #[prost(string, tag = "1")] - pub signal_id: ::prost::alloc::string::String, - /// A type denoting the required value type for this signal. - #[prost(message, optional, tag = "2")] - pub r#type: ::core::option::Option, - /// The variable name for the signal value in this nodes outputs. - #[prost(string, tag = "3")] - pub output_variable_name: ::prost::alloc::string::String, -} -/// SleepCondition represents a dependency on waiting for the specified duration. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SleepCondition { - /// The overall duration for this sleep. - #[prost(message, optional, tag = "1")] - pub duration: ::core::option::Option<::prost_types::Duration>, -} -/// GateNode refers to the condition that is required for the gate to successfully complete. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GateNode { - #[prost(oneof = "gate_node::Condition", tags = "1, 2, 3")] - pub condition: ::core::option::Option, -} -/// Nested message and enum types in `GateNode`. 
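// Editor's note: a minimal sketch (not from the patch) of a gate that sleeps for five
// minutes; the Approve and Signal variants are wired up the same way.
fn _example_sleep_gate() -> GateNode {
    GateNode {
        condition: Some(gate_node::Condition::Sleep(SleepCondition {
            duration: Some(::prost_types::Duration { seconds: 300, nanos: 0 }),
        })),
    }
}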
-pub mod gate_node { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Condition { - /// ApproveCondition represents a dependency on an external approval provided by a boolean signal. - #[prost(message, tag = "1")] - Approve(super::ApproveCondition), - /// SignalCondition represents a dependency on an signal. - #[prost(message, tag = "2")] - Signal(super::SignalCondition), - /// SleepCondition represents a dependency on waiting for the specified duration. - #[prost(message, tag = "3")] - Sleep(super::SleepCondition), - } -} -/// ArrayNode is a Flyte node type that simplifies the execution of a sub-node over a list of input -/// values. An ArrayNode can be executed with configurable parallelism (separate from the parent -/// workflow) and can be configured to succeed when a certain number of sub-nodes succeed. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ArrayNode { - /// node is the sub-node that will be executed for each element in the array. - #[prost(message, optional, boxed, tag = "1")] - pub node: ::core::option::Option<::prost::alloc::boxed::Box>, - /// parallelism defines the minimum number of instances to bring up concurrently at any given - /// point. Note that this is an optimistic restriction and that, due to network partitioning or - /// other failures, the actual number of currently running instances might be more. This has to - /// be a positive number if assigned. Default value is size. - #[prost(uint32, tag = "2")] - pub parallelism: u32, - #[prost(oneof = "array_node::SuccessCriteria", tags = "3, 4")] - pub success_criteria: ::core::option::Option, -} -/// Nested message and enum types in `ArrayNode`. -pub mod array_node { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum SuccessCriteria { - /// min_successes is an absolute number of the minimum number of successful completions of - /// sub-nodes. As soon as this criteria is met, the ArrayNode will be marked as successful - /// and outputs will be computed. This has to be a non-negative number if assigned. Default - /// value is size (if specified). - #[prost(uint32, tag = "3")] - MinSuccesses(u32), - /// If the array job size is not known beforehand, the min_success_ratio can instead be used - /// to determine when an ArrayNode can be marked successful. - #[prost(float, tag = "4")] - MinSuccessRatio(f32), - } -} -/// Defines extra information about the Node. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NodeMetadata { - /// A friendly name for the Node - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// The overall timeout of a task. - #[prost(message, optional, tag = "4")] - pub timeout: ::core::option::Option<::prost_types::Duration>, - /// Number of retries per task. - #[prost(message, optional, tag = "5")] - pub retries: ::core::option::Option, - /// Identify whether node is interruptible - #[prost(oneof = "node_metadata::InterruptibleValue", tags = "6")] - pub interruptible_value: ::core::option::Option, - /// Identify whether a node should have it's outputs cached. - #[prost(oneof = "node_metadata::CacheableValue", tags = "7")] - pub cacheable_value: ::core::option::Option, - /// The version of the cache to use. 
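    // Editor's note (illustrative only): each single-variant oneof here stands in for a
    // proto3 optional field, keeping "unset" distinguishable from an explicit false, e.g.
    //     metadata.interruptible_value =
    //         Some(node_metadata::InterruptibleValue::Interruptible(true));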
-    #[prost(oneof = "node_metadata::CacheVersionValue", tags = "8")]
-    pub cache_version_value: ::core::option::Option<node_metadata::CacheVersionValue>,
-    /// Identify whether caching operations involving this node should be serialized.
-    #[prost(oneof = "node_metadata::CacheSerializableValue", tags = "9")]
-    pub cache_serializable_value: ::core::option::Option<
-        node_metadata::CacheSerializableValue,
-    >,
-}
-/// Nested message and enum types in `NodeMetadata`.
-pub mod node_metadata {
-    /// Identify whether the node is interruptible.
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum InterruptibleValue {
-        #[prost(bool, tag = "6")]
-        Interruptible(bool),
-    }
-    /// Identify whether a node should have its outputs cached.
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum CacheableValue {
-        #[prost(bool, tag = "7")]
-        Cacheable(bool),
-    }
-    /// The version of the cache to use.
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum CacheVersionValue {
-        #[prost(string, tag = "8")]
-        CacheVersion(::prost::alloc::string::String),
-    }
-    /// Identify whether caching operations involving this node should be serialized.
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum CacheSerializableValue {
-        #[prost(bool, tag = "9")]
-        CacheSerializable(bool),
-    }
-}
-/// Links a variable to an alias.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Alias {
-    /// Must match one of the output variable names on a node.
-    #[prost(string, tag = "1")]
-    pub var: ::prost::alloc::string::String,
-    /// A workflow-level unique alias that downstream nodes can refer to in their input.
-    #[prost(string, tag = "2")]
-    pub alias: ::prost::alloc::string::String,
-}
-/// A Workflow graph Node. One unit of execution in the graph. Each node can be linked to a Task, a Workflow or a branch
-/// node.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Node {
-    /// A workflow-level unique identifier that identifies this node in the workflow. 'inputs' and 'outputs' are reserved
-    /// node ids that cannot be used by other nodes.
-    #[prost(string, tag = "1")]
-    pub id: ::prost::alloc::string::String,
-    /// Extra metadata about the node.
-    #[prost(message, optional, tag = "2")]
-    pub metadata: ::core::option::Option<NodeMetadata>,
-    /// Specifies how to bind the underlying interface's inputs. All required inputs specified in the underlying interface
-    /// must be fulfilled.
-    #[prost(message, repeated, tag = "3")]
-    pub inputs: ::prost::alloc::vec::Vec<Binding>,
-    /// +optional Specifies execution dependencies for this node, ensuring it will only get scheduled to run after all its
-    /// upstream nodes have completed. This node will have an implicit dependency on any node that appears in the inputs
-    /// field.
-    #[prost(string, repeated, tag = "4")]
-    pub upstream_node_ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
-    /// +optional. A node can define aliases for a subset of its outputs. This is particularly useful if different nodes
-    /// need to conform to the same interface (e.g. all branches in a branch node). Downstream nodes must refer to this
-    /// node's outputs using the alias if one is specified.
-    #[prost(message, repeated, tag = "5")]
-    pub output_aliases: ::prost::alloc::vec::Vec<Alias>,
-    /// Information about the target to execute in this node.
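-    // Because proto3 wraps these optional scalars in single-variant oneofs, the
-    // generated fields are Options of wrapper enums rather than plain bools. A
-    // sketch, not part of the generated file:
-    //
-    //     let meta = NodeMetadata {
-    //         name: "train".to_string(),
-    //         interruptible_value: Some(node_metadata::InterruptibleValue::Interruptible(true)),
-    //         cacheable_value: Some(node_metadata::CacheableValue::Cacheable(true)),
-    //         ..Default::default()
-    //     };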
-    #[prost(oneof = "node::Target", tags = "6, 7, 8, 9, 10")]
-    pub target: ::core::option::Option<node::Target>,
-}
-/// Nested message and enum types in `Node`.
-pub mod node {
-    /// Information about the target to execute in this node.
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum Target {
-        /// Information about the Task to execute in this node.
-        #[prost(message, tag = "6")]
-        TaskNode(super::TaskNode),
-        /// Information about the Workflow to execute in this node.
-        #[prost(message, tag = "7")]
-        WorkflowNode(super::WorkflowNode),
-        /// Information about the branch node to evaluate in this node.
-        #[prost(message, tag = "8")]
-        BranchNode(::prost::alloc::boxed::Box<super::BranchNode>),
-        /// Information about the condition to evaluate in this node.
-        #[prost(message, tag = "9")]
-        GateNode(super::GateNode),
-        /// Information about the sub-node executions for each value in this node's list of
-        /// input values.
-        #[prost(message, tag = "10")]
-        ArrayNode(::prost::alloc::boxed::Box<super::ArrayNode>),
-    }
-}
-/// This is workflow layer metadata. These settings are only applicable to the workflow as a whole, and do not
-/// percolate down to child entities (like tasks) launched by the workflow.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct WorkflowMetadata {
-    /// Indicates the runtime priority of workflow executions.
-    #[prost(message, optional, tag = "1")]
-    pub quality_of_service: ::core::option::Option<QualityOfService>,
-    /// Defines how the system should behave when a failure is detected in the workflow execution.
-    #[prost(enumeration = "workflow_metadata::OnFailurePolicy", tag = "2")]
-    pub on_failure: i32,
-    /// Arbitrary tags that allow users and the platform to store small but arbitrary labels.
-    #[prost(map = "string, string", tag = "3")]
-    pub tags: ::std::collections::HashMap<
-        ::prost::alloc::string::String,
-        ::prost::alloc::string::String,
-    >,
-}
-/// Nested message and enum types in `WorkflowMetadata`.
-pub mod workflow_metadata {
-    /// Failure Handling Strategy
-    #[derive(
-        Clone,
-        Copy,
-        Debug,
-        PartialEq,
-        Eq,
-        Hash,
-        PartialOrd,
-        Ord,
-        ::prost::Enumeration
-    )]
-    #[repr(i32)]
-    pub enum OnFailurePolicy {
-        /// FAIL_IMMEDIATELY instructs the system to fail as soon as a node fails in the workflow. It'll automatically
-        /// abort all currently running nodes and clean up resources before finally marking the workflow execution as
-        /// failed.
-        FailImmediately = 0,
-        /// FAIL_AFTER_EXECUTABLE_NODES_COMPLETE instructs the system to make as much progress as it can. The system will
-        /// not alter the dependencies of the execution graph, so any node that depends on the failed node will not be run.
-        /// Other nodes will be executed to completion before cleaning up resources and marking the workflow
-        /// execution as failed.
-        FailAfterExecutableNodesComplete = 1,
-    }
-    impl OnFailurePolicy {
-        /// String value of the enum field names used in the ProtoBuf definition.
-        ///
-        /// The values are not transformed in any way and thus are considered stable
-        /// (if the ProtoBuf definition does not change) and safe for programmatic use.
-        pub fn as_str_name(&self) -> &'static str {
-            match self {
-                OnFailurePolicy::FailImmediately => "FAIL_IMMEDIATELY",
-                OnFailurePolicy::FailAfterExecutableNodesComplete => {
-                    "FAIL_AFTER_EXECUTABLE_NODES_COMPLETE"
-                }
-            }
-        }
-        /// Creates an enum from field names used in the ProtoBuf definition.
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "FAIL_IMMEDIATELY" => Some(Self::FailImmediately), - "FAIL_AFTER_EXECUTABLE_NODES_COMPLETE" => { - Some(Self::FailAfterExecutableNodesComplete) - } - _ => None, - } - } - } -} -/// The difference between these settings and the WorkflowMetadata ones is that these are meant to be passed down to -/// a workflow's underlying entities (like tasks). For instance, 'interruptible' has no meaning at the workflow layer, it -/// is only relevant when a task executes. The settings here are the defaults that are passed to all nodes -/// unless explicitly overridden at the node layer. -/// If you are adding a setting that applies to both the Workflow itself, and everything underneath it, it should be -/// added to both this object and the WorkflowMetadata object above. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowMetadataDefaults { - /// Whether child nodes of the workflow are interruptible. - #[prost(bool, tag = "1")] - pub interruptible: bool, -} -/// Flyte Workflow Structure that encapsulates task, branch and subworkflow nodes to form a statically analyzable, -/// directed acyclic graph. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowTemplate { - /// A globally unique identifier for the workflow. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// Extra metadata about the workflow. - #[prost(message, optional, tag = "2")] - pub metadata: ::core::option::Option, - /// Defines a strongly typed interface for the Workflow. This can include some optional parameters. - #[prost(message, optional, tag = "3")] - pub interface: ::core::option::Option, - /// A list of nodes. In addition, 'globals' is a special reserved node id that can be used to consume workflow inputs. - #[prost(message, repeated, tag = "4")] - pub nodes: ::prost::alloc::vec::Vec, - /// A list of output bindings that specify how to construct workflow outputs. Bindings can pull node outputs or - /// specify literals. All workflow outputs specified in the interface field must be bound in order for the workflow - /// to be validated. A workflow has an implicit dependency on all of its nodes to execute successfully in order to - /// bind final outputs. - /// Most of these outputs will be Binding's with a BindingData of type OutputReference. That is, your workflow can - /// just have an output of some constant (`Output(5)`), but usually, the workflow will be pulling - /// outputs from the output of a task. - #[prost(message, repeated, tag = "5")] - pub outputs: ::prost::alloc::vec::Vec, - /// +optional A catch-all node. This node is executed whenever the execution engine determines the workflow has failed. - /// The interface of this node must match the Workflow interface with an additional input named 'error' of type - /// pb.lyft.flyte.core.Error. - #[prost(message, optional, tag = "6")] - pub failure_node: ::core::option::Option, - /// workflow defaults - #[prost(message, optional, tag = "7")] - pub metadata_defaults: ::core::option::Option, -} -/// Optional task node overrides that will be applied at task execution time. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskNodeOverrides { - /// A customizable interface to convey resources requested for a task container. 
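-// prost stores proto enums as raw i32 fields (see `WorkflowMetadata.on_failure`
-// above), and the generated helpers convert between wire names and variants. A
-// minimal sketch, not part of the generated file:
-//
-//     let md = WorkflowMetadata {
-//         on_failure: workflow_metadata::OnFailurePolicy::FailAfterExecutableNodesComplete as i32,
-//         ..Default::default()
-//     };
-//     let parsed = workflow_metadata::OnFailurePolicy::from_str_name(
-//         "FAIL_AFTER_EXECUTABLE_NODES_COMPLETE",
-//     );
-//     assert_eq!(parsed.map(|p| p as i32), Some(md.on_failure));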
- #[prost(message, optional, tag = "1")] - pub resources: ::core::option::Option, - /// Overrides for all non-standard resources, not captured by - /// v1.ResourceRequirements, to allocate to a task. - #[prost(message, optional, tag = "2")] - pub extended_resources: ::core::option::Option, - /// Override for the image used by task pods. - #[prost(string, tag = "3")] - pub container_image: ::prost::alloc::string::String, -} -/// A structure that uniquely identifies a launch plan in the system. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct LaunchPlanTemplate { - /// A globally unique identifier for the launch plan. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// The input and output interface for the launch plan - #[prost(message, optional, tag = "2")] - pub interface: ::core::option::Option, - /// A collection of input literals that are fixed for the launch plan - #[prost(message, optional, tag = "3")] - pub fixed_inputs: ::core::option::Option, -} -/// Span represents a duration trace of Flyte execution. The id field denotes a Flyte execution entity or an operation -/// which uniquely identifies the Span. The spans attribute allows this Span to be further broken down into more -/// precise definitions. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Span { - /// start_time defines the instance this span began. - #[prost(message, optional, tag = "1")] - pub start_time: ::core::option::Option<::prost_types::Timestamp>, - /// end_time defines the instance this span completed. - #[prost(message, optional, tag = "2")] - pub end_time: ::core::option::Option<::prost_types::Timestamp>, - /// spans defines a collection of Spans that breakdown this execution. - #[prost(message, repeated, tag = "7")] - pub spans: ::prost::alloc::vec::Vec, - #[prost(oneof = "span::Id", tags = "3, 4, 5, 6")] - pub id: ::core::option::Option, -} -/// Nested message and enum types in `Span`. -pub mod span { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Id { - /// workflow_id is the id of the workflow execution this Span represents. - #[prost(message, tag = "3")] - WorkflowId(super::WorkflowExecutionIdentifier), - /// node_id is the id of the node execution this Span represents. - #[prost(message, tag = "4")] - NodeId(super::NodeExecutionIdentifier), - /// task_id is the id of the task execution this Span represents. - #[prost(message, tag = "5")] - TaskId(super::TaskExecutionIdentifier), - /// operation_id is the id of a unique operation that this Span represents. - #[prost(string, tag = "6")] - OperationId(::prost::alloc::string::String), - } -} -/// ExecutionMetrics is a collection of metrics that are collected during the execution of a Flyte task. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExecutionMetricResult { - /// The metric this data represents. e.g. EXECUTION_METRIC_USED_CPU_AVG or EXECUTION_METRIC_USED_MEMORY_BYTES_AVG. - #[prost(string, tag = "1")] - pub metric: ::prost::alloc::string::String, - /// The result data in prometheus range query result format - /// - /// This may include multiple time series, differentiated by their metric labels. 
-    /// Start time is greater of (execution attempt start, 48h ago)
-    /// End time is lesser of (execution attempt end, now)
-    #[prost(message, optional, tag = "2")]
-    pub data: ::core::option::Option<::prost_types::Struct>,
-}
-/// Adjacency list for the workflow. This is created as part of the compilation process. Every process after the
-/// compilation step uses this created ConnectionSet.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ConnectionSet {
-    /// A list of all the node ids that are downstream from a given node id
-    #[prost(map = "string, message", tag = "7")]
-    pub downstream: ::std::collections::HashMap<
-        ::prost::alloc::string::String,
-        connection_set::IdList,
-    >,
-    /// A list of all the node ids that are upstream of this node id
-    #[prost(map = "string, message", tag = "8")]
-    pub upstream: ::std::collections::HashMap<
-        ::prost::alloc::string::String,
-        connection_set::IdList,
-    >,
-}
-/// Nested message and enum types in `ConnectionSet`.
-pub mod connection_set {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Message)]
-    pub struct IdList {
-        #[prost(string, repeated, tag = "1")]
-        pub ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
-    }
-}
-/// Output of the compilation step. This object represents one workflow. We store more metadata at this layer.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct CompiledWorkflow {
-    /// Completely contained Workflow Template
-    #[prost(message, optional, tag = "1")]
-    pub template: ::core::option::Option<WorkflowTemplate>,
-    /// For internal use only! This field is used by the system and must not be filled in. Any values set will be ignored.
-    #[prost(message, optional, tag = "2")]
-    pub connections: ::core::option::Option<ConnectionSet>,
-}
-/// Output of the compilation step. This object represents one LaunchPlan. We store more metadata at this layer.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct CompiledLaunchPlan {
-    /// Completely contained LaunchPlan Template
-    #[prost(message, optional, tag = "1")]
-    pub template: ::core::option::Option<LaunchPlanTemplate>,
-}
-/// Output of the compilation step. This object represents one Task. We store more metadata at this layer.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct CompiledTask {
-    /// Completely contained TaskTemplate
-    #[prost(message, optional, tag = "1")]
-    pub template: ::core::option::Option<TaskTemplate>,
-}
-/// A CompiledWorkflowClosure contains all the information required to start a new execution, or to visualize a workflow
-/// and its details. The CompiledWorkflowClosure should always contain a primary workflow, that is the main workflow that
-/// will begin the execution. All subworkflows are denormalized. WorkflowNodes refer to the workflow identifiers of
-/// compiled subworkflows.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct CompiledWorkflowClosure {
-    /// +required
-    #[prost(message, optional, tag = "1")]
-    pub primary: ::core::option::Option<CompiledWorkflow>,
-    /// Guaranteed that there will only exist one and only one workflow with a given id, i.e., every sub workflow has a
-    /// unique identifier.
Also every enclosed subworkflow is used either by a primary workflow or by a subworkflow - /// as an inlined workflow - /// +optional - #[prost(message, repeated, tag = "2")] - pub sub_workflows: ::prost::alloc::vec::Vec, - /// Guaranteed that there will only exist one and only one task with a given id, i.e., every task has a unique id - /// +required (at least 1) - #[prost(message, repeated, tag = "3")] - pub tasks: ::prost::alloc::vec::Vec, - /// A collection of launch plans that are compiled. Guaranteed that there will only exist one and only one launch plan - /// with a given id, i.e., every launch plan has a unique id. - #[prost(message, repeated, tag = "4")] - pub launch_plans: ::prost::alloc::vec::Vec, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CatalogArtifactTag { - /// Artifact ID is generated name - #[prost(string, tag = "1")] - pub artifact_id: ::prost::alloc::string::String, - /// Flyte computes the tag automatically, as the hash of the values - #[prost(string, tag = "2")] - pub name: ::prost::alloc::string::String, -} -/// Catalog artifact information with specific metadata -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CatalogMetadata { - /// Dataset ID in the catalog - #[prost(message, optional, tag = "1")] - pub dataset_id: ::core::option::Option, - /// Artifact tag in the catalog - #[prost(message, optional, tag = "2")] - pub artifact_tag: ::core::option::Option, - /// Optional: Source Execution identifier, if this dataset was generated by another execution in Flyte. This is a one-of field and will depend on the caching context - #[prost(oneof = "catalog_metadata::SourceExecution", tags = "3")] - pub source_execution: ::core::option::Option, -} -/// Nested message and enum types in `CatalogMetadata`. -pub mod catalog_metadata { - /// Optional: Source Execution identifier, if this dataset was generated by another execution in Flyte. This is a one-of field and will depend on the caching context - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum SourceExecution { - /// Today we only support TaskExecutionIdentifier as a source, as catalog caching only works for task executions - #[prost(message, tag = "3")] - SourceTaskExecution(super::TaskExecutionIdentifier), - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CatalogReservation {} -/// Nested message and enum types in `CatalogReservation`. -pub mod catalog_reservation { - /// Indicates the status of a catalog reservation operation. - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Status { - /// Used to indicate that reservations are disabled - ReservationDisabled = 0, - /// Used to indicate that a reservation was successfully acquired or extended - ReservationAcquired = 1, - /// Used to indicate that an active reservation currently exists - ReservationExists = 2, - /// Used to indicate that the reservation has been successfully released - ReservationReleased = 3, - /// Used to indicate that a reservation operation resulted in failure - ReservationFailure = 4, - } - impl Status { - /// String value of the enum field names used in the ProtoBuf definition. 
- /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Status::ReservationDisabled => "RESERVATION_DISABLED", - Status::ReservationAcquired => "RESERVATION_ACQUIRED", - Status::ReservationExists => "RESERVATION_EXISTS", - Status::ReservationReleased => "RESERVATION_RELEASED", - Status::ReservationFailure => "RESERVATION_FAILURE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "RESERVATION_DISABLED" => Some(Self::ReservationDisabled), - "RESERVATION_ACQUIRED" => Some(Self::ReservationAcquired), - "RESERVATION_EXISTS" => Some(Self::ReservationExists), - "RESERVATION_RELEASED" => Some(Self::ReservationReleased), - "RESERVATION_FAILURE" => Some(Self::ReservationFailure), - _ => None, - } - } - } -} -/// Indicates the status of CatalogCaching. The reason why this is not embedded in TaskNodeMetadata is, that we may use for other types of nodes as well in the future -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum CatalogCacheStatus { - /// Used to indicate that caching was disabled - CacheDisabled = 0, - /// Used to indicate that the cache lookup resulted in no matches - CacheMiss = 1, - /// used to indicate that the associated artifact was a result of a previous execution - CacheHit = 2, - /// used to indicate that the resultant artifact was added to the cache - CachePopulated = 3, - /// Used to indicate that cache lookup failed because of an error - CacheLookupFailure = 4, - /// Used to indicate that cache lookup failed because of an error - CachePutFailure = 5, - /// Used to indicate the cache lookup was skipped - CacheSkipped = 6, - /// Used to indicate that the cache was evicted - CacheEvicted = 7, -} -impl CatalogCacheStatus { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - CatalogCacheStatus::CacheDisabled => "CACHE_DISABLED", - CatalogCacheStatus::CacheMiss => "CACHE_MISS", - CatalogCacheStatus::CacheHit => "CACHE_HIT", - CatalogCacheStatus::CachePopulated => "CACHE_POPULATED", - CatalogCacheStatus::CacheLookupFailure => "CACHE_LOOKUP_FAILURE", - CatalogCacheStatus::CachePutFailure => "CACHE_PUT_FAILURE", - CatalogCacheStatus::CacheSkipped => "CACHE_SKIPPED", - CatalogCacheStatus::CacheEvicted => "CACHE_EVICTED", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "CACHE_DISABLED" => Some(Self::CacheDisabled), - "CACHE_MISS" => Some(Self::CacheMiss), - "CACHE_HIT" => Some(Self::CacheHit), - "CACHE_POPULATED" => Some(Self::CachePopulated), - "CACHE_LOOKUP_FAILURE" => Some(Self::CacheLookupFailure), - "CACHE_PUT_FAILURE" => Some(Self::CachePutFailure), - "CACHE_SKIPPED" => Some(Self::CacheSkipped), - "CACHE_EVICTED" => Some(Self::CacheEvicted), - _ => None, - } - } -} diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.event.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.event.rs deleted file mode 100644 index 46ec0ec406..0000000000 --- a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.event.rs +++ /dev/null @@ -1,398 +0,0 @@ -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowExecutionEvent { - /// Workflow execution id - #[prost(message, optional, tag = "1")] - pub execution_id: ::core::option::Option, - /// the id of the originator (Propeller) of the event - #[prost(string, tag = "2")] - pub producer_id: ::prost::alloc::string::String, - #[prost(enumeration = "super::core::workflow_execution::Phase", tag = "3")] - pub phase: i32, - /// This timestamp represents when the original event occurred, it is generated - /// by the executor of the workflow. - #[prost(message, optional, tag = "4")] - pub occurred_at: ::core::option::Option<::prost_types::Timestamp>, - #[prost(oneof = "workflow_execution_event::OutputResult", tags = "5, 6, 7")] - pub output_result: ::core::option::Option, -} -/// Nested message and enum types in `WorkflowExecutionEvent`. -pub mod workflow_execution_event { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum OutputResult { - /// URL to the output of the execution, it encodes all the information - /// including Cloud source provider. ie., s3://... - #[prost(string, tag = "5")] - OutputUri(::prost::alloc::string::String), - /// Error information for the execution - #[prost(message, tag = "6")] - Error(super::super::core::ExecutionError), - /// Raw output data produced by this workflow execution. - #[prost(message, tag = "7")] - OutputData(super::super::core::LiteralMap), - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NodeExecutionEvent { - /// Unique identifier for this node execution - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// the id of the originator (Propeller) of the event - #[prost(string, tag = "2")] - pub producer_id: ::prost::alloc::string::String, - #[prost(enumeration = "super::core::node_execution::Phase", tag = "3")] - pub phase: i32, - /// This timestamp represents when the original event occurred, it is generated - /// by the executor of the node. - #[prost(message, optional, tag = "4")] - pub occurred_at: ::core::option::Option<::prost_types::Timestamp>, - /// \[To be deprecated\] Specifies which task (if any) launched this node. - #[prost(message, optional, tag = "9")] - pub parent_task_metadata: ::core::option::Option, - /// Specifies the parent node of the current node execution. Node executions at level zero will not have a parent node. 
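-// A sketch (not part of the generated file) of how an event consumer might map a
-// wire name back to the `CatalogCacheStatus` enum from flyteidl.core, defined above:
-//
-//     use super::core::CatalogCacheStatus;
-//     match CatalogCacheStatus::from_str_name("CACHE_HIT") {
-//         Some(CatalogCacheStatus::CacheHit) => { /* output artifact was reused */ }
-//         _ => { /* treat as a miss and run the task */ }
-//     }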
- #[prost(message, optional, tag = "10")] - pub parent_node_metadata: ::core::option::Option, - /// Retry group to indicate grouping of nodes by retries - #[prost(string, tag = "11")] - pub retry_group: ::prost::alloc::string::String, - /// Identifier of the node in the original workflow/graph - /// This maps to value of WorkflowTemplate.nodes\[X\].id - #[prost(string, tag = "12")] - pub spec_node_id: ::prost::alloc::string::String, - /// Friendly readable name for the node - #[prost(string, tag = "13")] - pub node_name: ::prost::alloc::string::String, - #[prost(int32, tag = "16")] - pub event_version: i32, - /// Whether this node launched a subworkflow. - #[prost(bool, tag = "17")] - pub is_parent: bool, - /// Whether this node yielded a dynamic workflow. - #[prost(bool, tag = "18")] - pub is_dynamic: bool, - /// String location uniquely identifying where the deck HTML file is - /// NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar) - #[prost(string, tag = "19")] - pub deck_uri: ::prost::alloc::string::String, - /// This timestamp represents the instant when the event was reported by the executing framework. For example, - /// when first processing a node the `occurred_at` timestamp should be the instant propeller makes progress, so when - /// literal inputs are initially copied. The event however will not be sent until after the copy completes. - /// Extracting both of these timestamps facilitates a more accurate portrayal of the evaluation time-series. - #[prost(message, optional, tag = "21")] - pub reported_at: ::core::option::Option<::prost_types::Timestamp>, - /// Indicates if this node is an ArrayNode. - #[prost(bool, tag = "22")] - pub is_array: bool, - #[prost(oneof = "node_execution_event::InputValue", tags = "5, 20")] - pub input_value: ::core::option::Option, - #[prost(oneof = "node_execution_event::OutputResult", tags = "6, 7, 15")] - pub output_result: ::core::option::Option, - /// Additional metadata to do with this event's node target based - /// on the node type - #[prost(oneof = "node_execution_event::TargetMetadata", tags = "8, 14")] - pub target_metadata: ::core::option::Option, -} -/// Nested message and enum types in `NodeExecutionEvent`. -pub mod node_execution_event { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum InputValue { - #[prost(string, tag = "5")] - InputUri(::prost::alloc::string::String), - /// Raw input data consumed by this node execution. - #[prost(message, tag = "20")] - InputData(super::super::core::LiteralMap), - } - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum OutputResult { - /// URL to the output of the execution, it encodes all the information - /// including Cloud source provider. ie., s3://... - #[prost(string, tag = "6")] - OutputUri(::prost::alloc::string::String), - /// Error information for the execution - #[prost(message, tag = "7")] - Error(super::super::core::ExecutionError), - /// Raw output data produced by this node execution. 
- #[prost(message, tag = "15")] - OutputData(super::super::core::LiteralMap), - } - /// Additional metadata to do with this event's node target based - /// on the node type - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum TargetMetadata { - #[prost(message, tag = "8")] - WorkflowNodeMetadata(super::WorkflowNodeMetadata), - #[prost(message, tag = "14")] - TaskNodeMetadata(super::TaskNodeMetadata), - } -} -/// For Workflow Nodes we need to send information about the workflow that's launched -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkflowNodeMetadata { - #[prost(message, optional, tag = "1")] - pub execution_id: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskNodeMetadata { - /// Captures the status of caching for this execution. - #[prost(enumeration = "super::core::CatalogCacheStatus", tag = "1")] - pub cache_status: i32, - /// This structure carries the catalog artifact information - #[prost(message, optional, tag = "2")] - pub catalog_key: ::core::option::Option, - /// Captures the status of cache reservations for this execution. - #[prost(enumeration = "super::core::catalog_reservation::Status", tag = "3")] - pub reservation_status: i32, - /// The latest checkpoint location - #[prost(string, tag = "4")] - pub checkpoint_uri: ::prost::alloc::string::String, - /// In the case this task launched a dynamic workflow we capture its structure here. - #[prost(message, optional, tag = "16")] - pub dynamic_workflow: ::core::option::Option, -} -/// For dynamic workflow nodes we send information about the dynamic workflow definition that gets generated. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DynamicWorkflowNodeMetadata { - /// id represents the unique identifier of the workflow. - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, - /// Represents the compiled representation of the embedded dynamic workflow. - #[prost(message, optional, tag = "2")] - pub compiled_workflow: ::core::option::Option, - /// dynamic_job_spec_uri is the location of the DynamicJobSpec proto message for this DynamicWorkflow. This is - /// required to correctly recover partially completed executions where the workflow has already been compiled. - #[prost(string, tag = "3")] - pub dynamic_job_spec_uri: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ParentTaskExecutionMetadata { - #[prost(message, optional, tag = "1")] - pub id: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ParentNodeExecutionMetadata { - /// Unique identifier of the parent node id within the execution - /// This is value of core.NodeExecutionIdentifier.node_id of the parent node - #[prost(string, tag = "1")] - pub node_id: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct EventReason { - /// An explanation for this event - #[prost(string, tag = "1")] - pub reason: ::prost::alloc::string::String, - /// The time this reason occurred - #[prost(message, optional, tag = "2")] - pub occurred_at: ::core::option::Option<::prost_types::Timestamp>, -} -/// Plugin specific execution event information. 
For tasks like Python, Hive, Spark, DynamicJob. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskExecutionEvent { - /// ID of the task. In combination with the retryAttempt this will indicate - /// the task execution uniquely for a given parent node execution. - #[prost(message, optional, tag = "1")] - pub task_id: ::core::option::Option, - /// A task execution is always kicked off by a node execution, the event consumer - /// will use the parent_id to relate the task to it's parent node execution - #[prost(message, optional, tag = "2")] - pub parent_node_execution_id: ::core::option::Option< - super::core::NodeExecutionIdentifier, - >, - /// retry attempt number for this task, ie., 2 for the second attempt - #[prost(uint32, tag = "3")] - pub retry_attempt: u32, - /// Phase associated with the event - #[prost(enumeration = "super::core::task_execution::Phase", tag = "4")] - pub phase: i32, - /// id of the process that sent this event, mainly for trace debugging - #[prost(string, tag = "5")] - pub producer_id: ::prost::alloc::string::String, - /// log information for the task execution - #[prost(message, repeated, tag = "6")] - pub logs: ::prost::alloc::vec::Vec, - /// This timestamp represents when the original event occurred, it is generated - /// by the executor of the task. - #[prost(message, optional, tag = "7")] - pub occurred_at: ::core::option::Option<::prost_types::Timestamp>, - /// Custom data that the task plugin sends back. This is extensible to allow various plugins in the system. - #[prost(message, optional, tag = "11")] - pub custom_info: ::core::option::Option<::prost_types::Struct>, - /// Some phases, like RUNNING, can send multiple events with changed metadata (new logs, additional custom_info, etc) - /// that should be recorded regardless of the lack of phase change. - /// The version field should be incremented when metadata changes across the duration of an individual phase. - #[prost(uint32, tag = "12")] - pub phase_version: u32, - /// An optional explanation for the phase transition. - /// Deprecated: Use reasons instead. - #[deprecated] - #[prost(string, tag = "13")] - pub reason: ::prost::alloc::string::String, - /// An optional list of explanations for the phase transition. - #[prost(message, repeated, tag = "21")] - pub reasons: ::prost::alloc::vec::Vec, - /// A predefined yet extensible Task type identifier. If the task definition is already registered in flyte admin - /// this type will be identical, but not all task executions necessarily use pre-registered definitions and this - /// type is useful to render the task in the UI, filter task executions, etc. - #[prost(string, tag = "14")] - pub task_type: ::prost::alloc::string::String, - /// Metadata around how a task was executed. - #[prost(message, optional, tag = "16")] - pub metadata: ::core::option::Option, - /// The event version is used to indicate versioned changes in how data is reported using this - /// proto message. For example, event_verison > 0 means that maps tasks report logs using the - /// TaskExecutionMetadata ExternalResourceInfo fields for each subtask rather than the TaskLog - /// in this message. - #[prost(int32, tag = "18")] - pub event_version: i32, - /// This timestamp represents the instant when the event was reported by the executing framework. For example, a k8s - /// pod task may be marked completed at (ie. 
`occurred_at`) the instant the container running user code completes, - /// but this event will not be reported until the pod is marked as completed. Extracting both of these timestamps - /// facilitates a more accurate portrayal of the evaluation time-series. - #[prost(message, optional, tag = "20")] - pub reported_at: ::core::option::Option<::prost_types::Timestamp>, - #[prost(oneof = "task_execution_event::InputValue", tags = "8, 19")] - pub input_value: ::core::option::Option, - #[prost(oneof = "task_execution_event::OutputResult", tags = "9, 10, 17")] - pub output_result: ::core::option::Option, -} -/// Nested message and enum types in `TaskExecutionEvent`. -pub mod task_execution_event { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum InputValue { - /// URI of the input file, it encodes all the information - /// including Cloud source provider. ie., s3://... - #[prost(string, tag = "8")] - InputUri(::prost::alloc::string::String), - /// Raw input data consumed by this task execution. - #[prost(message, tag = "19")] - InputData(super::super::core::LiteralMap), - } - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum OutputResult { - /// URI to the output of the execution, it will be in a format that encodes all the information - /// including Cloud source provider. ie., s3://... - #[prost(string, tag = "9")] - OutputUri(::prost::alloc::string::String), - /// Error information for the execution - #[prost(message, tag = "10")] - Error(super::super::core::ExecutionError), - /// Raw output data produced by this task execution. - #[prost(message, tag = "17")] - OutputData(super::super::core::LiteralMap), - } -} -/// This message contains metadata about external resources produced or used by a specific task execution. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExternalResourceInfo { - /// Identifier for an external resource created by this task execution, for example Qubole query ID or presto query ids. - #[prost(string, tag = "1")] - pub external_id: ::prost::alloc::string::String, - /// A unique index for the external resource with respect to all external resources for this task. Although the - /// identifier may change between task reporting events or retries, this will remain the same to enable aggregating - /// information from multiple reports. - #[prost(uint32, tag = "2")] - pub index: u32, - /// Retry attempt number for this external resource, ie., 2 for the second attempt - #[prost(uint32, tag = "3")] - pub retry_attempt: u32, - /// Phase associated with the external resource - #[prost(enumeration = "super::core::task_execution::Phase", tag = "4")] - pub phase: i32, - /// Captures the status of caching for this external resource execution. - #[prost(enumeration = "super::core::CatalogCacheStatus", tag = "5")] - pub cache_status: i32, - /// log information for the external resource execution - #[prost(message, repeated, tag = "6")] - pub logs: ::prost::alloc::vec::Vec, -} -/// This message holds task execution metadata specific to resource allocation used to manage concurrent -/// executions for a project namespace. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ResourcePoolInfo { - /// Unique resource ID used to identify this execution when allocating a token. 
- #[prost(string, tag = "1")] - pub allocation_token: ::prost::alloc::string::String, - /// Namespace under which this task execution requested an allocation token. - #[prost(string, tag = "2")] - pub namespace: ::prost::alloc::string::String, -} -/// Holds metadata around how a task was executed. -/// As a task transitions across event phases during execution some attributes, such its generated name, generated external resources, -/// and more may grow in size but not change necessarily based on the phase transition that sparked the event update. -/// Metadata is a container for these attributes across the task execution lifecycle. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskExecutionMetadata { - /// Unique, generated name for this task execution used by the backend. - #[prost(string, tag = "1")] - pub generated_name: ::prost::alloc::string::String, - /// Additional data on external resources on other back-ends or platforms (e.g. Hive, Qubole, etc) launched by this task execution. - #[prost(message, repeated, tag = "2")] - pub external_resources: ::prost::alloc::vec::Vec, - /// Includes additional data on concurrent resource management used during execution.. - /// This is a repeated field because a plugin can request multiple resource allocations during execution. - #[prost(message, repeated, tag = "3")] - pub resource_pool_info: ::prost::alloc::vec::Vec, - /// The identifier of the plugin used to execute this task. - #[prost(string, tag = "4")] - pub plugin_identifier: ::prost::alloc::string::String, - #[prost(enumeration = "task_execution_metadata::InstanceClass", tag = "16")] - pub instance_class: i32, -} -/// Nested message and enum types in `TaskExecutionMetadata`. -pub mod task_execution_metadata { - /// Includes the broad category of machine used for this specific task execution. - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum InstanceClass { - /// The default instance class configured for the flyte application platform. - Default = 0, - /// The instance class configured for interruptible tasks. - Interruptible = 1, - } - impl InstanceClass { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - InstanceClass::Default => "DEFAULT", - InstanceClass::Interruptible => "INTERRUPTIBLE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "DEFAULT" => Some(Self::Default), - "INTERRUPTIBLE" => Some(Self::Interruptible), - _ => None, - } - } - } -} diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.kubeflow.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.kubeflow.rs deleted file mode 100644 index 2c948f89ac..0000000000 --- a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.kubeflow.rs +++ /dev/null @@ -1,207 +0,0 @@ -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct RunPolicy { - /// Defines the policy to kill pods after the job completes. Default to None. - #[prost(enumeration = "CleanPodPolicy", tag = "1")] - pub clean_pod_policy: i32, - /// TTL to clean up jobs. Default to infinite. 
- #[prost(int32, tag = "2")] - pub ttl_seconds_after_finished: i32, - /// Specifies the duration in seconds relative to the startTime that the job may be active - /// before the system tries to terminate it; value must be positive integer. - #[prost(int32, tag = "3")] - pub active_deadline_seconds: i32, - /// Number of retries before marking this job failed. - #[prost(int32, tag = "4")] - pub backoff_limit: i32, -} -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum RestartPolicy { - Never = 0, - OnFailure = 1, - Always = 2, -} -impl RestartPolicy { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - RestartPolicy::Never => "RESTART_POLICY_NEVER", - RestartPolicy::OnFailure => "RESTART_POLICY_ON_FAILURE", - RestartPolicy::Always => "RESTART_POLICY_ALWAYS", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "RESTART_POLICY_NEVER" => Some(Self::Never), - "RESTART_POLICY_ON_FAILURE" => Some(Self::OnFailure), - "RESTART_POLICY_ALWAYS" => Some(Self::Always), - _ => None, - } - } -} -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum CleanPodPolicy { - CleanpodPolicyNone = 0, - CleanpodPolicyRunning = 1, - CleanpodPolicyAll = 2, -} -impl CleanPodPolicy { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - CleanPodPolicy::CleanpodPolicyNone => "CLEANPOD_POLICY_NONE", - CleanPodPolicy::CleanpodPolicyRunning => "CLEANPOD_POLICY_RUNNING", - CleanPodPolicy::CleanpodPolicyAll => "CLEANPOD_POLICY_ALL", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "CLEANPOD_POLICY_NONE" => Some(Self::CleanpodPolicyNone), - "CLEANPOD_POLICY_RUNNING" => Some(Self::CleanpodPolicyRunning), - "CLEANPOD_POLICY_ALL" => Some(Self::CleanpodPolicyAll), - _ => None, - } - } -} -/// Proto for plugin that enables distributed training using -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DistributedMpiTrainingTask { - /// Worker replicas spec - #[prost(message, optional, tag = "1")] - pub worker_replicas: ::core::option::Option, - /// Master replicas spec - #[prost(message, optional, tag = "2")] - pub launcher_replicas: ::core::option::Option, - /// RunPolicy encapsulates various runtime policies of the distributed training - /// job, for example how to clean up resources and how long the job can stay - /// active. 
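-// A sketch of a filled-in `RunPolicy` (values are illustrative, not defaults):
-//
-//     let policy = RunPolicy {
-//         clean_pod_policy: CleanPodPolicy::CleanpodPolicyRunning as i32,
-//         ttl_seconds_after_finished: 3600,
-//         active_deadline_seconds: 7200,
-//         backoff_limit: 3,
-//     };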
- #[prost(message, optional, tag = "3")] - pub run_policy: ::core::option::Option, - /// Number of slots per worker - #[prost(int32, tag = "4")] - pub slots: i32, -} -/// Replica specification for distributed MPI training -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DistributedMpiTrainingReplicaSpec { - /// Number of replicas - #[prost(int32, tag = "1")] - pub replicas: i32, - /// Image used for the replica group - #[prost(string, tag = "2")] - pub image: ::prost::alloc::string::String, - /// Resources required for the replica group - #[prost(message, optional, tag = "3")] - pub resources: ::core::option::Option, - /// Restart policy determines whether pods will be restarted when they exit - #[prost(enumeration = "RestartPolicy", tag = "4")] - pub restart_policy: i32, - /// MPI sometimes requires different command set for different replica groups - #[prost(string, repeated, tag = "5")] - pub command: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, -} -/// Custom proto for torch elastic config for distributed training using -/// -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ElasticConfig { - #[prost(string, tag = "1")] - pub rdzv_backend: ::prost::alloc::string::String, - #[prost(int32, tag = "2")] - pub min_replicas: i32, - #[prost(int32, tag = "3")] - pub max_replicas: i32, - #[prost(int32, tag = "4")] - pub nproc_per_node: i32, - #[prost(int32, tag = "5")] - pub max_restarts: i32, -} -/// Proto for plugin that enables distributed training using -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DistributedPyTorchTrainingTask { - /// Worker replicas spec - #[prost(message, optional, tag = "1")] - pub worker_replicas: ::core::option::Option, - /// Master replicas spec, master replicas can only have 1 replica - #[prost(message, optional, tag = "2")] - pub master_replicas: ::core::option::Option, - /// RunPolicy encapsulates various runtime policies of the distributed training - /// job, for example how to clean up resources and how long the job can stay - /// active. 
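-// A sketch of an `ElasticConfig` for a torch elastic job; "c10d" is a commonly
-// used rendezvous backend, and all values here are illustrative:
-//
-//     let elastic = ElasticConfig {
-//         rdzv_backend: "c10d".to_string(),
-//         min_replicas: 1,
-//         max_replicas: 4,
-//         nproc_per_node: 8,
-//         max_restarts: 3,
-//     };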
- #[prost(message, optional, tag = "3")] - pub run_policy: ::core::option::Option, - /// config for an elastic pytorch job - #[prost(message, optional, tag = "4")] - pub elastic_config: ::core::option::Option, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DistributedPyTorchTrainingReplicaSpec { - /// Number of replicas - #[prost(int32, tag = "1")] - pub replicas: i32, - /// Image used for the replica group - #[prost(string, tag = "2")] - pub image: ::prost::alloc::string::String, - /// Resources required for the replica group - #[prost(message, optional, tag = "3")] - pub resources: ::core::option::Option, - /// RestartPolicy determines whether pods will be restarted when they exit - #[prost(enumeration = "RestartPolicy", tag = "4")] - pub restart_policy: i32, -} -/// Proto for plugin that enables distributed training using -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DistributedTensorflowTrainingTask { - /// Worker replicas spec - #[prost(message, optional, tag = "1")] - pub worker_replicas: ::core::option::Option< - DistributedTensorflowTrainingReplicaSpec, - >, - /// Parameter server replicas spec - #[prost(message, optional, tag = "2")] - pub ps_replicas: ::core::option::Option, - /// Chief replicas spec - #[prost(message, optional, tag = "3")] - pub chief_replicas: ::core::option::Option, - /// RunPolicy encapsulates various runtime policies of the distributed training - /// job, for example how to clean up resources and how long the job can stay - /// active. - #[prost(message, optional, tag = "4")] - pub run_policy: ::core::option::Option, - /// Evaluator replicas spec - #[prost(message, optional, tag = "5")] - pub evaluator_replicas: ::core::option::Option< - DistributedTensorflowTrainingReplicaSpec, - >, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DistributedTensorflowTrainingReplicaSpec { - /// Number of replicas - #[prost(int32, tag = "1")] - pub replicas: i32, - /// Image used for the replica group - #[prost(string, tag = "2")] - pub image: ::prost::alloc::string::String, - /// Resources required for the replica group - #[prost(message, optional, tag = "3")] - pub resources: ::core::option::Option, - /// RestartPolicy Determines whether pods will be restarted when they exit - #[prost(enumeration = "RestartPolicy", tag = "4")] - pub restart_policy: i32, -} diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.rs deleted file mode 100644 index b1ab41f0f0..0000000000 --- a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.plugins.rs +++ /dev/null @@ -1,346 +0,0 @@ -/// This message works with the 'presto' task type in the SDK and is the object that will be in the 'custom' field -/// of a Presto task's TaskTemplate -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PrestoQuery { - #[prost(string, tag = "1")] - pub routing_group: ::prost::alloc::string::String, - #[prost(string, tag = "2")] - pub catalog: ::prost::alloc::string::String, - #[prost(string, tag = "3")] - pub schema: ::prost::alloc::string::String, - #[prost(string, tag = "4")] - pub statement: ::prost::alloc::string::String, -} -/// Defines a query to execute on a hive cluster. 
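-// A sketch of the 'custom' payload for a Presto task as described above (all
-// values are illustrative):
-//
-//     let query = PrestoQuery {
-//         routing_group: "primary".to_string(),
-//         catalog: "hive".to_string(),
-//         schema: "default".to_string(),
-//         statement: "SELECT 1".to_string(),
-//     };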
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct HiveQuery { - #[prost(string, tag = "1")] - pub query: ::prost::alloc::string::String, - #[prost(uint32, tag = "2")] - pub timeout_sec: u32, - #[prost(uint32, tag = "3")] - pub retry_count: u32, -} -/// Defines a collection of hive queries. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct HiveQueryCollection { - #[prost(message, repeated, tag = "2")] - pub queries: ::prost::alloc::vec::Vec, -} -/// This message works with the 'hive' task type in the SDK and is the object that will be in the 'custom' field -/// of a hive task's TaskTemplate -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct QuboleHiveJob { - #[prost(string, tag = "1")] - pub cluster_label: ::prost::alloc::string::String, - #[deprecated] - #[prost(message, optional, tag = "2")] - pub query_collection: ::core::option::Option, - #[prost(string, repeated, tag = "3")] - pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - #[prost(message, optional, tag = "4")] - pub query: ::core::option::Option, -} -/// RayJobSpec defines the desired state of RayJob -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct RayJob { - /// RayClusterSpec is the cluster template to run the job - #[prost(message, optional, tag = "1")] - pub ray_cluster: ::core::option::Option, - /// runtime_env is base64 encoded. - /// Ray runtime environments: - #[prost(string, tag = "2")] - pub runtime_env: ::prost::alloc::string::String, - /// shutdown_after_job_finishes specifies whether the RayCluster should be deleted after the RayJob finishes. - #[prost(bool, tag = "3")] - pub shutdown_after_job_finishes: bool, - /// ttl_seconds_after_finished specifies the number of seconds after which the RayCluster will be deleted after the RayJob finishes. - #[prost(int32, tag = "4")] - pub ttl_seconds_after_finished: i32, -} -/// Define Ray cluster defines the desired state of RayCluster -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct RayCluster { - /// HeadGroupSpecs are the spec for the head pod - #[prost(message, optional, tag = "1")] - pub head_group_spec: ::core::option::Option, - /// WorkerGroupSpecs are the specs for the worker pods - #[prost(message, repeated, tag = "2")] - pub worker_group_spec: ::prost::alloc::vec::Vec, - /// Whether to enable autoscaling. - #[prost(bool, tag = "3")] - pub enable_autoscaling: bool, -} -/// HeadGroupSpec are the spec for the head pod -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct HeadGroupSpec { - /// Optional. RayStartParams are the params of the start command: address, object-store-memory. - /// Refer to - #[prost(map = "string, string", tag = "1")] - pub ray_start_params: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, -} -/// WorkerGroupSpec are the specs for the worker pods -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct WorkerGroupSpec { - /// Required. RayCluster can have multiple worker groups, and it distinguishes them by name - #[prost(string, tag = "1")] - pub group_name: ::prost::alloc::string::String, - /// Required. Desired replicas of the worker group. Defaults to 1. 
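-// A sketch of a minimal `RayCluster` with head-node start params (the map keys
-// are ray start flags as noted above; values are illustrative):
-//
-//     let mut params = ::std::collections::HashMap::new();
-//     params.insert("object-store-memory".to_string(), "100000000".to_string());
-//     let cluster = RayCluster {
-//         head_group_spec: Some(HeadGroupSpec { ray_start_params: params }),
-//         worker_group_spec: vec![],
-//         enable_autoscaling: false,
-//     };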
- #[prost(int32, tag = "2")] - pub replicas: i32, - /// Optional. Min replicas of the worker group. MinReplicas defaults to 1. - #[prost(int32, tag = "3")] - pub min_replicas: i32, - /// Optional. Max replicas of the worker group. MaxReplicas defaults to maxInt32 - #[prost(int32, tag = "4")] - pub max_replicas: i32, - /// Optional. RayStartParams are the params of the start command: address, object-store-memory. - /// Refer to - #[prost(map = "string, string", tag = "5")] - pub ray_start_params: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SparkApplication {} -/// Nested message and enum types in `SparkApplication`. -pub mod spark_application { - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Type { - Python = 0, - Java = 1, - Scala = 2, - R = 3, - } - impl Type { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Type::Python => "PYTHON", - Type::Java => "JAVA", - Type::Scala => "SCALA", - Type::R => "R", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "PYTHON" => Some(Self::Python), - "JAVA" => Some(Self::Java), - "SCALA" => Some(Self::Scala), - "R" => Some(Self::R), - _ => None, - } - } - } -} -/// Custom Proto for Spark Plugin. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SparkJob { - #[prost(enumeration = "spark_application::Type", tag = "1")] - pub application_type: i32, - #[prost(string, tag = "2")] - pub main_application_file: ::prost::alloc::string::String, - #[prost(string, tag = "3")] - pub main_class: ::prost::alloc::string::String, - #[prost(map = "string, string", tag = "4")] - pub spark_conf: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, - #[prost(map = "string, string", tag = "5")] - pub hadoop_conf: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, - /// Executor path for Python jobs. - #[prost(string, tag = "6")] - pub executor_path: ::prost::alloc::string::String, - /// Databricks job configuration. - /// Config structure can be found here. - #[prost(message, optional, tag = "7")] - pub databricks_conf: ::core::option::Option<::prost_types::Struct>, - /// Databricks access token. - /// This token can be set in either flytepropeller or flytekit. - #[prost(string, tag = "8")] - pub databricks_token: ::prost::alloc::string::String, - /// Domain name of your deployment. Use the form .cloud.databricks.com. - /// This instance name can be set in either flytepropeller or flytekit. - #[prost(string, tag = "9")] - pub databricks_instance: ::prost::alloc::string::String, -} -/// Describes a job that can process independent pieces of data concurrently. Multiple copies of the runnable component -/// will be executed concurrently. 
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct ArrayJob {
-    /// Defines the maximum number of instances to bring up concurrently at any given point. Note that this is an
-    /// optimistic restriction and that, due to network partitioning or other failures, the actual number of currently
-    /// running instances might be more. This has to be a positive number if assigned. Default value is size.
-    #[prost(int64, tag = "1")]
-    pub parallelism: i64,
-    /// Defines the number of instances to launch at most. This number should match the size of the input if the job
-    /// requires processing of all input data. This has to be a positive number.
-    /// In the case this is not defined, the back-end will determine the size at run-time by reading the inputs.
-    #[prost(int64, tag = "2")]
-    pub size: i64,
-    #[prost(oneof = "array_job::SuccessCriteria", tags = "3, 4")]
-    pub success_criteria: ::core::option::Option<array_job::SuccessCriteria>,
-}
-/// Nested message and enum types in `ArrayJob`.
-pub mod array_job {
-    #[allow(clippy::derive_partial_eq_without_eq)]
-    #[derive(Clone, PartialEq, ::prost::Oneof)]
-    pub enum SuccessCriteria {
-        /// An absolute number of the minimum number of successful completions of subtasks. As soon as this criteria is met,
-        /// the array job will be marked as successful and outputs will be computed. This has to be a non-negative number if
-        /// assigned. Default value is size (if specified).
-        #[prost(int64, tag = "3")]
-        MinSuccesses(i64),
-        /// If the array job size is not known beforehand, the min_success_ratio can instead be used to determine when an array
-        /// job can be marked successful.
-        #[prost(float, tag = "4")]
-        MinSuccessRatio(f32),
-    }
-}
-/// Represents an Execution that was launched and could be waited on.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Waitable {
-    #[prost(message, optional, tag = "1")]
-    pub wf_exec_id: ::core::option::Option<super::core::WorkflowExecutionIdentifier>,
-    #[prost(enumeration = "super::core::workflow_execution::Phase", tag = "2")]
-    pub phase: i32,
-    #[prost(string, tag = "3")]
-    pub workflow_id: ::prost::alloc::string::String,
-}
-/// Custom Proto for Dask Plugin.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct DaskJob {
-    /// Spec for the scheduler pod.
-    #[prost(message, optional, tag = "1")]
-    pub scheduler: ::core::option::Option<DaskScheduler>,
-    /// Spec of the default worker group.
-    #[prost(message, optional, tag = "2")]
-    pub workers: ::core::option::Option<DaskWorkerGroup>,
-}
-/// Specification for the scheduler pod.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct DaskScheduler {
-    /// Optional image to use. If unset, will use the default image.
-    #[prost(string, tag = "1")]
-    pub image: ::prost::alloc::string::String,
-    /// Resources assigned to the scheduler pod.
-    #[prost(message, optional, tag = "2")]
-    pub resources: ::core::option::Option<super::core::Resources>,
-}
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct DaskWorkerGroup {
-    /// Number of workers in the group.
-    #[prost(uint32, tag = "1")]
-    pub number_of_workers: u32,
-    /// Optional image to use for the pods of the worker group. If unset, will use the default image.
-    #[prost(string, tag = "2")]
-    pub image: ::prost::alloc::string::String,
-    /// Resources assigned to all pods of the worker group.
-    /// As per
-    /// it is advised to only set limits.
If requests are not explicitly set, the plugin will make - /// sure to set requests==limits. - /// The plugin sets ` --memory-limit` as well as `--nthreads` for the workers according to the limit. - #[prost(message, optional, tag = "3")] - pub resources: ::core::option::Option, -} -/// MPI operator proposal -/// Custom proto for plugin that enables distributed training using -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DistributedMpiTrainingTask { - /// number of worker spawned in the cluster for this job - #[prost(int32, tag = "1")] - pub num_workers: i32, - /// number of launcher replicas spawned in the cluster for this job - /// The launcher pod invokes mpirun and communicates with worker pods through MPI. - #[prost(int32, tag = "2")] - pub num_launcher_replicas: i32, - /// number of slots per worker used in hostfile. - /// The available slots (GPUs) in each pod. - #[prost(int32, tag = "3")] - pub slots: i32, -} -/// Custom proto for torch elastic config for distributed training using -/// -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ElasticConfig { - #[prost(string, tag = "1")] - pub rdzv_backend: ::prost::alloc::string::String, - #[prost(int32, tag = "2")] - pub min_replicas: i32, - #[prost(int32, tag = "3")] - pub max_replicas: i32, - #[prost(int32, tag = "4")] - pub nproc_per_node: i32, - #[prost(int32, tag = "5")] - pub max_restarts: i32, -} -/// Custom proto for plugin that enables distributed training using -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DistributedPyTorchTrainingTask { - /// number of worker replicas spawned in the cluster for this job - #[prost(int32, tag = "1")] - pub workers: i32, - /// config for an elastic pytorch job - /// - #[prost(message, optional, tag = "2")] - pub elastic_config: ::core::option::Option, -} -/// Custom proto for plugin that enables distributed training using -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct DistributedTensorflowTrainingTask { - /// number of worker replicas spawned in the cluster for this job - #[prost(int32, tag = "1")] - pub workers: i32, - /// PS -> Parameter server - /// number of ps replicas spawned in the cluster for this job - #[prost(int32, tag = "2")] - pub ps_replicas: i32, - /// number of chief replicas spawned in the cluster for this job - #[prost(int32, tag = "3")] - pub chief_replicas: i32, - /// number of evaluator replicas spawned in the cluster for this job - #[prost(int32, tag = "4")] - pub evaluator_replicas: i32, -} diff --git a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.service.rs b/flyrs/src/gen/pb_rust/flyteidl/flyteidl.service.rs deleted file mode 100644 index 09b0c3d600..0000000000 --- a/flyrs/src/gen/pb_rust/flyteidl/flyteidl.service.rs +++ /dev/null @@ -1,3509 +0,0 @@ -/// Generated client implementations. -pub mod signal_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// SignalService defines an RPC Service that may create, update, and retrieve signal(s). - #[derive(Debug, Clone)] - pub struct SignalServiceClient { - inner: tonic::client::Grpc, - } - impl SignalServiceClient { - /// Attempt to create a new client by connecting to a given endpoint. 
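// [Reviewer sketch, not part of the generated file] Example of filling in the
// elastic PyTorch training config defined above. Assumes the generated
// `DistributedPyTorchTrainingTask` and `ElasticConfig` types are in scope;
// all values are hypothetical.
fn example_pytorch_task() -> DistributedPyTorchTrainingTask {
    DistributedPyTorchTrainingTask {
        workers: 4,
        elastic_config: Some(ElasticConfig {
            // Torch elastic rendezvous backend, e.g. "c10d".
            rdzv_backend: "c10d".to_string(),
            min_replicas: 2,
            max_replicas: 4,
            nproc_per_node: 8,
            max_restarts: 3,
        }),
    }
}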
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl SignalServiceClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> SignalServiceClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - SignalServiceClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Fetches or creates a :ref:`ref_flyteidl.admin.Signal`. - pub async fn get_or_create_signal( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::SignalGetOrCreateRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.SignalService/GetOrCreateSignal", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.SignalService", - "GetOrCreateSignal", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Fetch a list of :ref:`ref_flyteidl.admin.Signal` definitions. 
- pub async fn list_signals( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.SignalService/ListSignals", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.SignalService", "ListSignals"), - ); - self.inner.unary(req, path, codec).await - } - /// Sets the value on a :ref:`ref_flyteidl.admin.Signal` definition - pub async fn set_signal( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.SignalService/SetSignal", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("flyteidl.service.SignalService", "SetSignal")); - self.inner.unary(req, path, codec).await - } - } -} -/// Represents a request structure to create task. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskCreateRequest { - /// The inputs required to start the execution. All required inputs must be - /// included in this map. If not required and not provided, defaults apply. - /// +optional - #[prost(message, optional, tag = "1")] - pub inputs: ::core::option::Option, - /// Template of the task that encapsulates all the metadata of the task. - #[prost(message, optional, tag = "2")] - pub template: ::core::option::Option, - /// Prefix for where task output data will be written. (e.g. s3://my-bucket/randomstring) - #[prost(string, tag = "3")] - pub output_prefix: ::prost::alloc::string::String, -} -/// Represents a create response structure. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskCreateResponse { - #[prost(string, tag = "1")] - pub job_id: ::prost::alloc::string::String, -} -/// A message used to fetch a job state from backend plugin server. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskGetRequest { - /// A predefined yet extensible Task type identifier. - #[prost(string, tag = "1")] - pub task_type: ::prost::alloc::string::String, - /// The unique id identifying the job. - #[prost(string, tag = "2")] - pub job_id: ::prost::alloc::string::String, -} -/// Response to get an individual task state. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskGetResponse { - /// The state of the execution is used to control its visibility in the UI/CLI. - #[prost(enumeration = "State", tag = "1")] - pub state: i32, - /// The outputs of the execution. It's typically used by sql task. Flyteplugins service will create a - /// Structured dataset pointing to the query result table. - /// +optional - #[prost(message, optional, tag = "2")] - pub outputs: ::core::option::Option, -} -/// A message used to delete a task. 
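// [Reviewer sketch, not part of the generated file] Typical use of the generated
// SignalServiceClient above: connect over a channel and set a signal value.
// The endpoint is hypothetical and the request fields are left at their
// defaults; a real caller fills in the signal id and literal value.
async fn example_set_signal() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = signal_service_client::SignalServiceClient::connect(
        "http://localhost:8089",
    )
    .await?;
    let request = super::admin::SignalSetRequest::default();
    let response = client.set_signal(request).await?.into_inner();
    println!("set signal: {:?}", response);
    Ok(())
}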
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskDeleteRequest { - /// A predefined yet extensible Task type identifier. - #[prost(string, tag = "1")] - pub task_type: ::prost::alloc::string::String, - /// The unique id identifying the job. - #[prost(string, tag = "2")] - pub job_id: ::prost::alloc::string::String, -} -/// Response to delete a task. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TaskDeleteResponse {} -/// The state of the execution is used to control its visibility in the UI/CLI. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum State { - RetryableFailure = 0, - PermanentFailure = 1, - Pending = 2, - Running = 3, - Succeeded = 4, -} -impl State { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - State::RetryableFailure => "RETRYABLE_FAILURE", - State::PermanentFailure => "PERMANENT_FAILURE", - State::Pending => "PENDING", - State::Running => "RUNNING", - State::Succeeded => "SUCCEEDED", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "RETRYABLE_FAILURE" => Some(Self::RetryableFailure), - "PERMANENT_FAILURE" => Some(Self::PermanentFailure), - "PENDING" => Some(Self::Pending), - "RUNNING" => Some(Self::Running), - "SUCCEEDED" => Some(Self::Succeeded), - _ => None, - } - } -} -/// Generated client implementations. -pub mod external_plugin_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// ExternalPluginService defines an RPC Service that allows propeller to send the request to the backend plugin server. - #[derive(Debug, Clone)] - pub struct ExternalPluginServiceClient { - inner: tonic::client::Grpc, - } - impl ExternalPluginServiceClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl ExternalPluginServiceClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> ExternalPluginServiceClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - ExternalPluginServiceClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. 
- #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Send a task create request to the backend plugin server. - pub async fn create_task( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.ExternalPluginService/CreateTask", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.ExternalPluginService", - "CreateTask", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Get job status. - pub async fn get_task( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.ExternalPluginService/GetTask", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.ExternalPluginService", "GetTask"), - ); - self.inner.unary(req, path, codec).await - } - /// Delete the task resource. - pub async fn delete_task( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.ExternalPluginService/DeleteTask", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.ExternalPluginService", - "DeleteTask", - ), - ); - self.inner.unary(req, path, codec).await - } - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateUploadLocationResponse { - /// SignedUrl specifies the url to use to upload content to (e.g. ) - #[prost(string, tag = "1")] - pub signed_url: ::prost::alloc::string::String, - /// NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar) - #[prost(string, tag = "2")] - pub native_url: ::prost::alloc::string::String, - /// ExpiresAt defines when will the signed URL expires. 
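// [Reviewer sketch, not part of the generated file] How propeller-side code
// might drive the generated ExternalPluginServiceClient: create a task, then
// query its state. Endpoint, task type and request contents are hypothetical;
// `state.state` is the raw i32 of the `State` enum defined above.
async fn example_plugin_roundtrip() -> Result<(), Box<dyn std::error::Error>> {
    let mut client =
        external_plugin_service_client::ExternalPluginServiceClient::connect(
            "http://localhost:8000",
        )
        .await?;
    let created = client
        .create_task(TaskCreateRequest::default())
        .await?
        .into_inner();
    let state = client
        .get_task(TaskGetRequest {
            task_type: "bigquery".to_string(),
            job_id: created.job_id.clone(),
        })
        .await?
        .into_inner();
    println!("job {} is in state {}", created.job_id, state.state);
    Ok(())
}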
- #[prost(message, optional, tag = "3")] - pub expires_at: ::core::option::Option<::prost_types::Timestamp>, - /// Data proxy generates these headers for client, and they have to add these headers to the request when uploading the file. - #[prost(map = "string, string", tag = "4")] - pub headers: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, -} -/// CreateUploadLocationRequest specified request for the CreateUploadLocation API. -/// The implementation in data proxy service will create the s3 location with some server side configured prefixes, -/// and then: -/// - project/domain/(a deterministic str representation of the content_md5)/filename (if present); OR -/// - project/domain/filename_root (if present)/filename (if present). -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateUploadLocationRequest { - /// Project to create the upload location for - /// +required - #[prost(string, tag = "1")] - pub project: ::prost::alloc::string::String, - /// Domain to create the upload location for. - /// +required - #[prost(string, tag = "2")] - pub domain: ::prost::alloc::string::String, - /// Filename specifies a desired suffix for the generated location. E.g. `file.py` or `pre/fix/file.zip`. - /// +optional. By default, the service will generate a consistent name based on the provided parameters. - #[prost(string, tag = "3")] - pub filename: ::prost::alloc::string::String, - /// ExpiresIn defines a requested expiration duration for the generated url. The request will be rejected if this - /// exceeds the platform allowed max. - /// +optional. The default value comes from a global config. - #[prost(message, optional, tag = "4")] - pub expires_in: ::core::option::Option<::prost_types::Duration>, - /// ContentMD5 restricts the upload location to the specific MD5 provided. The ContentMD5 will also appear in the - /// generated path. - /// +required - #[prost(bytes = "vec", tag = "5")] - pub content_md5: ::prost::alloc::vec::Vec, - /// If present, data proxy will use this string in lieu of the md5 hash in the path. When the filename is also included - /// this makes the upload location deterministic. The native url will still be prefixed by the upload location prefix - /// in data proxy config. This option is useful when uploading multiple files. - /// +optional - #[prost(string, tag = "6")] - pub filename_root: ::prost::alloc::string::String, - /// If true, the data proxy will add content_md5 to the metadata to the signed URL and - /// it will force clients to add this metadata to the object. - /// This make sure dataproxy is backward compatible with the old flytekit. - #[prost(bool, tag = "7")] - pub add_content_md5_metadata: bool, - /// Optional, org key applied to the resource. - #[prost(string, tag = "8")] - pub org: ::prost::alloc::string::String, -} -/// CreateDownloadLocationRequest specified request for the CreateDownloadLocation API. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateDownloadLocationRequest { - /// NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar) - #[prost(string, tag = "1")] - pub native_url: ::prost::alloc::string::String, - /// ExpiresIn defines a requested expiration duration for the generated url. The request will be rejected if this - /// exceeds the platform allowed max. - /// +optional. 
The default value comes from a global config. - #[prost(message, optional, tag = "2")] - pub expires_in: ::core::option::Option<::prost_types::Duration>, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateDownloadLocationResponse { - /// SignedUrl specifies the url to use to download content from (e.g. ) - #[prost(string, tag = "1")] - pub signed_url: ::prost::alloc::string::String, - /// ExpiresAt defines when will the signed URL expires. - #[prost(message, optional, tag = "2")] - pub expires_at: ::core::option::Option<::prost_types::Timestamp>, -} -/// CreateDownloadLinkRequest defines the request parameters to create a download link (signed url) -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateDownloadLinkRequest { - /// ArtifactType of the artifact requested. - #[prost(enumeration = "ArtifactType", tag = "1")] - pub artifact_type: i32, - /// ExpiresIn defines a requested expiration duration for the generated url. The request will be rejected if this - /// exceeds the platform allowed max. - /// +optional. The default value comes from a global config. - #[prost(message, optional, tag = "2")] - pub expires_in: ::core::option::Option<::prost_types::Duration>, - #[prost(oneof = "create_download_link_request::Source", tags = "3")] - pub source: ::core::option::Option, -} -/// Nested message and enum types in `CreateDownloadLinkRequest`. -pub mod create_download_link_request { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Source { - /// NodeId is the unique identifier for the node execution. For a task node, this will retrieve the output of the - /// most recent attempt of the task. - #[prost(message, tag = "3")] - NodeExecutionId(super::super::core::NodeExecutionIdentifier), - } -} -/// CreateDownloadLinkResponse defines the response for the generated links -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CreateDownloadLinkResponse { - /// SignedUrl specifies the url to use to download content from (e.g. ) - #[deprecated] - #[prost(string, repeated, tag = "1")] - pub signed_url: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// ExpiresAt defines when will the signed URL expire. - #[deprecated] - #[prost(message, optional, tag = "2")] - pub expires_at: ::core::option::Option<::prost_types::Timestamp>, - /// New wrapper object containing the signed urls and expiration time - #[prost(message, optional, tag = "3")] - pub pre_signed_urls: ::core::option::Option, -} -/// Wrapper object since the message is shared across this and the GetDataResponse -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PreSignedUrLs { - /// SignedUrl specifies the url to use to download content from (e.g. ) - #[prost(string, repeated, tag = "1")] - pub signed_url: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// ExpiresAt defines when will the signed URL expire. - #[prost(message, optional, tag = "2")] - pub expires_at: ::core::option::Option<::prost_types::Timestamp>, -} -/// General request artifact to retrieve data from a Flyte artifact url. 
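// [Reviewer sketch, not part of the generated file] Building a deterministic
// upload-location request as the doc comments above describe: with both
// `filename` and `filename_root` set, the generated native URL is stable
// across retries. All values are hypothetical; the MD5 digest is computed by
// the caller.
fn example_upload_request(md5_digest: Vec<u8>) -> CreateUploadLocationRequest {
    CreateUploadLocationRequest {
        project: "flytesnacks".to_string(),
        domain: "development".to_string(),
        filename: "script.py".to_string(),
        expires_in: Some(::prost_types::Duration { seconds: 3600, nanos: 0 }),
        // 16-byte MD5 of the payload; also embedded in the generated path.
        content_md5: md5_digest,
        // Replaces the md5 hash in the path, making the location deterministic.
        filename_root: "my-upload".to_string(),
        add_content_md5_metadata: true,
        org: String::new(),
    }
}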
-#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetDataRequest { - /// A unique identifier in the form of flyte:// that uniquely, for a given Flyte - /// backend, identifies a Flyte artifact (\[i\]nput, \[o\]output, flyte \[d\]eck, etc.). - /// e.g. flyte://v1/proj/development/execid/n2/0/i (for 0th task execution attempt input) - /// flyte://v1/proj/development/execid/n2/i (for node execution input) - /// flyte://v1/proj/development/execid/n2/o/o3 (the o3 output of the second node) - #[prost(string, tag = "1")] - pub flyte_url: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct GetDataResponse { - #[prost(oneof = "get_data_response::Data", tags = "1, 2, 3")] - pub data: ::core::option::Option, -} -/// Nested message and enum types in `GetDataResponse`. -pub mod get_data_response { - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Data { - /// literal map data will be returned - #[prost(message, tag = "1")] - LiteralMap(super::super::core::LiteralMap), - /// Flyte deck html will be returned as a signed url users can download - #[prost(message, tag = "2")] - PreSignedUrls(super::PreSignedUrLs), - /// Single literal will be returned. This is returned when the user/url requests a specific output or input - /// by name. See the o3 example above. - #[prost(message, tag = "3")] - Literal(super::super::core::Literal), - } -} -/// ArtifactType -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ArtifactType { - /// ARTIFACT_TYPE_UNDEFINED is the default, often invalid, value for the enum. - Undefined = 0, - /// ARTIFACT_TYPE_DECK refers to the deck html file optionally generated after a task, a workflow or a launch plan - /// finishes executing. - Deck = 1, -} -impl ArtifactType { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - ArtifactType::Undefined => "ARTIFACT_TYPE_UNDEFINED", - ArtifactType::Deck => "ARTIFACT_TYPE_DECK", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "ARTIFACT_TYPE_UNDEFINED" => Some(Self::Undefined), - "ARTIFACT_TYPE_DECK" => Some(Self::Deck), - _ => None, - } - } -} -/// Generated client implementations. -pub mod data_proxy_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// DataProxyService defines an RPC Service that allows access to user-data in a controlled manner. - #[derive(Debug, Clone)] - pub struct DataProxyServiceClient { - inner: tonic::client::Grpc, - } - impl DataProxyServiceClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl DataProxyServiceClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> DataProxyServiceClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - DataProxyServiceClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// CreateUploadLocation creates a signed url to upload artifacts to for a given project/domain. - pub async fn create_upload_location( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.DataProxyService/CreateUploadLocation", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.DataProxyService", - "CreateUploadLocation", - ), - ); - self.inner.unary(req, path, codec).await - } - /// CreateDownloadLocation creates a signed url to download artifacts. 
- pub async fn create_download_location( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.DataProxyService/CreateDownloadLocation", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.DataProxyService", - "CreateDownloadLocation", - ), - ); - self.inner.unary(req, path, codec).await - } - /// CreateDownloadLocation creates a signed url to download artifacts. - pub async fn create_download_link( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.DataProxyService/CreateDownloadLink", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.DataProxyService", - "CreateDownloadLink", - ), - ); - self.inner.unary(req, path, codec).await - } - pub async fn get_data( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.DataProxyService/GetData", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("flyteidl.service.DataProxyService", "GetData")); - self.inner.unary(req, path, codec).await - } - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UserInfoRequest {} -/// See the OpenID Connect spec at for more information. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UserInfoResponse { - /// Locally unique and never reassigned identifier within the Issuer for the End-User, which is intended to be consumed - /// by the Client. - #[prost(string, tag = "1")] - pub subject: ::prost::alloc::string::String, - /// Full name - #[prost(string, tag = "2")] - pub name: ::prost::alloc::string::String, - /// Shorthand name by which the End-User wishes to be referred to - #[prost(string, tag = "3")] - pub preferred_username: ::prost::alloc::string::String, - /// Given name(s) or first name(s) - #[prost(string, tag = "4")] - pub given_name: ::prost::alloc::string::String, - /// Surname(s) or last name(s) - #[prost(string, tag = "5")] - pub family_name: ::prost::alloc::string::String, - /// Preferred e-mail address - #[prost(string, tag = "6")] - pub email: ::prost::alloc::string::String, - /// Profile picture URL - #[prost(string, tag = "7")] - pub picture: ::prost::alloc::string::String, - /// Additional claims - #[prost(message, optional, tag = "8")] - pub additional_claims: ::core::option::Option<::prost_types::Struct>, -} -/// Generated client implementations. 
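// [Reviewer sketch, not part of the generated file] End-to-end use of the
// DataProxyService client above: request a signed upload URL, then PUT the
// file with any HTTP client. Endpoint is hypothetical; auth interceptors are
// omitted for brevity.
async fn example_upload_location() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = data_proxy_service_client::DataProxyServiceClient::connect(
        "http://localhost:8089",
    )
    .await?;
    let resp = client
        .create_upload_location(CreateUploadLocationRequest::default())
        .await?
        .into_inner();
    // `signed_url` is what the file is uploaded to directly; `native_url`
    // (e.g. s3://...) is what gets recorded in Flyte metadata.
    println!("PUT to {} -> stored at {}", resp.signed_url, resp.native_url);
    Ok(())
}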
-pub mod identity_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// IdentityService defines an RPC Service that interacts with user/app identities. - #[derive(Debug, Clone)] - pub struct IdentityServiceClient { - inner: tonic::client::Grpc, - } - impl IdentityServiceClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl IdentityServiceClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> IdentityServiceClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - IdentityServiceClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Retrieves user information about the currently logged in user. 
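// [Reviewer sketch, not part of the generated file] The UserInfo RPC below
// takes an empty request and returns the OIDC claims of the caller; a bearer
// token must already be attached, e.g. via `with_interceptor`. Endpoint is
// hypothetical.
async fn example_whoami() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = identity_service_client::IdentityServiceClient::connect(
        "http://localhost:8089",
    )
    .await?;
    let me = client.user_info(UserInfoRequest {}).await?.into_inner();
    println!("logged in as {} ({})", me.preferred_username, me.email);
    Ok(())
}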
- pub async fn user_info( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.IdentityService/UserInfo", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("flyteidl.service.IdentityService", "UserInfo")); - self.inner.unary(req, path, codec).await - } - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct OAuth2MetadataRequest {} -/// OAuth2MetadataResponse defines an RFC-Compliant response for /.well-known/oauth-authorization-server metadata -/// as defined in -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct OAuth2MetadataResponse { - /// Defines the issuer string in all JWT tokens this server issues. The issuer can be admin itself or an external - /// issuer. - #[prost(string, tag = "1")] - pub issuer: ::prost::alloc::string::String, - /// URL of the authorization server's authorization endpoint \[RFC6749\]. This is REQUIRED unless no grant types are - /// supported that use the authorization endpoint. - #[prost(string, tag = "2")] - pub authorization_endpoint: ::prost::alloc::string::String, - /// URL of the authorization server's token endpoint \[RFC6749\]. - #[prost(string, tag = "3")] - pub token_endpoint: ::prost::alloc::string::String, - /// Array containing a list of the OAuth 2.0 response_type values that this authorization server supports. - #[prost(string, repeated, tag = "4")] - pub response_types_supported: ::prost::alloc::vec::Vec< - ::prost::alloc::string::String, - >, - /// JSON array containing a list of the OAuth 2.0 \[RFC6749\] scope values that this authorization server supports. - #[prost(string, repeated, tag = "5")] - pub scopes_supported: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// JSON array containing a list of client authentication methods supported by this token endpoint. - #[prost(string, repeated, tag = "6")] - pub token_endpoint_auth_methods_supported: ::prost::alloc::vec::Vec< - ::prost::alloc::string::String, - >, - /// URL of the authorization server's JWK Set \[JWK\] document. The referenced document contains the signing key(s) the - /// client uses to validate signatures from the authorization server. - #[prost(string, tag = "7")] - pub jwks_uri: ::prost::alloc::string::String, - /// JSON array containing a list of Proof Key for Code Exchange (PKCE) \[RFC7636\] code challenge methods supported by - /// this authorization server. - #[prost(string, repeated, tag = "8")] - pub code_challenge_methods_supported: ::prost::alloc::vec::Vec< - ::prost::alloc::string::String, - >, - /// JSON array containing a list of the OAuth 2.0 grant type values that this authorization server supports. 
- #[prost(string, repeated, tag = "9")] - pub grant_types_supported: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// URL of the authorization server's device authorization endpoint, as defined in Section 3.1 of \[RFC8628\] - #[prost(string, tag = "10")] - pub device_authorization_endpoint: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PublicClientAuthConfigRequest {} -/// FlyteClientResponse encapsulates public information that flyte clients (CLIs... etc.) can use to authenticate users. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct PublicClientAuthConfigResponse { - /// client_id to use when initiating OAuth2 authorization requests. - #[prost(string, tag = "1")] - pub client_id: ::prost::alloc::string::String, - /// redirect uri to use when initiating OAuth2 authorization requests. - #[prost(string, tag = "2")] - pub redirect_uri: ::prost::alloc::string::String, - /// scopes to request when initiating OAuth2 authorization requests. - #[prost(string, repeated, tag = "3")] - pub scopes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Authorization Header to use when passing Access Tokens to the server. If not provided, the client should use the - /// default http `Authorization` header. - #[prost(string, tag = "4")] - pub authorization_metadata_key: ::prost::alloc::string::String, - /// ServiceHttpEndpoint points to the http endpoint for the backend. If empty, clients can assume the endpoint used - /// to configure the gRPC connection can be used for the http one respecting the insecure flag to choose between - /// SSL or no SSL connections. - #[prost(string, tag = "5")] - pub service_http_endpoint: ::prost::alloc::string::String, - /// audience to use when initiating OAuth2 authorization requests. - #[prost(string, tag = "6")] - pub audience: ::prost::alloc::string::String, -} -/// Generated client implementations. -pub mod auth_metadata_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// The following defines an RPC service that is also served over HTTP via grpc-gateway. - /// Standard response codes for both are defined here: https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/errors.go - /// RPCs defined in this service must be anonymously accessible. - #[derive(Debug, Clone)] - pub struct AuthMetadataServiceClient { - inner: tonic::client::Grpc, - } - impl AuthMetadataServiceClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl AuthMetadataServiceClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> AuthMetadataServiceClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - AuthMetadataServiceClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Anonymously accessible. Retrieves local or external oauth authorization server metadata. - pub async fn get_o_auth2_metadata( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AuthMetadataService/GetOAuth2Metadata", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AuthMetadataService", - "GetOAuth2Metadata", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Anonymously accessible. Retrieves the client information clients should use when initiating OAuth2 authorization - /// requests. 
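// [Reviewer sketch, not part of the generated file] Bootstrapping a login flow
// with this service's two anonymous discovery RPCs: fetch the OAuth2 server
// metadata, then the public client config. Endpoint is hypothetical.
async fn example_auth_discovery() -> Result<(), Box<dyn std::error::Error>> {
    let mut client =
        auth_metadata_service_client::AuthMetadataServiceClient::connect(
            "http://localhost:8089",
        )
        .await?;
    let meta = client
        .get_o_auth2_metadata(OAuth2MetadataRequest {})
        .await?
        .into_inner();
    println!("token endpoint: {}", meta.token_endpoint);
    let cfg = client
        .get_public_client_config(PublicClientAuthConfigRequest {})
        .await?
        .into_inner();
    println!("client_id: {}, scopes: {:?}", cfg.client_id, cfg.scopes);
    Ok(())
}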
- pub async fn get_public_client_config( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AuthMetadataService/GetPublicClientConfig", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AuthMetadataService", - "GetPublicClientConfig", - ), - ); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated client implementations. -pub mod sync_agent_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// SyncAgentService defines an RPC Service that allows propeller to send the request to the agent server synchronously. - #[derive(Debug, Clone)] - pub struct SyncAgentServiceClient { - inner: tonic::client::Grpc, - } - impl SyncAgentServiceClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl SyncAgentServiceClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> SyncAgentServiceClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - SyncAgentServiceClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// ExecuteTaskSync streams the create request and inputs to the agent service and streams the outputs back. 
- pub async fn execute_task_sync( - &mut self, - request: impl tonic::IntoStreamingRequest< - Message = super::super::admin::ExecuteTaskSyncRequest, - >, - ) -> std::result::Result< - tonic::Response< - tonic::codec::Streaming, - >, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.SyncAgentService/ExecuteTaskSync", - ); - let mut req = request.into_streaming_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.SyncAgentService", - "ExecuteTaskSync", - ), - ); - self.inner.streaming(req, path, codec).await - } - } -} -/// Generated client implementations. -pub mod async_agent_service_client { - #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// AsyncAgentService defines an RPC Service that allows propeller to send the request to the agent server asynchronously. - #[derive(Debug, Clone)] - pub struct AsyncAgentServiceClient { - inner: tonic::client::Grpc, - } - impl AsyncAgentServiceClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl AsyncAgentServiceClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + Send + 'static, - ::Error: Into + Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> AsyncAgentServiceClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + Send + Sync, - { - AsyncAgentServiceClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// CreateTask sends a task create request to the agent service. 
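// [Reviewer sketch, not part of the generated file] Driving the bidirectional
// ExecuteTaskSync stream above with a single request message. Assumes a
// `tokio-stream` dependency for building the outbound stream; the endpoint and
// request contents are hypothetical.
async fn example_execute_sync() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = sync_agent_service_client::SyncAgentServiceClient::connect(
        "http://localhost:8090",
    )
    .await?;
    let outbound =
        tokio_stream::iter(vec![super::admin::ExecuteTaskSyncRequest::default()]);
    let mut inbound = client.execute_task_sync(outbound).await?.into_inner();
    // Drain the response stream until the agent closes it.
    while let Some(resp) = inbound.message().await? {
        println!("agent response: {:?}", resp);
    }
    Ok(())
}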
- pub async fn create_task( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AsyncAgentService/CreateTask", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AsyncAgentService", "CreateTask"), - ); - self.inner.unary(req, path, codec).await - } - /// Get job status. - pub async fn get_task( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AsyncAgentService/GetTask", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AsyncAgentService", "GetTask"), - ); - self.inner.unary(req, path, codec).await - } - /// Delete the task resource. - pub async fn delete_task( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AsyncAgentService/DeleteTask", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AsyncAgentService", "DeleteTask"), - ); - self.inner.unary(req, path, codec).await - } - /// GetTaskMetrics returns one or more task execution metrics, if available. - /// - /// Errors include - /// * OutOfRange if metrics are not available for the specified task time range - /// * various other errors - pub async fn get_task_metrics( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AsyncAgentService/GetTaskMetrics", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AsyncAgentService", - "GetTaskMetrics", - ), - ); - self.inner.unary(req, path, codec).await - } - /// GetTaskLogs returns task execution logs, if available. 
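// [Reviewer sketch, not part of the generated file] The async agent protocol
// above is create/poll/delete: a caller creates the task, then calls GetTask
// until the resource reaches a terminal phase. This sketch polls once; the
// request contents are hypothetical.
async fn example_async_agent_poll() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = async_agent_service_client::AsyncAgentServiceClient::connect(
        "http://localhost:8090",
    )
    .await?;
    let created = client
        .create_task(super::admin::CreateTaskRequest::default())
        .await?
        .into_inner();
    // A real GetTask request would carry the resource metadata from `created`.
    let got = client
        .get_task(super::admin::GetTaskRequest::default())
        .await?
        .into_inner();
    println!("created {:?}, current {:?}", created, got);
    Ok(())
}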
-        pub async fn get_task_logs(
-            &mut self,
-            request: impl tonic::IntoRequest<super::super::admin::GetTaskLogsRequest>,
-        ) -> std::result::Result<
-            tonic::Response<
-                tonic::codec::Streaming<super::super::admin::GetTaskLogsResponse>,
-            >,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.service.AsyncAgentService/GetTaskLogs",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(
-                    GrpcMethod::new("flyteidl.service.AsyncAgentService", "GetTaskLogs"),
-                );
-            self.inner.server_streaming(req, path, codec).await
-        }
-    }
-}
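
The builder-style knobs above (compression, message-size limits) compose by value, so they chain directly off `connect`. A sketch of a configured client, under the same module-layout and endpoint assumptions as before (the request contents are placeholders, and gzip must be enabled server-side for `send_compressed` to succeed):

    use flyteidl::admin::GetTaskRequest;
    use flyteidl::service::async_agent_service_client::AsyncAgentServiceClient;
    use tonic::codec::CompressionEncoding;

    async fn poll_task() -> Result<(), Box<dyn std::error::Error>> {
        // Placeholder endpoint; each builder method returns Self, so they chain.
        let mut client = AsyncAgentServiceClient::connect("http://localhost:8000")
            .await?
            .send_compressed(CompressionEncoding::Gzip)
            .accept_compressed(CompressionEncoding::Gzip)
            .max_decoding_message_size(16 * 1024 * 1024); // raise the 4MB default
        let resp = client.get_task(GetTaskRequest::default()).await?;
        println!("{:?}", resp.into_inner());
        Ok(())
    }
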
-/// Generated client implementations.
-pub mod agent_metadata_service_client {
-    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
-    use tonic::codegen::*;
-    use tonic::codegen::http::Uri;
-    /// AgentMetadataService defines an RPC service that is also served over HTTP via grpc-gateway.
-    /// This service allows propeller or users to get the metadata of agents.
-    #[derive(Debug, Clone)]
-    pub struct AgentMetadataServiceClient<T> {
-        inner: tonic::client::Grpc<T>,
-    }
-    impl AgentMetadataServiceClient<tonic::transport::Channel> {
-        /// Attempt to create a new client by connecting to a given endpoint.
-        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
-        where
-            D: TryInto<tonic::transport::Endpoint>,
-            D::Error: Into<StdError>,
-        {
-            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
-            Ok(Self::new(conn))
-        }
-    }
-    impl<T> AgentMetadataServiceClient<T>
-    where
-        T: tonic::client::GrpcService<tonic::body::BoxBody>,
-        T::Error: Into<StdError>,
-        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
-        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
-    {
-        pub fn new(inner: T) -> Self {
-            let inner = tonic::client::Grpc::new(inner);
-            Self { inner }
-        }
-        pub fn with_origin(inner: T, origin: Uri) -> Self {
-            let inner = tonic::client::Grpc::with_origin(inner, origin);
-            Self { inner }
-        }
-        pub fn with_interceptor<F>(
-            inner: T,
-            interceptor: F,
-        ) -> AgentMetadataServiceClient<InterceptedService<T, F>>
-        where
-            F: tonic::service::Interceptor,
-            T::ResponseBody: Default,
-            T: tonic::codegen::Service<
-                http::Request<tonic::body::BoxBody>,
-                Response = http::Response<
-                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
-                >,
-            >,
-            <T as tonic::codegen::Service<
-                http::Request<tonic::body::BoxBody>,
-            >>::Error: Into<StdError> + Send + Sync,
-        {
-            AgentMetadataServiceClient::new(InterceptedService::new(inner, interceptor))
-        }
-        /// Compress requests with the given encoding.
-        ///
-        /// This requires the server to support it otherwise it might respond with an
-        /// error.
-        #[must_use]
-        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
-            self.inner = self.inner.send_compressed(encoding);
-            self
-        }
-        /// Enable decompressing responses.
-        #[must_use]
-        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
-            self.inner = self.inner.accept_compressed(encoding);
-            self
-        }
-        /// Limits the maximum size of a decoded message.
-        ///
-        /// Default: `4MB`
-        #[must_use]
-        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
-            self.inner = self.inner.max_decoding_message_size(limit);
-            self
-        }
-        /// Limits the maximum size of an encoded message.
-        ///
-        /// Default: `usize::MAX`
-        #[must_use]
-        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
-            self.inner = self.inner.max_encoding_message_size(limit);
-            self
-        }
-        /// Fetch a :ref:`ref_flyteidl.admin.Agent` definition.
-        pub async fn get_agent(
-            &mut self,
-            request: impl tonic::IntoRequest<super::super::admin::GetAgentRequest>,
-        ) -> std::result::Result<
-            tonic::Response<super::super::admin::GetAgentResponse>,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.service.AgentMetadataService/GetAgent",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(
-                    GrpcMethod::new("flyteidl.service.AgentMetadataService", "GetAgent"),
-                );
-            self.inner.unary(req, path, codec).await
-        }
-        /// Fetch a list of :ref:`ref_flyteidl.admin.Agent` definitions.
-        pub async fn list_agents(
-            &mut self,
-            request: impl tonic::IntoRequest<super::super::admin::ListAgentsRequest>,
-        ) -> std::result::Result<
-            tonic::Response<super::super::admin::ListAgentsResponse>,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.service.AgentMetadataService/ListAgents",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(
-                    GrpcMethod::new(
-                        "flyteidl.service.AgentMetadataService",
-                        "ListAgents",
-                    ),
-                );
-            self.inner.unary(req, path, codec).await
-        }
-    }
-}
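
The metadata service is plain unary; a corresponding sketch, under the same assumptions as the earlier examples (`ListAgentsRequest` is an empty message, and the `name` field on `Agent` is taken from the admin agent proto):

    use flyteidl::admin::ListAgentsRequest;
    use flyteidl::service::agent_metadata_service_client::AgentMetadataServiceClient;

    async fn dump_agents() -> Result<(), Box<dyn std::error::Error>> {
        // Placeholder endpoint; this service is also reachable over grpc-gateway HTTP.
        let mut client = AgentMetadataServiceClient::connect("http://localhost:8000").await?;
        let agents = client.list_agents(ListAgentsRequest {}).await?.into_inner();
        for agent in agents.agents {
            println!("{:?}", agent.name);
        }
        Ok(())
    }
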
-/// Generated client implementations.
-pub mod admin_service_client {
-    #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)]
-    use tonic::codegen::*;
-    use tonic::codegen::http::Uri;
-    /// The following defines an RPC service that is also served over HTTP via grpc-gateway.
-    /// Standard response codes for both are defined here: https://github.com/grpc-ecosystem/grpc-gateway/blob/master/runtime/errors.go
-    #[derive(Debug, Clone)]
-    pub struct AdminServiceClient<T> {
-        inner: tonic::client::Grpc<T>,
-    }
-    impl AdminServiceClient<tonic::transport::Channel> {
-        /// Attempt to create a new client by connecting to a given endpoint.
-        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
-        where
-            D: TryInto<tonic::transport::Endpoint>,
-            D::Error: Into<StdError>,
-        {
-            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
-            Ok(Self::new(conn))
-        }
-    }
-    impl<T> AdminServiceClient<T>
-    where
-        T: tonic::client::GrpcService<tonic::body::BoxBody>,
-        T::Error: Into<StdError>,
-        T::ResponseBody: Body<Data = Bytes> + Send + 'static,
-        <T::ResponseBody as Body>::Error: Into<StdError> + Send,
-    {
-        pub fn new(inner: T) -> Self {
-            let inner = tonic::client::Grpc::new(inner);
-            Self { inner }
-        }
-        pub fn with_origin(inner: T, origin: Uri) -> Self {
-            let inner = tonic::client::Grpc::with_origin(inner, origin);
-            Self { inner }
-        }
-        pub fn with_interceptor<F>(
-            inner: T,
-            interceptor: F,
-        ) -> AdminServiceClient<InterceptedService<T, F>>
-        where
-            F: tonic::service::Interceptor,
-            T::ResponseBody: Default,
-            T: tonic::codegen::Service<
-                http::Request<tonic::body::BoxBody>,
-                Response = http::Response<
-                    <T as tonic::client::GrpcService<tonic::body::BoxBody>>::ResponseBody,
-                >,
-            >,
-            <T as tonic::codegen::Service<
-                http::Request<tonic::body::BoxBody>,
-            >>::Error: Into<StdError> + Send + Sync,
-        {
-            AdminServiceClient::new(InterceptedService::new(inner, interceptor))
-        }
-        /// Compress requests with the given encoding.
-        ///
-        /// This requires the server to support it otherwise it might respond with an
-        /// error.
-        #[must_use]
-        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
-            self.inner = self.inner.send_compressed(encoding);
-            self
-        }
-        /// Enable decompressing responses.
-        #[must_use]
-        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
-            self.inner = self.inner.accept_compressed(encoding);
-            self
-        }
-        /// Limits the maximum size of a decoded message.
-        ///
-        /// Default: `4MB`
-        #[must_use]
-        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
-            self.inner = self.inner.max_decoding_message_size(limit);
-            self
-        }
-        /// Limits the maximum size of an encoded message.
-        ///
-        /// Default: `usize::MAX`
-        #[must_use]
-        pub fn max_encoding_message_size(mut self, limit: usize) -> Self {
-            self.inner = self.inner.max_encoding_message_size(limit);
-            self
-        }
-        /// Create and upload a :ref:`ref_flyteidl.admin.Task` definition
-        pub async fn create_task(
-            &mut self,
-            request: impl tonic::IntoRequest<super::super::admin::TaskCreateRequest>,
-        ) -> std::result::Result<
-            tonic::Response<super::super::admin::TaskCreateResponse>,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.service.AdminService/CreateTask",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(GrpcMethod::new("flyteidl.service.AdminService", "CreateTask"));
-            self.inner.unary(req, path, codec).await
-        }
-        /// Fetch a :ref:`ref_flyteidl.admin.Task` definition.
-        pub async fn get_task(
-            &mut self,
-            request: impl tonic::IntoRequest<super::super::admin::ObjectGetRequest>,
-        ) -> std::result::Result<
-            tonic::Response<super::super::admin::Task>,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.service.AdminService/GetTask",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(GrpcMethod::new("flyteidl.service.AdminService", "GetTask"));
-            self.inner.unary(req, path, codec).await
-        }
-        /// Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of task objects.
-        pub async fn list_task_ids(
-            &mut self,
-            request: impl tonic::IntoRequest<
-                super::super::admin::NamedEntityIdentifierListRequest,
-            >,
-        ) -> std::result::Result<
-            tonic::Response<super::super::admin::NamedEntityIdentifierList>,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.service.AdminService/ListTaskIds",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(GrpcMethod::new("flyteidl.service.AdminService", "ListTaskIds"));
-            self.inner.unary(req, path, codec).await
-        }
-        /// Fetch a list of :ref:`ref_flyteidl.admin.Task` definitions.
- pub async fn list_tasks( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/ListTasks", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("flyteidl.service.AdminService", "ListTasks")); - self.inner.unary(req, path, codec).await - } - /// Create and upload a :ref:`ref_flyteidl.admin.Workflow` definition - pub async fn create_workflow( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/CreateWorkflow", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "CreateWorkflow"), - ); - self.inner.unary(req, path, codec).await - } - /// Fetch a :ref:`ref_flyteidl.admin.Workflow` definition. - pub async fn get_workflow( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetWorkflow", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("flyteidl.service.AdminService", "GetWorkflow")); - self.inner.unary(req, path, codec).await - } - /// Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of workflow objects. - pub async fn list_workflow_ids( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::NamedEntityIdentifierListRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/ListWorkflowIds", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "ListWorkflowIds"), - ); - self.inner.unary(req, path, codec).await - } - /// Fetch a list of :ref:`ref_flyteidl.admin.Workflow` definitions. 
- pub async fn list_workflows( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/ListWorkflows", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "ListWorkflows"), - ); - self.inner.unary(req, path, codec).await - } - /// Create and upload a :ref:`ref_flyteidl.admin.LaunchPlan` definition - pub async fn create_launch_plan( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::LaunchPlanCreateRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/CreateLaunchPlan", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "CreateLaunchPlan"), - ); - self.inner.unary(req, path, codec).await - } - /// Fetch a :ref:`ref_flyteidl.admin.LaunchPlan` definition. - pub async fn get_launch_plan( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetLaunchPlan", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "GetLaunchPlan"), - ); - self.inner.unary(req, path, codec).await - } - /// Fetch the active version of a :ref:`ref_flyteidl.admin.LaunchPlan`. - pub async fn get_active_launch_plan( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::ActiveLaunchPlanRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetActiveLaunchPlan", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "GetActiveLaunchPlan", - ), - ); - self.inner.unary(req, path, codec).await - } - /// List active versions of :ref:`ref_flyteidl.admin.LaunchPlan`. 
- pub async fn list_active_launch_plans( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::ActiveLaunchPlanListRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/ListActiveLaunchPlans", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "ListActiveLaunchPlans", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Fetch a list of :ref:`ref_flyteidl.admin.NamedEntityIdentifier` of launch plan objects. - pub async fn list_launch_plan_ids( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::NamedEntityIdentifierListRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/ListLaunchPlanIds", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "ListLaunchPlanIds"), - ); - self.inner.unary(req, path, codec).await - } - /// Fetch a list of :ref:`ref_flyteidl.admin.LaunchPlan` definitions. - pub async fn list_launch_plans( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/ListLaunchPlans", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "ListLaunchPlans"), - ); - self.inner.unary(req, path, codec).await - } - /// Updates the status of a registered :ref:`ref_flyteidl.admin.LaunchPlan`. 
- pub async fn update_launch_plan( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::LaunchPlanUpdateRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/UpdateLaunchPlan", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "UpdateLaunchPlan"), - ); - self.inner.unary(req, path, codec).await - } - /// Triggers the creation of a :ref:`ref_flyteidl.admin.Execution` - pub async fn create_execution( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/CreateExecution", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "CreateExecution"), - ); - self.inner.unary(req, path, codec).await - } - /// Triggers the creation of an identical :ref:`ref_flyteidl.admin.Execution` - pub async fn relaunch_execution( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::ExecutionRelaunchRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/RelaunchExecution", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "RelaunchExecution"), - ); - self.inner.unary(req, path, codec).await - } - /// Recreates a previously-run workflow execution that will only start executing from the last known failure point. - /// In Recover mode, users cannot change any input parameters or update the version of the execution. - /// This is extremely useful to recover from system errors and byzantine faults like - Loss of K8s cluster, bugs in platform or instability, machine failures, - /// downstream system failures (downstream services), or simply to recover executions that failed because of retry exhaustion and should complete if tried again. - /// See :ref:`ref_flyteidl.admin.ExecutionRecoverRequest` for more details. 
- pub async fn recover_execution( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::ExecutionRecoverRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/RecoverExecution", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "RecoverExecution"), - ); - self.inner.unary(req, path, codec).await - } - /// Fetches a :ref:`ref_flyteidl.admin.Execution`. - pub async fn get_execution( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::WorkflowExecutionGetRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetExecution", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "GetExecution"), - ); - self.inner.unary(req, path, codec).await - } - /// Update execution belonging to project domain :ref:`ref_flyteidl.admin.Execution`. - pub async fn update_execution( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/UpdateExecution", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "UpdateExecution"), - ); - self.inner.unary(req, path, codec).await - } - /// Fetches input and output data for a :ref:`ref_flyteidl.admin.Execution`. - pub async fn get_execution_data( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::WorkflowExecutionGetDataRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetExecutionData", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "GetExecutionData"), - ); - self.inner.unary(req, path, codec).await - } - /// Fetch a list of :ref:`ref_flyteidl.admin.Execution`. 
- pub async fn list_executions( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/ListExecutions", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "ListExecutions"), - ); - self.inner.unary(req, path, codec).await - } - /// Terminates an in-progress :ref:`ref_flyteidl.admin.Execution`. - pub async fn terminate_execution( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::ExecutionTerminateRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/TerminateExecution", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "TerminateExecution", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Fetches a :ref:`ref_flyteidl.admin.NodeExecution`. - pub async fn get_node_execution( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::NodeExecutionGetRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetNodeExecution", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "GetNodeExecution"), - ); - self.inner.unary(req, path, codec).await - } - /// Fetches a :ref:`ref_flyteidl.admin.DynamicNodeWorkflowResponse`. - pub async fn get_dynamic_node_workflow( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::GetDynamicNodeWorkflowRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetDynamicNodeWorkflow", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "GetDynamicNodeWorkflow", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Fetch a list of :ref:`ref_flyteidl.admin.NodeExecution`. 
- pub async fn list_node_executions( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::NodeExecutionListRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/ListNodeExecutions", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "ListNodeExecutions", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Fetch a list of :ref:`ref_flyteidl.admin.NodeExecution` launched by the reference :ref:`ref_flyteidl.admin.TaskExecution`. - pub async fn list_node_executions_for_task( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::NodeExecutionForTaskListRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/ListNodeExecutionsForTask", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "ListNodeExecutionsForTask", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Fetches input and output data for a :ref:`ref_flyteidl.admin.NodeExecution`. - pub async fn get_node_execution_data( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::NodeExecutionGetDataRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetNodeExecutionData", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "GetNodeExecutionData", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Registers a :ref:`ref_flyteidl.admin.Project` with the Flyte deployment. - pub async fn register_project( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/RegisterProject", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "RegisterProject"), - ); - self.inner.unary(req, path, codec).await - } - /// Updates an existing :ref:`ref_flyteidl.admin.Project` - /// flyteidl.admin.Project should be passed but the domains property should be empty; - /// it will be ignored in the handler as domains cannot be updated via this API. 
- pub async fn update_project( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/UpdateProject", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "UpdateProject"), - ); - self.inner.unary(req, path, codec).await - } - /// Fetches a :ref:`ref_flyteidl.admin.Project` - pub async fn get_project( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetProject", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("flyteidl.service.AdminService", "GetProject")); - self.inner.unary(req, path, codec).await - } - /// Fetches a list of :ref:`ref_flyteidl.admin.Project` - pub async fn list_projects( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/ListProjects", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "ListProjects"), - ); - self.inner.unary(req, path, codec).await - } - /// Indicates a :ref:`ref_flyteidl.event.WorkflowExecutionEvent` has occurred. - pub async fn create_workflow_event( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::WorkflowExecutionEventRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/CreateWorkflowEvent", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "CreateWorkflowEvent", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Indicates a :ref:`ref_flyteidl.event.NodeExecutionEvent` has occurred. 
- pub async fn create_node_event( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::NodeExecutionEventRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/CreateNodeEvent", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "CreateNodeEvent"), - ); - self.inner.unary(req, path, codec).await - } - /// Indicates a :ref:`ref_flyteidl.event.TaskExecutionEvent` has occurred. - pub async fn create_task_event( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::TaskExecutionEventRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/CreateTaskEvent", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "CreateTaskEvent"), - ); - self.inner.unary(req, path, codec).await - } - /// Fetches a :ref:`ref_flyteidl.admin.TaskExecution`. - pub async fn get_task_execution( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::TaskExecutionGetRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetTaskExecution", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "GetTaskExecution"), - ); - self.inner.unary(req, path, codec).await - } - /// Fetches a list of :ref:`ref_flyteidl.admin.TaskExecution`. - pub async fn list_task_executions( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::TaskExecutionListRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/ListTaskExecutions", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "ListTaskExecutions", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Fetches input and output data for a :ref:`ref_flyteidl.admin.TaskExecution`. 
- pub async fn get_task_execution_data( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::TaskExecutionGetDataRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetTaskExecutionData", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "GetTaskExecutionData", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. - pub async fn update_project_domain_attributes( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::ProjectDomainAttributesUpdateRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/UpdateProjectDomainAttributes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "UpdateProjectDomainAttributes", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. - pub async fn get_project_domain_attributes( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::ProjectDomainAttributesGetRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetProjectDomainAttributes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "GetProjectDomainAttributes", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. 
- pub async fn delete_project_domain_attributes( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::ProjectDomainAttributesDeleteRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/DeleteProjectDomainAttributes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "DeleteProjectDomainAttributes", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` at the project level - pub async fn update_project_attributes( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::ProjectAttributesUpdateRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/UpdateProjectAttributes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "UpdateProjectAttributes", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. - pub async fn get_project_attributes( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::ProjectAttributesGetRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetProjectAttributes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "GetProjectAttributes", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project and domain. - pub async fn delete_project_attributes( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::ProjectAttributesDeleteRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/DeleteProjectAttributes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "DeleteProjectAttributes", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Creates or updates custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. 
- pub async fn update_workflow_attributes( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::WorkflowAttributesUpdateRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/UpdateWorkflowAttributes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "UpdateWorkflowAttributes", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Fetches custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. - pub async fn get_workflow_attributes( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::WorkflowAttributesGetRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetWorkflowAttributes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "GetWorkflowAttributes", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Deletes custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a project, domain and workflow. - pub async fn delete_workflow_attributes( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::WorkflowAttributesDeleteRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/DeleteWorkflowAttributes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "DeleteWorkflowAttributes", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Lists custom :ref:`ref_flyteidl.admin.MatchableAttributesConfiguration` for a specific resource type. - pub async fn list_matchable_attributes( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::ListMatchableAttributesRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/ListMatchableAttributes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "flyteidl.service.AdminService", - "ListMatchableAttributes", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns a list of :ref:`ref_flyteidl.admin.NamedEntity` objects. 
- pub async fn list_named_entities( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/ListNamedEntities", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "ListNamedEntities"), - ); - self.inner.unary(req, path, codec).await - } - /// Returns a :ref:`ref_flyteidl.admin.NamedEntity` object. - pub async fn get_named_entity( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetNamedEntity", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "GetNamedEntity"), - ); - self.inner.unary(req, path, codec).await - } - /// Updates a :ref:`ref_flyteidl.admin.NamedEntity` object. - pub async fn update_named_entity( - &mut self, - request: impl tonic::IntoRequest< - super::super::admin::NamedEntityUpdateRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/UpdateNamedEntity", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("flyteidl.service.AdminService", "UpdateNamedEntity"), - ); - self.inner.unary(req, path, codec).await - } - pub async fn get_version( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::new( - tonic::Code::Unknown, - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/flyteidl.service.AdminService/GetVersion", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("flyteidl.service.AdminService", "GetVersion")); - self.inner.unary(req, path, codec).await - } - /// Fetch a :ref:`ref_flyteidl.admin.DescriptionEntity` object. 
-        pub async fn get_description_entity(
-            &mut self,
-            request: impl tonic::IntoRequest<super::super::admin::ObjectGetRequest>,
-        ) -> std::result::Result<
-            tonic::Response<super::super::admin::DescriptionEntity>,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.service.AdminService/GetDescriptionEntity",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(
-                    GrpcMethod::new(
-                        "flyteidl.service.AdminService",
-                        "GetDescriptionEntity",
-                    ),
-                );
-            self.inner.unary(req, path, codec).await
-        }
-        /// Fetch a list of :ref:`ref_flyteidl.admin.DescriptionEntity` definitions.
-        pub async fn list_description_entities(
-            &mut self,
-            request: impl tonic::IntoRequest<
-                super::super::admin::DescriptionEntityListRequest,
-            >,
-        ) -> std::result::Result<
-            tonic::Response<super::super::admin::DescriptionEntityList>,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.service.AdminService/ListDescriptionEntities",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(
-                    GrpcMethod::new(
-                        "flyteidl.service.AdminService",
-                        "ListDescriptionEntities",
-                    ),
-                );
-            self.inner.unary(req, path, codec).await
-        }
-        /// Fetches runtime metrics for a :ref:`ref_flyteidl.admin.Execution`.
-        pub async fn get_execution_metrics(
-            &mut self,
-            request: impl tonic::IntoRequest<
-                super::super::admin::WorkflowExecutionGetMetricsRequest,
-            >,
-        ) -> std::result::Result<
-            tonic::Response<super::super::admin::WorkflowExecutionGetMetricsResponse>,
-            tonic::Status,
-        > {
-            self.inner
-                .ready()
-                .await
-                .map_err(|e| {
-                    tonic::Status::new(
-                        tonic::Code::Unknown,
-                        format!("Service was not ready: {}", e.into()),
-                    )
-                })?;
-            let codec = tonic::codec::ProstCodec::default();
-            let path = http::uri::PathAndQuery::from_static(
-                "/flyteidl.service.AdminService/GetExecutionMetrics",
-            );
-            let mut req = request.into_request();
-            req.extensions_mut()
-                .insert(
-                    GrpcMethod::new(
-                        "flyteidl.service.AdminService",
-                        "GetExecutionMetrics",
-                    ),
-                );
-            self.inner.unary(req, path, codec).await
-        }
-    }
-}
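
Most AdminService list RPCs share the request/limit/token pagination shape seen throughout the module above. A sketch of one paginated call, assuming the same module layout as earlier, a placeholder endpoint, and example project/domain values:

    use flyteidl::admin::{NamedEntityIdentifier, ResourceListRequest};
    use flyteidl::service::admin_service_client::AdminServiceClient;

    async fn list_some_tasks() -> Result<(), Box<dyn std::error::Error>> {
        // Placeholder endpoint for a local flyteadmin.
        let mut client = AdminServiceClient::connect("http://localhost:30080").await?;
        let request = ResourceListRequest {
            id: Some(NamedEntityIdentifier {
                project: "flytesnacks".into(),
                domain: "development".into(),
                ..Default::default() // leaving `name` empty lists across all tasks
            }),
            limit: 10,
            ..Default::default() // token/filters/sort_by left at defaults
        };
        let tasks = client.list_tasks(request).await?.into_inner();
        for t in tasks.tasks {
            println!("{:?}", t.id);
        }
        Ok(())
    }
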
diff --git a/flyrs/src/gen/pb_rust/flyteidl/google.api.rs b/flyrs/src/gen/pb_rust/flyteidl/google.api.rs
deleted file mode 100644
index 2c0fd163be..0000000000
--- a/flyrs/src/gen/pb_rust/flyteidl/google.api.rs
+++ /dev/null
@@ -1,367 +0,0 @@
-/// Defines the HTTP configuration for an API service. It contains a list of
-/// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
-/// to one or more HTTP REST API methods.
-#[allow(clippy::derive_partial_eq_without_eq)]
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct Http {
-    /// A list of HTTP configuration rules that apply to individual API methods.
-    ///
-    /// **NOTE:** All service configuration rules follow "last one wins" order.
-    #[prost(message, repeated, tag = "1")]
-    pub rules: ::prost::alloc::vec::Vec<HttpRule>,
-    /// When set to true, URL path parameters will be fully URI-decoded except in
-    /// cases of single segment matches in reserved expansion, where "%2F" will be
-    /// left encoded.
-    ///
-    /// The default behavior is to not decode RFC 6570 reserved characters in multi
-    /// segment matches.
-    #[prost(bool, tag = "2")]
-    pub fully_decode_reserved_expansion: bool,
-}
-/// # gRPC Transcoding
-///
-/// gRPC Transcoding is a feature for mapping between a gRPC method and one or
-/// more HTTP REST endpoints. It allows developers to build a single API service
-/// that supports both gRPC APIs and REST APIs. Many systems, including [Google
-/// APIs](https://github.com/googleapis/googleapis),
-/// [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC
-/// Gateway](https://github.com/grpc-ecosystem/grpc-gateway),
-/// and [Envoy](https://www.envoyproxy.io) proxy support this feature
-/// and use it for large scale production services.
-///
-/// `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies
-/// how different portions of the gRPC request message are mapped to the URL
-/// path, URL query parameters, and HTTP request body. It also controls how the
-/// gRPC response message is mapped to the HTTP response body. `HttpRule` is
-/// typically specified as an `google.api.http` annotation on the gRPC method.
-///
-/// Each mapping specifies a URL path template and an HTTP method. The path
-/// template may refer to one or more fields in the gRPC request message, as long
-/// as each field is a non-repeated field with a primitive (non-message) type.
-/// The path template controls how fields of the request message are mapped to
-/// the URL path.
-///
-/// Example:
-///
-///     service Messaging {
-///       rpc GetMessage(GetMessageRequest) returns (Message) {
-///         option (google.api.http) = {
-///           get: "/v1/{name=messages/*}"
-///         };
-///       }
-///     }
-///     message GetMessageRequest {
-///       string name = 1; // Mapped to URL path.
-///     }
-///     message Message {
-///       string text = 1; // The resource content.
-///     }
-///
-/// This enables an HTTP REST to gRPC mapping as below:
-///
-/// HTTP | gRPC
-/// -----|-----
-/// `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")`
-///
-/// Any fields in the request message which are not bound by the path template
-/// automatically become HTTP query parameters if there is no HTTP request body.
-/// For example:
-///
-///     service Messaging {
-///       rpc GetMessage(GetMessageRequest) returns (Message) {
-///         option (google.api.http) = {
-///           get: "/v1/messages/{message_id}"
-///         };
-///       }
-///     }
-///     message GetMessageRequest {
-///       message SubMessage {
-///         string subfield = 1;
-///       }
-///       string message_id = 1; // Mapped to URL path.
-///       int64 revision = 2;    // Mapped to URL query parameter `revision`.
-///       SubMessage sub = 3;    // Mapped to URL query parameter `sub.subfield`.
-///     }
-///
-/// This enables a HTTP JSON to RPC mapping as below:
-///
-/// HTTP | gRPC
-/// -----|-----
-/// `GET /v1/messages/123456?revision=2&sub.subfield=foo` |
-/// `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield:
-/// "foo"))`
-///
-/// Note that fields which are mapped to URL query parameters must have a
-/// primitive type or a repeated primitive type or a non-repeated message type.
-/// In the case of a repeated type, the parameter can be repeated in the URL
-/// as `...?param=A&param=B`. In the case of a message type, each field of the
-/// message is mapped to a separate parameter, such as
-/// `...?foo.a=A&foo.b=B&foo.c=C`.
-///
-/// For HTTP methods that allow a request body, the `body` field
-/// specifies the mapping.
-/// Consider a REST update method on the
-/// message resource collection:
-///
-///     service Messaging {
-///       rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
-///         option (google.api.http) = {
-///           patch: "/v1/messages/{message_id}"
-///           body: "message"
-///         };
-///       }
-///     }
-///     message UpdateMessageRequest {
-///       string message_id = 1; // mapped to the URL
-///       Message message = 2;   // mapped to the body
-///     }
-///
-/// The following HTTP JSON to RPC mapping is enabled, where the
-/// representation of the JSON in the request body is determined by
-/// protos JSON encoding:
-///
-/// HTTP | gRPC
-/// -----|-----
-/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
-/// "123456" message { text: "Hi!" })`
-///
-/// The special name `*` can be used in the body mapping to define that
-/// every field not bound by the path template should be mapped to the
-/// request body. This enables the following alternative definition of
-/// the update method:
-///
-///     service Messaging {
-///       rpc UpdateMessage(Message) returns (Message) {
-///         option (google.api.http) = {
-///           patch: "/v1/messages/{message_id}"
-///           body: "*"
-///         };
-///       }
-///     }
-///     message Message {
-///       string message_id = 1;
-///       string text = 2;
-///     }
-///
-///
-/// The following HTTP JSON to RPC mapping is enabled:
-///
-/// HTTP | gRPC
-/// -----|-----
-/// `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id:
-/// "123456" text: "Hi!")`
-///
-/// Note that when using `*` in the body mapping, it is not possible to
-/// have HTTP parameters, as all fields not bound by the path end in
-/// the body. This makes this option more rarely used in practice when
-/// defining REST APIs. The common usage of `*` is in custom methods
-/// which don't use the URL at all for transferring data.
-///
-/// It is possible to define multiple HTTP methods for one RPC by using
-/// the `additional_bindings` option. Example:
-///
-///     service Messaging {
-///       rpc GetMessage(GetMessageRequest) returns (Message) {
-///         option (google.api.http) = {
-///           get: "/v1/messages/{message_id}"
-///           additional_bindings {
-///             get: "/v1/users/{user_id}/messages/{message_id}"
-///           }
-///         };
-///       }
-///     }
-///     message GetMessageRequest {
-///       string message_id = 1;
-///       string user_id = 2;
-///     }
-///
-/// This enables the following two alternative HTTP JSON to RPC mappings:
-///
-/// HTTP | gRPC
-/// -----|-----
-/// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
-/// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id:
-/// "123456")`
-///
-/// ## Rules for HTTP mapping
-///
-/// 1. Leaf request fields (recursive expansion nested messages in the request
-///    message) are classified into three categories:
-///    - Fields referred by the path template. They are passed via the URL path.
-///    - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They are passed via the HTTP
-///      request body.
-///    - All other fields are passed via the URL query parameters, and the
-///      parameter name is the field path in the request message. A repeated
-///      field can be represented as multiple query parameters under the same
-///      name.
-/// 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL query parameter, all fields
-///    are passed via URL path and HTTP request body.
-/// 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP request body, all
-///    fields are passed via URL path and URL query parameters.
-/// -/// ### Path template syntax -/// -/// Template = "/" Segments \[ Verb \] ; -/// Segments = Segment { "/" Segment } ; -/// Segment = "*" | "**" | LITERAL | Variable ; -/// Variable = "{" FieldPath \[ "=" Segments \] "}" ; -/// FieldPath = IDENT { "." IDENT } ; -/// Verb = ":" LITERAL ; -/// -/// The syntax `*` matches a single URL path segment. The syntax `**` matches -/// zero or more URL path segments, which must be the last part of the URL path -/// except the `Verb`. -/// -/// The syntax `Variable` matches part of the URL path as specified by its -/// template. A variable template must not contain other variables. If a variable -/// matches a single path segment, its template may be omitted, e.g. `{var}` -/// is equivalent to `{var=*}`. -/// -/// The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` -/// contains any reserved character, such characters should be percent-encoded -/// before the matching. -/// -/// If a variable contains exactly one path segment, such as `"{var}"` or -/// `"{var=*}"`, when such a variable is expanded into a URL path on the client -/// side, all characters except `\[-_.~0-9a-zA-Z\]` are percent-encoded. The -/// server side does the reverse decoding. Such variables show up in the -/// [Discovery -/// Document]() as -/// `{var}`. -/// -/// If a variable contains multiple path segments, such as `"{var=foo/*}"` -/// or `"{var=**}"`, when such a variable is expanded into a URL path on the -/// client side, all characters except `\[-_.~/0-9a-zA-Z\]` are percent-encoded. -/// The server side does the reverse decoding, except "%2F" and "%2f" are left -/// unchanged. Such variables show up in the -/// [Discovery -/// Document]() as -/// `{+var}`. -/// -/// ## Using gRPC API Service Configuration -/// -/// gRPC API Service Configuration (service config) is a configuration language -/// for configuring a gRPC service to become a user-facing product. The -/// service config is simply the YAML representation of the `google.api.Service` -/// proto message. -/// -/// As an alternative to annotating your proto file, you can configure gRPC -/// transcoding in your service config YAML files. You do this by specifying a -/// `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same -/// effect as the proto annotation. This can be particularly useful if you -/// have a proto that is reused in multiple services. Note that any transcoding -/// specified in the service config will override any matching transcoding -/// configuration in the proto. -/// -/// Example: -/// -/// http: -/// rules: -/// # Selects a gRPC method and applies HttpRule to it. -/// - selector: example.v1.Messaging.GetMessage -/// get: /v1/messages/{message_id}/{sub.subfield} -/// -/// ## Special notes -/// -/// When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the -/// proto to JSON conversion must follow the [proto3 -/// specification](). -/// -/// While the single segment variable follows the semantics of -/// [RFC 6570]() Section 3.2.2 Simple String -/// Expansion, the multi segment variable **does not** follow RFC 6570 Section -/// 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion -/// does not expand special characters like `?` and `#`, which would lead -/// to invalid URLs. As the result, gRPC Transcoding uses a custom encoding -/// for multi segment variables. 
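A few illustrative matches against the template grammar above, sketched as comments (the paths are hypothetical):

    // Template: /v1/{name=messages/*}          Verb: none
    //   /v1/messages/123456      -> name = "messages/123456"
    // Template: /v1/{name=**}:cancel           Verb: ":cancel"
    //   /v1/a/b/c:cancel         -> name = "a/b/c"
    // `*` matches exactly one segment; `**` matches zero or more segments
    // and, per the note above, is expanded with the custom encoding that
    // leaves "/" unescaped ("%2F"/"%2f" are not decoded server side).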
-/// -/// The path variables **must not** refer to any repeated or mapped field, -/// because client libraries are not capable of handling such variable expansion. -/// -/// The path variables **must not** capture the leading "/" character. The reason -/// is that the most common use case "{var}" does not capture the leading "/" -/// character. For consistency, all path variables must share the same behavior. -/// -/// Repeated message fields must not be mapped to URL query parameters, because -/// no client library can support such complicated mapping. -/// -/// If an API needs to use a JSON array for request or response body, it can map -/// the request or response body to a repeated field. However, some gRPC -/// Transcoding implementations may not support this feature. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct HttpRule { - /// Selects a method to which this rule applies. - /// - /// Refer to [selector][google.api.DocumentationRule.selector] for syntax details. - #[prost(string, tag = "1")] - pub selector: ::prost::alloc::string::String, - /// The name of the request field whose value is mapped to the HTTP request - /// body, or `*` for mapping all request fields not captured by the path - /// pattern to the HTTP body, or omitted for not having any HTTP request body. - /// - /// NOTE: the referred field must be present at the top-level of the request - /// message type. - #[prost(string, tag = "7")] - pub body: ::prost::alloc::string::String, - /// Optional. The name of the response field whose value is mapped to the HTTP - /// response body. When omitted, the entire response message will be used - /// as the HTTP response body. - /// - /// NOTE: The referred field must be present at the top-level of the response - /// message type. - #[prost(string, tag = "12")] - pub response_body: ::prost::alloc::string::String, - /// Additional HTTP bindings for the selector. Nested bindings must - /// not contain an `additional_bindings` field themselves (that is, - /// the nesting may only be one level deep). - #[prost(message, repeated, tag = "11")] - pub additional_bindings: ::prost::alloc::vec::Vec<HttpRule>, - /// Determines the URL pattern is matched by this rules. This pattern can be - /// used with any of the {get|put|post|delete|patch} methods. A custom method - /// can be defined using the 'custom' field. - #[prost(oneof = "http_rule::Pattern", tags = "2, 3, 4, 5, 6, 8")] - pub pattern: ::core::option::Option<http_rule::Pattern>, -} -/// Nested message and enum types in `HttpRule`. -pub mod http_rule { - /// Determines the URL pattern is matched by this rules. This pattern can be - /// used with any of the {get|put|post|delete|patch} methods. A custom method - /// can be defined using the 'custom' field. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Pattern { - /// Maps to HTTP GET. Used for listing and getting information about - /// resources. - #[prost(string, tag = "2")] - Get(::prost::alloc::string::String), - /// Maps to HTTP PUT. Used for replacing a resource. - #[prost(string, tag = "3")] - Put(::prost::alloc::string::String), - /// Maps to HTTP POST. Used for creating a resource or performing an action. - #[prost(string, tag = "4")] - Post(::prost::alloc::string::String), - /// Maps to HTTP DELETE. Used for deleting a resource. - #[prost(string, tag = "5")] - Delete(::prost::alloc::string::String), - /// Maps to HTTP PATCH. Used for updating a resource.
- #[prost(string, tag = "6")] - Patch(::prost::alloc::string::String), - /// The custom pattern is used for specifying an HTTP method that is not - /// included in the `pattern` field, such as HEAD, or "*" to leave the - /// HTTP method unspecified for this rule. The wild-card rule is useful - /// for services that provide content to Web (HTML) clients. - #[prost(message, tag = "8")] - Custom(super::CustomHttpPattern), - } -} -/// A custom pattern is used for defining custom HTTP verb. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CustomHttpPattern { - /// The name of this custom HTTP verb. - #[prost(string, tag = "1")] - pub kind: ::prost::alloc::string::String, - /// The path matched by this custom verb. - #[prost(string, tag = "2")] - pub path: ::prost::alloc::string::String, -} diff --git a/flyrs/src/gen/pb_rust/flyteidl/grpc.gateway.protoc_gen_openapiv2.options.rs b/flyrs/src/gen/pb_rust/flyteidl/grpc.gateway.protoc_gen_openapiv2.options.rs deleted file mode 100644 index f9ca7e84ba..0000000000 --- a/flyrs/src/gen/pb_rust/flyteidl/grpc.gateway.protoc_gen_openapiv2.options.rs +++ /dev/null @@ -1,1019 +0,0 @@ -/// `Swagger` is a representation of OpenAPI v2 specification's Swagger object. -/// -/// See: -/// -/// Example: -/// -/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { -/// info: { -/// title: "Echo API"; -/// version: "1.0"; -/// description: ""; -/// contact: { -/// name: "gRPC-Gateway project"; -/// url: " -/// email: "none@example.com"; -/// }; -/// license: { -/// name: "BSD 3-Clause License"; -/// url: " -/// }; -/// }; -/// schemes: HTTPS; -/// consumes: "application/json"; -/// produces: "application/json"; -/// }; -/// -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Swagger { - /// Specifies the OpenAPI Specification version being used. It can be - /// used by the OpenAPI UI and other clients to interpret the API listing. The - /// value MUST be "2.0". - #[prost(string, tag = "1")] - pub swagger: ::prost::alloc::string::String, - /// Provides metadata about the API. The metadata can be used by the - /// clients if needed. - #[prost(message, optional, tag = "2")] - pub info: ::core::option::Option, - /// The host (name or ip) serving the API. This MUST be the host only and does - /// not include the scheme nor sub-paths. It MAY include a port. If the host is - /// not included, the host serving the documentation is to be used (including - /// the port). The host does not support path templating. - #[prost(string, tag = "3")] - pub host: ::prost::alloc::string::String, - /// The base path on which the API is served, which is relative to the host. If - /// it is not included, the API is served directly under the host. The value - /// MUST start with a leading slash (/). The basePath does not support path - /// templating. - /// Note that using `base_path` does not change the endpoint paths that are - /// generated in the resulting OpenAPI file. If you wish to use `base_path` - /// with relatively generated OpenAPI paths, the `base_path` prefix must be - /// manually removed from your `google.api.http` paths and your code changed to - /// serve the API from the `base_path`. - #[prost(string, tag = "4")] - pub base_path: ::prost::alloc::string::String, - /// The transfer protocol of the API. Values MUST be from the list: "http", - /// "https", "ws", "wss". 
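The top-level `Swagger` message in this deleted file mirrors the option shown in its leading example. A sketch, assuming the generated module is in scope as `openapiv2` (the alias is an assumption):

    let spec = openapiv2::Swagger {
        swagger: "2.0".to_string(),   // MUST be "2.0" per the field docs
        host: "api.example.com".to_string(),     // hypothetical host
        base_path: "/".to_string(),
        schemes: vec![openapiv2::Scheme::Https as i32],
        consumes: vec!["application/json".to_string()],
        produces: vec!["application/json".to_string()],
        ..Default::default()
    };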
If the schemes is not included, the default scheme to - /// be used is the one used to access the OpenAPI definition itself. - #[prost(enumeration = "Scheme", repeated, tag = "5")] - pub schemes: ::prost::alloc::vec::Vec, - /// A list of MIME types the APIs can consume. This is global to all APIs but - /// can be overridden on specific API calls. Value MUST be as described under - /// Mime Types. - #[prost(string, repeated, tag = "6")] - pub consumes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// A list of MIME types the APIs can produce. This is global to all APIs but - /// can be overridden on specific API calls. Value MUST be as described under - /// Mime Types. - #[prost(string, repeated, tag = "7")] - pub produces: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// An object to hold responses that can be used across operations. This - /// property does not define global responses for all operations. - #[prost(map = "string, message", tag = "10")] - pub responses: ::std::collections::HashMap<::prost::alloc::string::String, Response>, - /// Security scheme definitions that can be used across the specification. - #[prost(message, optional, tag = "11")] - pub security_definitions: ::core::option::Option, - /// A declaration of which security schemes are applied for the API as a whole. - /// The list of values describes alternative security schemes that can be used - /// (that is, there is a logical OR between the security requirements). - /// Individual operations can override this definition. - #[prost(message, repeated, tag = "12")] - pub security: ::prost::alloc::vec::Vec, - /// A list of tags for API documentation control. Tags can be used for logical - /// grouping of operations by resources or any other qualifier. - #[prost(message, repeated, tag = "13")] - pub tags: ::prost::alloc::vec::Vec, - /// Additional external documentation. - #[prost(message, optional, tag = "14")] - pub external_docs: ::core::option::Option, - /// Custom properties that start with "x-" such as "x-foo" used to describe - /// extra functionality that is not covered by the standard OpenAPI Specification. - /// See: - #[prost(map = "string, message", tag = "15")] - pub extensions: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost_types::Value, - >, -} -/// `Operation` is a representation of OpenAPI v2 specification's Operation object. -/// -/// See: -/// -/// Example: -/// -/// service EchoService { -/// rpc Echo(SimpleMessage) returns (SimpleMessage) { -/// option (google.api.http) = { -/// get: "/v1/example/echo/{id}" -/// }; -/// -/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = { -/// summary: "Get a message."; -/// operation_id: "getMessage"; -/// tags: "echo"; -/// responses: { -/// key: "200" -/// value: { -/// description: "OK"; -/// } -/// } -/// }; -/// } -/// } -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Operation { - /// A list of tags for API documentation control. Tags can be used for logical - /// grouping of operations by resources or any other qualifier. - #[prost(string, repeated, tag = "1")] - pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// A short summary of what the operation does. For maximum readability in the - /// swagger-ui, this field SHOULD be less than 120 characters. - #[prost(string, tag = "2")] - pub summary: ::prost::alloc::string::String, - /// A verbose explanation of the operation behavior. 
GFM syntax can be used for - /// rich text representation. - #[prost(string, tag = "3")] - pub description: ::prost::alloc::string::String, - /// Additional external documentation for this operation. - #[prost(message, optional, tag = "4")] - pub external_docs: ::core::option::Option, - /// Unique string used to identify the operation. The id MUST be unique among - /// all operations described in the API. Tools and libraries MAY use the - /// operationId to uniquely identify an operation, therefore, it is recommended - /// to follow common programming naming conventions. - #[prost(string, tag = "5")] - pub operation_id: ::prost::alloc::string::String, - /// A list of MIME types the operation can consume. This overrides the consumes - /// definition at the OpenAPI Object. An empty value MAY be used to clear the - /// global definition. Value MUST be as described under Mime Types. - #[prost(string, repeated, tag = "6")] - pub consumes: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// A list of MIME types the operation can produce. This overrides the produces - /// definition at the OpenAPI Object. An empty value MAY be used to clear the - /// global definition. Value MUST be as described under Mime Types. - #[prost(string, repeated, tag = "7")] - pub produces: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// The list of possible responses as they are returned from executing this - /// operation. - #[prost(map = "string, message", tag = "9")] - pub responses: ::std::collections::HashMap<::prost::alloc::string::String, Response>, - /// The transfer protocol for the operation. Values MUST be from the list: - /// "http", "https", "ws", "wss". The value overrides the OpenAPI Object - /// schemes definition. - #[prost(enumeration = "Scheme", repeated, tag = "10")] - pub schemes: ::prost::alloc::vec::Vec, - /// Declares this operation to be deprecated. Usage of the declared operation - /// should be refrained. Default value is false. - #[prost(bool, tag = "11")] - pub deprecated: bool, - /// A declaration of which security schemes are applied for this operation. The - /// list of values describes alternative security schemes that can be used - /// (that is, there is a logical OR between the security requirements). This - /// definition overrides any declared top-level security. To remove a top-level - /// security declaration, an empty array can be used. - #[prost(message, repeated, tag = "12")] - pub security: ::prost::alloc::vec::Vec, - /// Custom properties that start with "x-" such as "x-foo" used to describe - /// extra functionality that is not covered by the standard OpenAPI Specification. - /// See: - #[prost(map = "string, message", tag = "13")] - pub extensions: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost_types::Value, - >, - /// Custom parameters such as HTTP request headers. - /// See: - /// and - #[prost(message, optional, tag = "14")] - pub parameters: ::core::option::Option, -} -/// `Parameters` is a representation of OpenAPI v2 specification's parameters object. -/// Note: This technically breaks compatibility with the OpenAPI 2 definition structure as we only -/// allow header parameters to be set here since we do not want users specifying custom non-header -/// parameters beyond those inferred from the Protobuf schema. -/// See: -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Parameters { - /// `Headers` is one or more HTTP header parameter. 
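Because only header parameters may be declared here, a `Parameters` value reduces to a list of `HeaderParameter`s. A sketch under the same assumed `openapiv2` alias (the header name is hypothetical):

    let params = openapiv2::Parameters {
        headers: vec![openapiv2::HeaderParameter {
            name: "X-Request-Id".to_string(),
            description: "Correlation id for this call.".to_string(),
            r#type: openapiv2::header_parameter::Type::String as i32,
            format: String::new(),
            required: true,
        }],
    };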
- /// See: - #[prost(message, repeated, tag = "1")] - pub headers: ::prost::alloc::vec::Vec, -} -/// `HeaderParameter` a HTTP header parameter. -/// See: -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct HeaderParameter { - /// `Name` is the header name. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// `Description` is a short description of the header. - #[prost(string, tag = "2")] - pub description: ::prost::alloc::string::String, - /// `Type` is the type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. - /// See: - #[prost(enumeration = "header_parameter::Type", tag = "3")] - pub r#type: i32, - /// `Format` The extending format for the previously mentioned type. - #[prost(string, tag = "4")] - pub format: ::prost::alloc::string::String, - /// `Required` indicates if the header is optional - #[prost(bool, tag = "5")] - pub required: bool, -} -/// Nested message and enum types in `HeaderParameter`. -pub mod header_parameter { - /// `Type` is a a supported HTTP header type. - /// See - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Type { - Unknown = 0, - String = 1, - Number = 2, - Integer = 3, - Boolean = 4, - } - impl Type { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Type::Unknown => "UNKNOWN", - Type::String => "STRING", - Type::Number => "NUMBER", - Type::Integer => "INTEGER", - Type::Boolean => "BOOLEAN", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "STRING" => Some(Self::String), - "NUMBER" => Some(Self::Number), - "INTEGER" => Some(Self::Integer), - "BOOLEAN" => Some(Self::Boolean), - _ => None, - } - } - } -} -/// `Header` is a representation of OpenAPI v2 specification's Header object. -/// -/// See: -/// -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Header { - /// `Description` is a short description of the header. - #[prost(string, tag = "1")] - pub description: ::prost::alloc::string::String, - /// The type of the object. The value MUST be one of "string", "number", "integer", or "boolean". The "array" type is not supported. - #[prost(string, tag = "2")] - pub r#type: ::prost::alloc::string::String, - /// `Format` The extending format for the previously mentioned type. - #[prost(string, tag = "3")] - pub format: ::prost::alloc::string::String, - /// `Default` Declares the value of the header that the server will use if none is provided. - /// See: - /// Unlike JSON Schema this value MUST conform to the defined type for the header. - #[prost(string, tag = "6")] - pub default: ::prost::alloc::string::String, - /// 'Pattern' See - #[prost(string, tag = "13")] - pub pattern: ::prost::alloc::string::String, -} -/// `Response` is a representation of OpenAPI v2 specification's Response object. 
-/// -/// See: -/// -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Response { - /// `Description` is a short description of the response. - /// GFM syntax can be used for rich text representation. - #[prost(string, tag = "1")] - pub description: ::prost::alloc::string::String, - /// `Schema` optionally defines the structure of the response. - /// If `Schema` is not provided, it means there is no content to the response. - #[prost(message, optional, tag = "2")] - pub schema: ::core::option::Option, - /// `Headers` A list of headers that are sent with the response. - /// `Header` name is expected to be a string in the canonical format of the MIME header key - /// See: - #[prost(map = "string, message", tag = "3")] - pub headers: ::std::collections::HashMap<::prost::alloc::string::String, Header>, - /// `Examples` gives per-mimetype response examples. - /// See: - #[prost(map = "string, string", tag = "4")] - pub examples: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, - /// Custom properties that start with "x-" such as "x-foo" used to describe - /// extra functionality that is not covered by the standard OpenAPI Specification. - /// See: - #[prost(map = "string, message", tag = "5")] - pub extensions: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost_types::Value, - >, -} -/// `Info` is a representation of OpenAPI v2 specification's Info object. -/// -/// See: -/// -/// Example: -/// -/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { -/// info: { -/// title: "Echo API"; -/// version: "1.0"; -/// description: ""; -/// contact: { -/// name: "gRPC-Gateway project"; -/// url: " -/// email: "none@example.com"; -/// }; -/// license: { -/// name: "BSD 3-Clause License"; -/// url: " -/// }; -/// }; -/// ... -/// }; -/// -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Info { - /// The title of the application. - #[prost(string, tag = "1")] - pub title: ::prost::alloc::string::String, - /// A short description of the application. GFM syntax can be used for rich - /// text representation. - #[prost(string, tag = "2")] - pub description: ::prost::alloc::string::String, - /// The Terms of Service for the API. - #[prost(string, tag = "3")] - pub terms_of_service: ::prost::alloc::string::String, - /// The contact information for the exposed API. - #[prost(message, optional, tag = "4")] - pub contact: ::core::option::Option, - /// The license information for the exposed API. - #[prost(message, optional, tag = "5")] - pub license: ::core::option::Option, - /// Provides the version of the application API (not to be confused - /// with the specification version). - #[prost(string, tag = "6")] - pub version: ::prost::alloc::string::String, - /// Custom properties that start with "x-" such as "x-foo" used to describe - /// extra functionality that is not covered by the standard OpenAPI Specification. - /// See: - #[prost(map = "string, message", tag = "7")] - pub extensions: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost_types::Value, - >, -} -/// `Contact` is a representation of OpenAPI v2 specification's Contact object. -/// -/// See: -/// -/// Example: -/// -/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { -/// info: { -/// ... 
-/// contact: { -/// name: "gRPC-Gateway project"; -/// url: " -/// email: "none@example.com"; -/// }; -/// ... -/// }; -/// ... -/// }; -/// -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Contact { - /// The identifying name of the contact person/organization. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// The URL pointing to the contact information. MUST be in the format of a - /// URL. - #[prost(string, tag = "2")] - pub url: ::prost::alloc::string::String, - /// The email address of the contact person/organization. MUST be in the format - /// of an email address. - #[prost(string, tag = "3")] - pub email: ::prost::alloc::string::String, -} -/// `License` is a representation of OpenAPI v2 specification's License object. -/// -/// See: -/// -/// Example: -/// -/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { -/// info: { -/// ... -/// license: { -/// name: "BSD 3-Clause License"; -/// url: " -/// }; -/// ... -/// }; -/// ... -/// }; -/// -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct License { - /// The license name used for the API. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// A URL to the license used for the API. MUST be in the format of a URL. - #[prost(string, tag = "2")] - pub url: ::prost::alloc::string::String, -} -/// `ExternalDocumentation` is a representation of OpenAPI v2 specification's -/// ExternalDocumentation object. -/// -/// See: -/// -/// Example: -/// -/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_swagger) = { -/// ... -/// external_docs: { -/// description: "More about gRPC-Gateway"; -/// url: " -/// } -/// ... -/// }; -/// -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ExternalDocumentation { - /// A short description of the target documentation. GFM syntax can be used for - /// rich text representation. - #[prost(string, tag = "1")] - pub description: ::prost::alloc::string::String, - /// The URL for the target documentation. Value MUST be in the format - /// of a URL. - #[prost(string, tag = "2")] - pub url: ::prost::alloc::string::String, -} -/// `Schema` is a representation of OpenAPI v2 specification's Schema object. -/// -/// See: -/// -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Schema { - #[prost(message, optional, tag = "1")] - pub json_schema: ::core::option::Option, - /// Adds support for polymorphism. The discriminator is the schema property - /// name that is used to differentiate between other schema that inherit this - /// schema. The property name used MUST be defined at this schema and it MUST - /// be in the required property list. When used, the value MUST be the name of - /// this schema or any schema that inherits it. - #[prost(string, tag = "2")] - pub discriminator: ::prost::alloc::string::String, - /// Relevant only for Schema "properties" definitions. Declares the property as - /// "read only". This means that it MAY be sent as part of a response but MUST - /// NOT be sent as part of the request. Properties marked as readOnly being - /// true SHOULD NOT be in the required list of the defined schema. Default - /// value is false. - #[prost(bool, tag = "3")] - pub read_only: bool, - /// Additional external documentation for this schema. 
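The `Info`, `Contact`, and `License` messages compose in the obvious way; the URLs in the original examples are elided in this copy of the comments, so they are left empty here too. A sketch (same assumed `openapiv2` alias):

    let info = openapiv2::Info {
        title: "Echo API".to_string(),
        version: "1.0".to_string(),
        contact: Some(openapiv2::Contact {
            name: "gRPC-Gateway project".to_string(),
            url: String::new(),   // URL elided in the source comment
            email: "none@example.com".to_string(),
        }),
        license: Some(openapiv2::License {
            name: "BSD 3-Clause License".to_string(),
            url: String::new(),   // URL elided in the source comment
        }),
        ..Default::default()
    };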
- #[prost(message, optional, tag = "5")] - pub external_docs: ::core::option::Option, - /// A free-form property to include an example of an instance for this schema in JSON. - /// This is copied verbatim to the output. - #[prost(string, tag = "6")] - pub example: ::prost::alloc::string::String, -} -/// `JSONSchema` represents properties from JSON Schema taken, and as used, in -/// the OpenAPI v2 spec. -/// -/// This includes changes made by OpenAPI v2. -/// -/// See: -/// -/// See also: -/// -/// -/// Example: -/// -/// message SimpleMessage { -/// option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_schema) = { -/// json_schema: { -/// title: "SimpleMessage" -/// description: "A simple message." -/// required: \["id"\] -/// } -/// }; -/// -/// // Id represents the message identifier. -/// string id = 1; [ -/// (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_field) = { -/// description: "The unique identifier of the simple message." -/// }]; -/// } -/// -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct JsonSchema { - /// Ref is used to define an external reference to include in the message. - /// This could be a fully qualified proto message reference, and that type must - /// be imported into the protofile. If no message is identified, the Ref will - /// be used verbatim in the output. - /// For example: - /// `ref: ".google.protobuf.Timestamp"`. - #[prost(string, tag = "3")] - pub r#ref: ::prost::alloc::string::String, - /// The title of the schema. - #[prost(string, tag = "5")] - pub title: ::prost::alloc::string::String, - /// A short description of the schema. - #[prost(string, tag = "6")] - pub description: ::prost::alloc::string::String, - #[prost(string, tag = "7")] - pub default: ::prost::alloc::string::String, - #[prost(bool, tag = "8")] - pub read_only: bool, - /// A free-form property to include a JSON example of this field. This is copied - /// verbatim to the output swagger.json. Quotes must be escaped. - /// This property is the same for 2.0 and 3.0.0 - #[prost(string, tag = "9")] - pub example: ::prost::alloc::string::String, - #[prost(double, tag = "10")] - pub multiple_of: f64, - /// Maximum represents an inclusive upper limit for a numeric instance. The - /// value of MUST be a number, - #[prost(double, tag = "11")] - pub maximum: f64, - #[prost(bool, tag = "12")] - pub exclusive_maximum: bool, - /// minimum represents an inclusive lower limit for a numeric instance. The - /// value of MUST be a number, - #[prost(double, tag = "13")] - pub minimum: f64, - #[prost(bool, tag = "14")] - pub exclusive_minimum: bool, - #[prost(uint64, tag = "15")] - pub max_length: u64, - #[prost(uint64, tag = "16")] - pub min_length: u64, - #[prost(string, tag = "17")] - pub pattern: ::prost::alloc::string::String, - #[prost(uint64, tag = "20")] - pub max_items: u64, - #[prost(uint64, tag = "21")] - pub min_items: u64, - #[prost(bool, tag = "22")] - pub unique_items: bool, - #[prost(uint64, tag = "24")] - pub max_properties: u64, - #[prost(uint64, tag = "25")] - pub min_properties: u64, - #[prost(string, repeated, tag = "26")] - pub required: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Items in 'array' must be unique. 
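Mirroring the `SimpleMessage` example above, the JSON Schema subset carried by `JsonSchema` can be set directly; prost's derived `Default` fills the remaining validation keywords. A sketch (same assumed alias):

    let schema = openapiv2::JsonSchema {
        title: "SimpleMessage".to_string(),
        description: "A simple message.".to_string(),
        required: vec!["id".to_string()],
        ..Default::default()
    };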
- #[prost(string, repeated, tag = "34")] - pub array: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - #[prost(enumeration = "json_schema::JsonSchemaSimpleTypes", repeated, tag = "35")] - pub r#type: ::prost::alloc::vec::Vec, - /// `Format` - #[prost(string, tag = "36")] - pub format: ::prost::alloc::string::String, - /// Items in `enum` must be unique - #[prost(string, repeated, tag = "46")] - pub r#enum: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - /// Additional field level properties used when generating the OpenAPI v2 file. - #[prost(message, optional, tag = "1001")] - pub field_configuration: ::core::option::Option, - /// Custom properties that start with "x-" such as "x-foo" used to describe - /// extra functionality that is not covered by the standard OpenAPI Specification. - /// See: - #[prost(map = "string, message", tag = "48")] - pub extensions: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost_types::Value, - >, -} -/// Nested message and enum types in `JSONSchema`. -pub mod json_schema { - /// 'FieldConfiguration' provides additional field level properties used when generating the OpenAPI v2 file. - /// These properties are not defined by OpenAPIv2, but they are used to control the generation. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct FieldConfiguration { - /// Alternative parameter name when used as path parameter. If set, this will - /// be used as the complete parameter name when this field is used as a path - /// parameter. Use this to avoid having auto generated path parameter names - /// for overlapping paths. - #[prost(string, tag = "47")] - pub path_param_name: ::prost::alloc::string::String, - } - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum JsonSchemaSimpleTypes { - Unknown = 0, - Array = 1, - Boolean = 2, - Integer = 3, - Null = 4, - Number = 5, - Object = 6, - String = 7, - } - impl JsonSchemaSimpleTypes { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - JsonSchemaSimpleTypes::Unknown => "UNKNOWN", - JsonSchemaSimpleTypes::Array => "ARRAY", - JsonSchemaSimpleTypes::Boolean => "BOOLEAN", - JsonSchemaSimpleTypes::Integer => "INTEGER", - JsonSchemaSimpleTypes::Null => "NULL", - JsonSchemaSimpleTypes::Number => "NUMBER", - JsonSchemaSimpleTypes::Object => "OBJECT", - JsonSchemaSimpleTypes::String => "STRING", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "ARRAY" => Some(Self::Array), - "BOOLEAN" => Some(Self::Boolean), - "INTEGER" => Some(Self::Integer), - "NULL" => Some(Self::Null), - "NUMBER" => Some(Self::Number), - "OBJECT" => Some(Self::Object), - "STRING" => Some(Self::String), - _ => None, - } - } - } -} -/// `Tag` is a representation of OpenAPI v2 specification's Tag object. -/// -/// See: -/// -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Tag { - /// The name of the tag. 
Use it to allow override of the name of a - /// global Tag object, then use that name to reference the tag throughout the - /// OpenAPI file. - #[prost(string, tag = "1")] - pub name: ::prost::alloc::string::String, - /// A short description for the tag. GFM syntax can be used for rich text - /// representation. - #[prost(string, tag = "2")] - pub description: ::prost::alloc::string::String, - /// Additional external documentation for this tag. - #[prost(message, optional, tag = "3")] - pub external_docs: ::core::option::Option, - /// Custom properties that start with "x-" such as "x-foo" used to describe - /// extra functionality that is not covered by the standard OpenAPI Specification. - /// See: - #[prost(map = "string, message", tag = "4")] - pub extensions: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost_types::Value, - >, -} -/// `SecurityDefinitions` is a representation of OpenAPI v2 specification's -/// Security Definitions object. -/// -/// See: -/// -/// A declaration of the security schemes available to be used in the -/// specification. This does not enforce the security schemes on the operations -/// and only serves to provide the relevant details for each scheme. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SecurityDefinitions { - /// A single security scheme definition, mapping a "name" to the scheme it - /// defines. - #[prost(map = "string, message", tag = "1")] - pub security: ::std::collections::HashMap< - ::prost::alloc::string::String, - SecurityScheme, - >, -} -/// `SecurityScheme` is a representation of OpenAPI v2 specification's -/// Security Scheme object. -/// -/// See: -/// -/// Allows the definition of a security scheme that can be used by the -/// operations. Supported schemes are basic authentication, an API key (either as -/// a header or as a query parameter) and OAuth2's common flows (implicit, -/// password, application and access code). -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SecurityScheme { - /// The type of the security scheme. Valid values are "basic", - /// "apiKey" or "oauth2". - #[prost(enumeration = "security_scheme::Type", tag = "1")] - pub r#type: i32, - /// A short description for security scheme. - #[prost(string, tag = "2")] - pub description: ::prost::alloc::string::String, - /// The name of the header or query parameter to be used. - /// Valid for apiKey. - #[prost(string, tag = "3")] - pub name: ::prost::alloc::string::String, - /// The location of the API key. Valid values are "query" or - /// "header". - /// Valid for apiKey. - #[prost(enumeration = "security_scheme::In", tag = "4")] - pub r#in: i32, - /// The flow used by the OAuth2 security scheme. Valid values are - /// "implicit", "password", "application" or "accessCode". - /// Valid for oauth2. - #[prost(enumeration = "security_scheme::Flow", tag = "5")] - pub flow: i32, - /// The authorization URL to be used for this flow. This SHOULD be in - /// the form of a URL. - /// Valid for oauth2/implicit and oauth2/accessCode. - #[prost(string, tag = "6")] - pub authorization_url: ::prost::alloc::string::String, - /// The token URL to be used for this flow. This SHOULD be in the - /// form of a URL. - /// Valid for oauth2/password, oauth2/application and oauth2/accessCode. - #[prost(string, tag = "7")] - pub token_url: ::prost::alloc::string::String, - /// The available scopes for the OAuth2 security scheme. - /// Valid for oauth2. 
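An API-key scheme uses only the `type`, `name`, and `in` fields; the OAuth2 fields stay at their defaults. A sketch (same assumed `openapiv2` alias; the header name is hypothetical):

    let api_key = openapiv2::SecurityScheme {
        r#type: openapiv2::security_scheme::Type::ApiKey as i32,
        description: "API key authentication.".to_string(),
        name: "X-Api-Key".to_string(),
        r#in: openapiv2::security_scheme::In::Header as i32,
        ..Default::default()
    };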
- #[prost(message, optional, tag = "8")] - pub scopes: ::core::option::Option, - /// Custom properties that start with "x-" such as "x-foo" used to describe - /// extra functionality that is not covered by the standard OpenAPI Specification. - /// See: - #[prost(map = "string, message", tag = "9")] - pub extensions: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost_types::Value, - >, -} -/// Nested message and enum types in `SecurityScheme`. -pub mod security_scheme { - /// The type of the security scheme. Valid values are "basic", - /// "apiKey" or "oauth2". - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Type { - Invalid = 0, - Basic = 1, - ApiKey = 2, - Oauth2 = 3, - } - impl Type { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Type::Invalid => "TYPE_INVALID", - Type::Basic => "TYPE_BASIC", - Type::ApiKey => "TYPE_API_KEY", - Type::Oauth2 => "TYPE_OAUTH2", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "TYPE_INVALID" => Some(Self::Invalid), - "TYPE_BASIC" => Some(Self::Basic), - "TYPE_API_KEY" => Some(Self::ApiKey), - "TYPE_OAUTH2" => Some(Self::Oauth2), - _ => None, - } - } - } - /// The location of the API key. Valid values are "query" or "header". - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum In { - Invalid = 0, - Query = 1, - Header = 2, - } - impl In { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - In::Invalid => "IN_INVALID", - In::Query => "IN_QUERY", - In::Header => "IN_HEADER", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. - pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "IN_INVALID" => Some(Self::Invalid), - "IN_QUERY" => Some(Self::Query), - "IN_HEADER" => Some(Self::Header), - _ => None, - } - } - } - /// The flow used by the OAuth2 security scheme. Valid values are - /// "implicit", "password", "application" or "accessCode". - #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - Hash, - PartialOrd, - Ord, - ::prost::Enumeration - )] - #[repr(i32)] - pub enum Flow { - Invalid = 0, - Implicit = 1, - Password = 2, - Application = 3, - AccessCode = 4, - } - impl Flow { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Flow::Invalid => "FLOW_INVALID", - Flow::Implicit => "FLOW_IMPLICIT", - Flow::Password => "FLOW_PASSWORD", - Flow::Application => "FLOW_APPLICATION", - Flow::AccessCode => "FLOW_ACCESS_CODE", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "FLOW_INVALID" => Some(Self::Invalid), - "FLOW_IMPLICIT" => Some(Self::Implicit), - "FLOW_PASSWORD" => Some(Self::Password), - "FLOW_APPLICATION" => Some(Self::Application), - "FLOW_ACCESS_CODE" => Some(Self::AccessCode), - _ => None, - } - } - } -} -/// `SecurityRequirement` is a representation of OpenAPI v2 specification's -/// Security Requirement object. -/// -/// See: -/// -/// Lists the required security schemes to execute this operation. The object can -/// have multiple security schemes declared in it which are all required (that -/// is, there is a logical AND between the schemes). -/// -/// The name used for each property MUST correspond to a security scheme -/// declared in the Security Definitions. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SecurityRequirement { - /// Each name must correspond to a security scheme which is declared in - /// the Security Definitions. If the security scheme is of type "oauth2", - /// then the value is a list of scope names required for the execution. - /// For other security scheme types, the array MUST be empty. - #[prost(map = "string, message", tag = "1")] - pub security_requirement: ::std::collections::HashMap< - ::prost::alloc::string::String, - security_requirement::SecurityRequirementValue, - >, -} -/// Nested message and enum types in `SecurityRequirement`. -pub mod security_requirement { - /// If the security scheme is of type "oauth2", then the value is a list of - /// scope names required for the execution. For other security scheme types, - /// the array MUST be empty. - #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct SecurityRequirementValue { - #[prost(string, repeated, tag = "1")] - pub scope: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, - } -} -/// `Scopes` is a representation of OpenAPI v2 specification's Scopes object. -/// -/// See: -/// -/// Lists the available scopes for an OAuth2 security scheme. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct Scopes { - /// Maps between a name of a scope to a short description of it (as the value - /// of the property). - #[prost(map = "string, string", tag = "1")] - pub scope: ::std::collections::HashMap< - ::prost::alloc::string::String, - ::prost::alloc::string::String, - >, -} -/// Scheme describes the schemes supported by the OpenAPI Swagger -/// and Operation objects. -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum Scheme { - Unknown = 0, - Http = 1, - Https = 2, - Ws = 3, - Wss = 4, -} -impl Scheme { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - Scheme::Unknown => "UNKNOWN", - Scheme::Http => "HTTP", - Scheme::Https => "HTTPS", - Scheme::Ws => "WS", - Scheme::Wss => "WSS", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
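The generated `as_str_name`/`from_str_name` helpers round-trip the proto enum names; a quick sketch of the pattern (same assumed alias):

    // Round-tripping proto enum names via the generated helpers.
    assert_eq!(openapiv2::Scheme::Https.as_str_name(), "HTTPS");
    assert_eq!(
        openapiv2::Scheme::from_str_name("HTTPS"),
        Some(openapiv2::Scheme::Https)
    );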
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN" => Some(Self::Unknown), - "HTTP" => Some(Self::Http), - "HTTPS" => Some(Self::Https), - "WS" => Some(Self::Ws), - "WSS" => Some(Self::Wss), - _ => None, - } - } -} diff --git a/flyrs/src/gen/pb_rust/flyteidl/lib.rs b/flyrs/src/gen/pb_rust/flyteidl/lib.rs deleted file mode 100644 index 7720f6fdc4..0000000000 --- a/flyrs/src/gen/pb_rust/flyteidl/lib.rs +++ /dev/null @@ -1,27 +0,0 @@ -pub mod datacatalog { - include!("datacatalog.rs"); -} -pub mod flyteidl { - - pub mod admin { - include!("flyteidl.admin.rs"); - } - pub mod cache { - include!("flyteidl.cacheservice.rs"); - } - pub mod core { - include!("flyteidl.core.rs"); - } - pub mod event { - include!("flyteidl.event.rs"); - } - pub mod plugins { - include!("flyteidl.plugins.rs"); - pub mod kubeflow{ - include!("flyteidl.plugins.kubeflow.rs"); - } - } - pub mod service { - include!("flyteidl.service.rs"); - } -} \ No newline at end of file diff --git a/flyrs/src/lib.rs b/flyrs/src/lib.rs index 5fda151984..106bc8c760 100644 --- a/flyrs/src/lib.rs +++ b/flyrs/src/lib.rs @@ -2,7 +2,7 @@ use prost::Message; use pyo3::prelude::*; use pyo3::types::PyBytes; use tokio::runtime::{Builder, Runtime}; -use tonic::{transport::{Channel}}; +use tonic::transport::Channel; use flyteidl::flyteidl::service::admin_service_client::AdminServiceClient; use flyteidl::flyteidl::admin::{Task, ObjectGetRequest, ResourceListRequest, TaskExecutionGetRequest}; diff --git a/flyrs/test_flytekit_remote.py b/flyrs/test_flytekit_remote.py index f62d72accb..303aa6dab7 100644 --- a/flyrs/test_flytekit_remote.py +++ b/flyrs/test_flytekit_remote.py @@ -1,6 +1,3 @@ -import timeit -import matplotlib.pyplot as plt - from flytekit.configuration import Config from flytekit.remote import FlyteRemote @@ -11,42 +8,12 @@ task_py = remote_py.fetch_task( project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw" ) -# print(task_py) +print(task_py) remote_rs = FlyteRemote(Config.auto(), enable_rs=True, default_project=PROJECT, default_domain=DOMAIN) task_rs = remote_rs.fetch_task( project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw" ) -# print(task_rs) - -print(task_py == task_rs) - - -setup = """ -from flytekit.remote import FlyteRemote; -from flytekit.configuration import Config; -PROJECT = "flytesnacks"; -DOMAIN = "development"; -remote_py = FlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN); -remote_rs = FlyteRemote(Config.auto(), enable_rs=True, default_project=PROJECT, default_domain=DOMAIN); -""" - -fetch_task_in_py = """task_py = remote_py.fetch_task(project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw")""" -fetch_task_in_rs = """task_rs = remote_rs.fetch_task(project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw")""" +print(task_rs) -r = 10 -Xs = [1, 10, 100, 1000] -py_elpased, rs_elpased = [], [] -for x in Xs: - # Python gRPC - py_elpased.append(sum(timeit.repeat(fetch_task_in_py, setup=setup, repeat=r, number=x))/r) - print() - # Rust gRPC - rs_elpased.append(sum(timeit.repeat(fetch_task_in_rs, setup=setup, repeat=r, number=x))/r) - print() -plt.xlabel('# of fetched tasks') -plt.ylabel('average elapsed time (s)') -plt.plot(Xs, py_elpased,'r-',label='Python gRPC') -plt.plot(Xs, rs_elpased,'b-',label='Rust gRPC') -plt.legend() -plt.savefig("perf.png") \ No newline at end of file +assert task_py == task_rs \ No newline at end of 
file From 3ff5dc849288f6e5a905e67c65f748266f641ab7 Mon Sep 17 00:00:00 2001 From: Austin Liu Date: Fri, 12 Apr 2024 16:21:29 +0800 Subject: [PATCH 07/16] re-org Signed-off-by: Austin Liu --- flyrs/.gitignore | 2 - flyrs/Cargo.lock | 1129 ++++++++ .../friendly_rs.py => flyrs/friendly.py | 0 flyrs/remote.py | 2353 +++++++++++++++++ flyrs/src/lib.rs | 17 +- ...flytekit_remote.py => test_FlyteRemote.py} | 3 +- flytekit/remote/remote.py | 6 +- 7 files changed, 3494 insertions(+), 16 deletions(-) create mode 100644 flyrs/Cargo.lock rename flytekit/clients/friendly_rs.py => flyrs/friendly.py (100%) create mode 100644 flyrs/remote.py rename flyrs/{test_flytekit_remote.py => test_FlyteRemote.py} (90%) diff --git a/flyrs/.gitignore b/flyrs/.gitignore index 95b531ed13..15640f3707 100644 --- a/flyrs/.gitignore +++ b/flyrs/.gitignore @@ -1,7 +1,5 @@ /target -Cargo.lock - pyproject.toml diff --git a/flyrs/Cargo.lock b/flyrs/Cargo.lock new file mode 100644 index 0000000000..6aed12e917 --- /dev/null +++ b/flyrs/Cargo.lock @@ -0,0 +1,1129 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 3 + +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "anyhow" +version = "1.0.81" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" + +[[package]] +name = "async-stream" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "async-trait" +version = "0.1.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "autocfg" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" + +[[package]] +name = "axum" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "sync_wrapper", + "tower", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + 
"http", + "http-body", + "mime", + "rustversion", + "tower-layer", + "tower-service", +] + +[[package]] +name = "backtrace" +version = "0.3.71" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "base64" +version = "0.21.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" + +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + +[[package]] +name = "bytes" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" + +[[package]] +name = "cc" +version = "1.0.90" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "either" +version = "1.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" + +[[package]] +name = "equivalent" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" + +[[package]] +name = "flyrs" +version = "0.1.0" +dependencies = [ + "flyteidl", + "prost", + "pyo3", + "tokio", + "tonic", +] + +[[package]] +name = "flyteidl" +version = "0.1.0" +dependencies = [ + "prost", + "prost-types", + "tonic", +] + +[[package]] +name = "fnv" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + +[[package]] +name = "futures-channel" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +dependencies = [ + "futures-core", +] + +[[package]] +name = "futures-core" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" + +[[package]] +name = "futures-sink" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" + +[[package]] +name = "futures-task" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" + +[[package]] +name = "futures-util" +version = "0.3.30" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +dependencies = [ + "futures-core", + "futures-task", + "pin-project-lite", + "pin-utils", +] + +[[package]] +name = "getrandom" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "gimli" +version = "0.28.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" + +[[package]] +name = "h2" +version = "0.3.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fbd2820c5e49886948654ab546d0688ff24530286bdcf8fca3cefb16d4618eb" +dependencies = [ + "bytes", + "fnv", + "futures-core", + "futures-sink", + "futures-util", + "http", + "indexmap 2.2.6", + "slab", + "tokio", + "tokio-util", + "tracing", +] + +[[package]] +name = "hashbrown" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" + +[[package]] +name = "hashbrown" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "hermit-abi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" + +[[package]] +name = "http" +version = "0.2.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" +dependencies = [ + "bytes", + "fnv", + "itoa", +] + +[[package]] +name = "http-body" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" +dependencies = [ + "bytes", + "http", + "pin-project-lite", +] + +[[package]] +name = "httparse" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" + +[[package]] +name = "httpdate" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + +[[package]] +name = "hyper" +version = "0.14.28" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf96e135eb83a2a8ddf766e426a841d8ddd7449d5f00d34ea02b41d2f19eef80" +dependencies = [ + "bytes", + "futures-channel", + "futures-core", + "futures-util", + "h2", + "http", + "http-body", + "httparse", + "httpdate", + "itoa", + "pin-project-lite", + "socket2", + "tokio", + "tower-service", + "tracing", + "want", +] + +[[package]] +name = "hyper-timeout" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +dependencies = [ + "hyper", + "pin-project-lite", + "tokio", + "tokio-io-timeout", +] + +[[package]] +name = "indexmap" +version = "1.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" +dependencies = [ + "autocfg", + "hashbrown 0.12.3", +] + +[[package]] +name = "indexmap" +version = "2.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +dependencies = [ + "equivalent", + "hashbrown 0.14.3", +] + +[[package]] +name = "indoc" +version = "2.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" + +[[package]] +name = "itertools" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" + +[[package]] +name = "libc" +version = "0.2.153" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" + +[[package]] +name = "lock_api" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +dependencies = [ + "autocfg", + "scopeguard", +] + +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + +[[package]] +name = "memchr" +version = "2.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" + +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "mime" +version = "0.3.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + +[[package]] +name = "miniz_oxide" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +dependencies = [ + "adler", +] + +[[package]] +name = "mio" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +dependencies = [ + "libc", + "wasi", + "windows-sys 0.48.0", +] + +[[package]] +name = "num_cpus" +version = "1.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +dependencies = [ + "hermit-abi", + "libc", +] + +[[package]] +name = "object" +version = "0.32.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6a622008b6e321afc04970976f62ee297fdbaa6f95318ca343e3eebb9648441" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.9" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-targets 0.48.5", +] + +[[package]] +name = "percent-encoding" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" + +[[package]] +name = "pin-project" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +dependencies = [ + "pin-project-internal", +] + +[[package]] +name = "pin-project-internal" +version = "1.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "pin-utils" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + +[[package]] +name = "portable-atomic" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" + +[[package]] +name = "ppv-lite86" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" + +[[package]] +name = "proc-macro2" +version = "1.0.79" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prost" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "146c289cda302b98a28d40c8b3b90498d6e526dd24ac2ecea73e4e491685b94a" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efb6c9a1dd1def8e2124d17e83a20af56f1570d6c2d2bd9e266ccb768df3840e" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "prost-types" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "193898f59edcf43c26227dcd4c8427f00d99d61e95dcde58dabd49fa291d470e" +dependencies = [ + "prost", +] + +[[package]] +name = "pyo3" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a02a88a17e74cadbc8ce77855e1d6c8ad0ab82901a4a9b5046bd01c1c0bd95cd" +dependencies = [ + "cfg-if", + "indoc", + "libc", + "memoffset", + "parking_lot", + "portable-atomic", + "pyo3-build-config", + "pyo3-ffi", + "pyo3-macros", + "unindent", +] + +[[package]] +name = "pyo3-build-config" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5eb0b6ecba38961f6f4bd6cd5906dfab3cd426ff37b2eed5771006aa31656f1" +dependencies = [ + "once_cell", + "target-lexicon", +] + +[[package]] +name = "pyo3-ffi" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"ba8a6e48a29b5d22e4fdaf132d8ba8d3203ee9f06362d48f244346902a594ec3" +dependencies = [ + "libc", + "pyo3-build-config", +] + +[[package]] +name = "pyo3-macros" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4e80493c5965f94a747d0782a607b2328a4eea5391327b152b00e2f3b001cede" +dependencies = [ + "proc-macro2", + "pyo3-macros-backend", + "quote", + "syn", +] + +[[package]] +name = "pyo3-macros-backend" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcd7d86f42004025200e12a6a8119bd878329e6fddef8178eaafa4e4b5906c5b" +dependencies = [ + "heck", + "proc-macro2", + "pyo3-build-config", + "quote", + "syn", +] + +[[package]] +name = "quote" +version = "1.0.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +dependencies = [ + "libc", + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +dependencies = [ + "getrandom", +] + +[[package]] +name = "redox_syscall" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +dependencies = [ + "bitflags", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + +[[package]] +name = "rustversion" +version = "1.0.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "serde" +version = "1.0.197" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.197" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "signal-hook-registry" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +dependencies = [ + "libc", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", +] + +[[package]] +name = "smallvec" +version = "1.13.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" + +[[package]] +name = "socket2" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +dependencies = [ + "libc", + "windows-sys 0.52.0", +] + +[[package]] +name = "syn" +version = "2.0.55" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "sync_wrapper" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" + +[[package]] +name = "target-lexicon" +version = "0.12.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" + +[[package]] +name = "tokio" +version = "1.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" +dependencies = [ + "backtrace", + "bytes", + "libc", + "mio", + "num_cpus", + "parking_lot", + "pin-project-lite", + "signal-hook-registry", + "socket2", + "tokio-macros", + "windows-sys 0.48.0", +] + +[[package]] +name = "tokio-io-timeout" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +dependencies = [ + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-macros" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tokio-stream" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tokio-util" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", + "tracing", +] + +[[package]] +name = "tonic" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" +dependencies = [ + "async-stream", + "async-trait", + "axum", + "base64", + "bytes", + "h2", + "http", + "http-body", + "hyper", + "hyper-timeout", + "percent-encoding", + "pin-project", + "prost", + "tokio", + "tokio-stream", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "indexmap 1.9.3", + "pin-project", + "pin-project-lite", + "rand", + "slab", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", +] + +[[package]] +name = "try-lock" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "unindent" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7de7d73e1754487cb58364ee906a499937a0dfabd86bcb980fa99ec8c8fa2ce" + +[[package]] +name = "want" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" +dependencies = [ + "try-lock", +] + +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + +[[package]] +name = "windows-sys" +version = "0.52.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" +dependencies = [ + "windows-targets 0.52.4", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + +[[package]] +name = "windows-targets" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +dependencies = [ + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + 
"windows_x86_64_msvc 0.52.4", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" + +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" diff --git a/flytekit/clients/friendly_rs.py b/flyrs/friendly.py similarity index 100% rename from flytekit/clients/friendly_rs.py rename to flyrs/friendly.py diff --git a/flyrs/remote.py b/flyrs/remote.py new file mode 100644 index 0000000000..8d3587ee24 --- /dev/null +++ b/flyrs/remote.py @@ -0,0 +1,2353 @@ +""" +This module provides the ``FlyteRemote`` object, which is the end-user's main starting point for interacting +with a Flyte backend in an interactive and programmatic way. This of this experience as kind of like the web UI +but in Python object form. 
+""" +from __future__ import annotations + +import base64 +import configparser +import functools +import hashlib +import os +import pathlib +import tempfile +import time +import typing +import uuid +from base64 import b64encode +from collections import OrderedDict +from dataclasses import asdict, dataclass +from datetime import datetime, timedelta +from typing import Dict + +import click +import fsspec +import requests +from flyteidl.admin.signal_pb2 import Signal, SignalListRequest, SignalSetRequest +from flyteidl.core import literals_pb2 + +from flytekit import ImageSpec +from friendly import RustSynchronousFlyteClient +from flytekit.clients.helpers import iterate_node_executions, iterate_task_executions +from flytekit.configuration import Config, FastSerializationSettings, ImageConfig, SerializationSettings +from flytekit.core import constants, utils +from flytekit.core.artifact import Artifact +from flytekit.core.base_task import PythonTask +from flytekit.core.context_manager import FlyteContext, FlyteContextManager +from flytekit.core.data_persistence import FileAccessProvider +from flytekit.core.launch_plan import LaunchPlan, ReferenceLaunchPlan +from flytekit.core.python_auto_container import PythonAutoContainerTask +from flytekit.core.reference_entity import ReferenceSpec +from flytekit.core.task import ReferenceTask +from flytekit.core.tracker import extract_task_module +from flytekit.core.type_engine import LiteralsResolver, TypeEngine +from flytekit.core.workflow import ReferenceWorkflow, WorkflowBase, WorkflowFailurePolicy +from flytekit.exceptions import user as user_exceptions +from flytekit.exceptions.user import ( + FlyteEntityAlreadyExistsException, + FlyteEntityNotExistException, + FlyteValueException, +) +from flytekit.loggers import logger +from flytekit.models import common as common_models +from flytekit.models import filters as filter_models +from flytekit.models import launch_plan as launch_plan_models +from flytekit.models import literals as literal_models +from flytekit.models import task as task_models +from flytekit.models import types as type_models +from flytekit.models.admin import common as admin_common_models +from flytekit.models.admin import workflow as admin_workflow_models +from flytekit.models.admin.common import Sort +from flytekit.models.core import identifier as id_models +from flytekit.models.core import workflow as workflow_model +from flytekit.models.core.identifier import Identifier, ResourceType, SignalIdentifier, WorkflowExecutionIdentifier +from flytekit.models.core.workflow import BranchNode, Node, NodeMetadata +from flytekit.models.execution import ( + ClusterAssignment, + ExecutionMetadata, + ExecutionSpec, + NodeExecutionGetDataResponse, + NotificationList, + WorkflowExecutionGetDataResponse, +) +from flytekit.models.launch_plan import LaunchPlanState +from flytekit.models.literals import Literal, LiteralMap +from flytekit.remote.backfill import create_backfill_workflow +from flytekit.remote.data import download_literal +from flytekit.remote.entities import FlyteLaunchPlan, FlyteNode, FlyteTask, FlyteTaskNode, FlyteWorkflow +from flytekit.remote.executions import FlyteNodeExecution, FlyteTaskExecution, FlyteWorkflowExecution +from flytekit.remote.interface import TypedInterface +from flytekit.remote.lazy_entity import LazyEntity +from flytekit.remote.remote_callable import RemoteEntity +from flytekit.remote.remote_fs import get_flyte_fs +from flytekit.tools.fast_registration import fast_package +from flytekit.tools.interactive import 
ipython_check +from flytekit.tools.script_mode import _find_project_root, compress_scripts, hash_file +from flytekit.tools.translator import ( + FlyteControlPlaneEntity, + FlyteLocalEntity, + Options, + get_serializable, + get_serializable_launch_plan, +) +
+if typing.TYPE_CHECKING: + try: + from IPython.core.display import HTML + except ImportError: + ... +
+ExecutionDataResponse = typing.Union[WorkflowExecutionGetDataResponse, NodeExecutionGetDataResponse] +
+MOST_RECENT_FIRST = admin_common_models.Sort("created_at", admin_common_models.Sort.Direction.DESCENDING) + +
+class RegistrationSkipped(Exception): + """ + RegistrationSkipped error is raised when trying to register an entity that is not registrable. + """ + + pass + +
+@dataclass +class ResolvedIdentifiers: + project: str + domain: str + name: str + version: str + +
+def _get_latest_version(list_entities_method: typing.Callable, project: str, domain: str, name: str): + named_entity = common_models.NamedEntityIdentifier(project, domain, name) + entity_list, _ = list_entities_method( + named_entity, + limit=1, + sort_by=Sort("created_at", Sort.Direction.DESCENDING), + ) + admin_entity = None if not entity_list else entity_list[0] + if not admin_entity: + raise user_exceptions.FlyteEntityNotExistException("Named entity {} not found".format(named_entity)) + return admin_entity.id.version + +
+def _get_entity_identifier( + list_entities_method: typing.Callable, + resource_type: int,  # from flytekit.models.core.identifier.ResourceType + project: str, + domain: str, + name: str, + version: typing.Optional[str] = None, +): + return Identifier( + resource_type, + project, + domain, + name, + version if version is not None else _get_latest_version(list_entities_method, project, domain, name), + ) + +
+def _get_git_repo_url(source_path): + """ + Get git repo URL from remote.origin.url + """ + try: + git_config = source_path / ".git" / "config" + if not git_config.exists(): + raise ValueError(f"{source_path} is not a git repo") + + config = configparser.ConfigParser() + config.read(git_config) + url = config['remote "origin"']["url"] +
+ if url.startswith("git@"): + # url format: git@github.com:flytekit/flytekit.git + prefix_len, suffix_len = len("git@"), len(".git") + return url[prefix_len:-suffix_len].replace(":", "/") + elif url.startswith("https://"): + # url format: https://github.com/flytekit/flytekit + prefix_len = len("https://") + return url[prefix_len:] + elif url.startswith("http://"): + # url format: http://github.com/flytekit/flytekit + prefix_len = len("http://") + return url[prefix_len:] + else: + raise ValueError("Unable to parse url") +
+ except Exception as e: + logger.debug(str(e)) + return "" + +
+class RustFlyteRemote(object): + """Main entrypoint for programmatically accessing a Flyte remote backend. +
+ The term 'remote' is synonymous with 'backend' or 'deployment' and refers to a hosted instance of the + Flyte platform, which comes with a Flyte Admin server on some known URI. + """ +
+ def __init__( + self, + config: Config, + default_project: typing.Optional[str] = None, + default_domain: typing.Optional[str] = None, + data_upload_location: str = "flyte://my-s3-bucket/", + **kwargs, + ): + """Initialize a RustFlyteRemote object. +
+ :param kwargs: All arguments that can be passed to create the SynchronousFlyteClient. These are usually grpc + parameters, if you want to customize credentials, ssl handling etc. + :param default_project: default project to use when fetching or executing flyte entities.
+ :param default_domain: default domain to use when fetching or executing flyte entities. + :param data_upload_location: this is where all the default data will be uploaded when providing inputs. + The default location - `s3://my-s3-bucket/data` works for sandbox/demo environment. Please override this for non-sandbox cases. + """ + if config is None or config.platform is None or config.platform.endpoint is None: + raise user_exceptions.FlyteAssertion("Flyte endpoint should be provided.") +
+ if data_upload_location is None: + data_upload_location = FlyteContext.current_context().file_access.raw_output_prefix + self._kwargs = kwargs + self._client_initialized = False + self._config = config + # read config files, env vars, host, ssl options for admin client + self._default_project = default_project + self._default_domain = default_domain +
+ fsspec.register_implementation("flyte", get_flyte_fs(remote=self), clobber=True) +
+ self._file_access = FileAccessProvider( + local_sandbox_dir=os.path.join(config.local_sandbox_path, "control_plane_metadata"), + raw_output_prefix=data_upload_location, + data_config=config.data_config, + ) +
+ # Save the file access object locally, build a context for it and save that as well. + self._ctx = FlyteContextManager.current_context().with_file_access(self._file_access).build() +
+ @property + def context(self) -> FlyteContext: + return self._ctx +
+ @property + def client(self): + """Return a RustSynchronousFlyteClient for additional operations.""" + if not self._client_initialized: + self._client = RustSynchronousFlyteClient() + self._client_initialized = True + return self._client +
+ @property + def default_project(self) -> str: + """Default project to use when fetching or executing flyte entities.""" + return self._default_project +
+ @property + def default_domain(self) -> str: + """Default domain to use when fetching or executing flyte entities.""" + return self._default_domain +
+ @property + def config(self) -> Config: + """Config used by this remote.""" + return self._config +
+ @property + def file_access(self) -> FileAccessProvider: + """File access provider to use for offloading non-literal inputs/outputs.""" + return self._file_access +
+ def get( + self, flyte_uri: typing.Optional[str] = None + ) -> typing.Optional[typing.Union[LiteralsResolver, Literal, HTML, bytes]]: + """ + General function that works with flyte tiny urls. This can return outputs (in the form of LiteralsResolver, or + individual Literals for singular requests), or HTML if passed a deck link, or bytes containing HTML, + if ipython is not available locally.
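+
+ For example, a sketch (the URI shown is hypothetical)::
+
+ outputs = remote.get("flyte://v1/flytesnacks/development/abc123/o")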
+ """ + if flyte_uri is None: + raise user_exceptions.FlyteUserException("flyte_uri cannot be empty") + ctx = self._ctx or FlyteContextManager.current_context() + try: + data_response = self.client.get_data(flyte_uri) + + if data_response.HasField("literal_map"): + lm = LiteralMap.from_flyte_idl(data_response.literal_map) + return LiteralsResolver(lm.literals) + elif data_response.HasField("literal"): + return Literal.from_flyte_idl(data_response.literal) + elif data_response.HasField("pre_signed_urls"): + if len(data_response.pre_signed_urls.signed_url) == 0: + raise ValueError(f"Flyte url {flyte_uri} resolved to empty download link") + d = data_response.pre_signed_urls.signed_url[0] + logger.debug(f"Download link is {d}") + fs = ctx.file_access.get_filesystem_for_path(d) + + # If the venv has IPython, then return IPython's HTML + if ipython_check(): + from IPython.core.display import HTML + + logger.debug(f"IPython found, returning HTML from {flyte_uri}") + with fs.open(d, "rb") as r: + html = HTML(str(r.read())) + return html + # If not return bytes + else: + logger.debug(f"IPython not found, returning HTML as bytes from {flyte_uri}") + return fs.open(d, "rb").read() + + except user_exceptions.FlyteUserException as e: + logger.info(f"Error from Flyte backend when trying to fetch data: {e.__cause__}") + + logger.info(f"Nothing found from {flyte_uri}") + + def remote_context(self): + """Context manager with remote-specific configuration.""" + return FlyteContextManager.with_context( + FlyteContextManager.current_context().with_file_access(self.file_access) + ) + + def fetch_task_lazy( + self, project: str = None, domain: str = None, name: str = None, version: str = None + ) -> LazyEntity: + """ + Similar to fetch_task, just that it returns a LazyEntity, which will fetch the workflow lazily. + """ + if name is None: + raise user_exceptions.FlyteAssertion("the 'name' argument must be specified.") + + def _fetch(): + return self.fetch_task(project=project, domain=domain, name=name, version=version) + + return LazyEntity(name=name, getter=_fetch) + + def fetch_task(self, project: str = None, domain: str = None, name: str = None, version: str = None) -> FlyteTask: + """Fetch a task entity from flyte admin. + + :param project: fetch entity from this project. If None, uses the default_project attribute. + :param domain: fetch entity from this domain. If None, uses the default_domain attribute. + :param name: fetch entity with matching name. + :param version: fetch entity with matching version. If None, gets the latest version of the entity. + :returns: :class:`~flytekit.remote.tasks.task.FlyteTask` + + :raises: FlyteAssertion if name is None + """ + if name is None: + raise user_exceptions.FlyteAssertion("the 'name' argument must be specified.") + task_id = _get_entity_identifier( + self.client.list_tasks_paginated, + ResourceType.TASK, + project or self.default_project, + domain or self.default_domain, + name, + version, + ) + admin_task = self.client.get_task(task_id) + flyte_task = FlyteTask.promote_from_model(admin_task.closure.compiled_task.template) + flyte_task.template._id = task_id + return flyte_task + + def fetch_workflow_lazy( + self, project: str = None, domain: str = None, name: str = None, version: str = None + ) -> LazyEntity[FlyteWorkflow]: + """ + Similar to fetch_workflow, just that it returns a LazyEntity, which will fetch the workflow lazily. 
+ """ + if name is None: + raise user_exceptions.FlyteAssertion("the 'name' argument must be specified.") + + def _fetch(): + return self.fetch_workflow(project, domain, name, version) + + return LazyEntity(name=name, getter=_fetch) + + def fetch_workflow( + self, project: str = None, domain: str = None, name: str = None, version: str = None + ) -> FlyteWorkflow: + """ + Fetch a workflow entity from flyte admin. + :param project: fetch entity from this project. If None, uses the default_project attribute. + :param domain: fetch entity from this domain. If None, uses the default_domain attribute. + :param name: fetch entity with matching name. + :param version: fetch entity with matching version. If None, gets the latest version of the entity. + :raises: FlyteAssertion if name is None + """ + if name is None: + raise user_exceptions.FlyteAssertion("the 'name' argument must be specified.") + workflow_id = _get_entity_identifier( + self.client.list_workflows_paginated, + ResourceType.WORKFLOW, + project or self.default_project, + domain or self.default_domain, + name, + version, + ) + + admin_workflow = self.client.get_workflow(workflow_id) + compiled_wf = admin_workflow.closure.compiled_workflow + + wf_templates = [compiled_wf.primary.template] + wf_templates.extend([swf.template for swf in compiled_wf.sub_workflows]) + + node_launch_plans = {} + + def find_launch_plan( + lp_ref: id_models, node_launch_plans: Dict[id_models, launch_plan_models.LaunchPlanSpec] + ) -> None: + if lp_ref not in node_launch_plans: + admin_launch_plan = self.client.get_launch_plan(lp_ref) + node_launch_plans[lp_ref] = admin_launch_plan.spec + + for wf_template in wf_templates: + for node in FlyteWorkflow.get_non_system_nodes(wf_template.nodes): + if node.workflow_node is not None and node.workflow_node.launchplan_ref is not None: + lp_ref = node.workflow_node.launchplan_ref + find_launch_plan(lp_ref, node_launch_plans) + + # Inspect conditional branch nodes for launch plans + def get_launch_plan_from_branch( + branch_node: BranchNode, node_launch_plans: Dict[id_models, launch_plan_models.LaunchPlanSpec] + ) -> None: + def get_launch_plan_from_then_node( + child_then_node: Node, node_launch_plans: Dict[id_models, launch_plan_models.LaunchPlanSpec] + ) -> None: + # then_node could have nested branch_node or be a normal then_node + if child_then_node.branch_node: + get_launch_plan_from_branch(child_then_node.branch_node, node_launch_plans) + elif child_then_node.workflow_node and child_then_node.workflow_node.launchplan_ref: + lp_ref = child_then_node.workflow_node.launchplan_ref + find_launch_plan(lp_ref, node_launch_plans) + + if branch_node and branch_node.if_else: + branch = branch_node.if_else + if branch.case and branch.case.then_node: + child_then_node = branch.case.then_node + get_launch_plan_from_then_node(child_then_node, node_launch_plans) + if branch.other: + for o in branch.other: + if o.then_node: + child_then_node = o.then_node + get_launch_plan_from_then_node(child_then_node, node_launch_plans) + if branch.else_node: + # else_node could have nested conditional branch_node + if branch.else_node.branch_node: + get_launch_plan_from_branch(branch.else_node.branch_node, node_launch_plans) + elif branch.else_node.workflow_node and branch.else_node.workflow_node.launchplan_ref: + lp_ref = branch.else_node.workflow_node.launchplan_ref + find_launch_plan(lp_ref, node_launch_plans) + + if node.branch_node: + get_launch_plan_from_branch(node.branch_node, node_launch_plans) + return 
FlyteWorkflow.promote_from_closure(compiled_wf, node_launch_plans) +
+ def fetch_launch_plan( + self, project: str = None, domain: str = None, name: str = None, version: str = None + ) -> FlyteLaunchPlan: + """Fetch a launchplan entity from flyte admin. +
+ :param project: fetch entity from this project. If None, uses the default_project attribute. + :param domain: fetch entity from this domain. If None, uses the default_domain attribute. + :param name: fetch entity with matching name. + :param version: fetch entity with matching version. If None, gets the latest version of the entity. + :returns: :class:`~flytekit.remote.launch_plan.FlyteLaunchPlan` +
+ :raises: FlyteAssertion if name is None + """ + if name is None: + raise user_exceptions.FlyteAssertion("the 'name' argument must be specified.") + launch_plan_id = _get_entity_identifier( + self.client.list_launch_plans_paginated, + ResourceType.LAUNCH_PLAN, + project or self.default_project, + domain or self.default_domain, + name, + version, + ) + admin_launch_plan = self.client.get_launch_plan(launch_plan_id) + flyte_launch_plan = FlyteLaunchPlan.promote_from_model(launch_plan_id, admin_launch_plan.spec) +
+ wf_id = flyte_launch_plan.workflow_id + workflow = self.fetch_workflow(wf_id.project, wf_id.domain, wf_id.name, wf_id.version) + flyte_launch_plan._interface = workflow.interface + flyte_launch_plan._flyte_workflow = workflow +
+ return flyte_launch_plan +
+ def fetch_execution(self, project: str = None, domain: str = None, name: str = None) -> FlyteWorkflowExecution: + """Fetch a workflow execution entity from flyte admin. +
+ :param project: fetch entity from this project. If None, uses the default_project attribute. + :param domain: fetch entity from this domain. If None, uses the default_domain attribute. + :param name: fetch entity with matching name. + :returns: :class:`~flytekit.remote.workflow_execution.FlyteWorkflowExecution` +
+ :raises: FlyteAssertion if name is None + """ + if name is None: + raise user_exceptions.FlyteAssertion("the 'name' argument must be specified.") + execution = FlyteWorkflowExecution.promote_from_model( + self.client.get_execution( + WorkflowExecutionIdentifier( + project or self.default_project, + domain or self.default_domain, + name, + ) + ) + ) + return self.sync_execution(execution) +
+ ###################### + #  Listing Entities  # + ###################### +
+ def list_signals( + self, + execution_name: str, + project: typing.Optional[str] = None, + domain: typing.Optional[str] = None, + limit: int = 100, + filters: typing.Optional[typing.List[filter_models.Filter]] = None, + ) -> typing.List[Signal]: + """ + :param execution_name: The name of the execution. This is the tail end of the URL when looking at the workflow execution. + :param project: The execution project, will default to the Remote's default project. + :param domain: The execution domain, will default to the Remote's default domain.
+ :param limit: The number of signals to fetch. + :param filters: Optional list of filters. + """ + wf_exec_id = WorkflowExecutionIdentifier( + project=project or self.default_project, domain=domain or self.default_domain, name=execution_name + ) + req = SignalListRequest(workflow_execution_id=wf_exec_id.to_flyte_idl(), limit=limit, filters=filters) + resp = self.client.list_signals(req) + s = resp.signals + return s +
+ def set_signal( + self, + signal_id: str, + execution_name: str, + value: typing.Union[literal_models.Literal, typing.Any], + project: typing.Optional[str] = None, + domain: typing.Optional[str] = None, + python_type: typing.Optional[typing.Type] = None, + literal_type: typing.Optional[type_models.LiteralType] = None, + ): + """ + :param signal_id: The name of the signal, this is the key used in the approve() or wait_for_input() call. + :param execution_name: The name of the execution. This is the tail end of the URL when looking + at the workflow execution. + :param value: This is either a Literal or a Python value which FlyteRemote will invoke the TypeEngine to + convert into a Literal. This argument is only valid for wait_for_input type signals. + :param project: The execution project, will default to the Remote's default project. + :param domain: The execution domain, will default to the Remote's default domain. + :param python_type: Provide a python type to help with conversion if the value you provided is not a Literal. + :param literal_type: Provide a Flyte literal type to help with conversion if the value you provided + is not a Literal. + """ + wf_exec_id = WorkflowExecutionIdentifier( + project=project or self.default_project, domain=domain or self.default_domain, name=execution_name + ) + if isinstance(value, Literal): + logger.debug(f"Using provided {value} as existing Literal value") + lit = value + else: + lt = literal_type or ( + TypeEngine.to_literal_type(python_type) if python_type else TypeEngine.to_literal_type(type(value)) + ) + lit = TypeEngine.to_literal(self.context, value, python_type or type(value), lt) + logger.debug(f"Converted {value} to literal {lit} using literal type {lt}") +
+ req = SignalSetRequest(id=SignalIdentifier(signal_id, wf_exec_id).to_flyte_idl(), value=lit.to_flyte_idl()) +
+ # Response is empty currently, nothing to give back to the user.
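+ # e.g., a sketch with hypothetical signal/execution names:
+ #   remote.set_signal("approve-deploy", "f2ea6f4bcb1d4c038208", True)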
+ self.client.set_signal(req) +
+ def recent_executions( + self, + project: typing.Optional[str] = None, + domain: typing.Optional[str] = None, + limit: typing.Optional[int] = 100, + ) -> typing.List[FlyteWorkflowExecution]: + # Ignore token for now + exec_models, _ = self.client.list_executions_paginated( + project or self.default_project, + domain or self.default_domain, + limit, + sort_by=MOST_RECENT_FIRST, + ) + return [FlyteWorkflowExecution.promote_from_model(e) for e in exec_models] +
+ def list_tasks_by_version( + self, + version: str, + project: typing.Optional[str] = None, + domain: typing.Optional[str] = None, + limit: typing.Optional[int] = 100, + ) -> typing.List[FlyteTask]: + if not version: + raise ValueError("Must specify a version") +
+ named_entity_id = common_models.NamedEntityIdentifier( + project=project or self.default_project, + domain=domain or self.default_domain, + ) + # Ignore token for now + t_models, _ = self.client.list_tasks_paginated( + named_entity_id, + filters=[filter_models.Filter.from_python_std(f"eq(version,{version})")], + limit=limit, + ) + return [FlyteTask.promote_from_model(t.closure.compiled_task.template) for t in t_models] +
+ ##################### + # Register Entities # + ##################### +
+ def _resolve_identifier(self, t: int, name: str, version: str, ss: SerializationSettings) -> Identifier: + ident = Identifier( + resource_type=t, + project=ss.project if ss and ss.project else self.default_project, + domain=ss.domain if ss and ss.domain else self.default_domain, + name=name, + version=version or ss.version, + ) + if not ident.project or not ident.domain or not ident.name or not ident.version: + raise ValueError( + f"To register a new {ident.resource_type}, (project, domain, name, version) required, " + f"received ({ident.project}, {ident.domain}, {ident.name}, {ident.version})." + ) + return ident +
+ def raw_register( + self, + cp_entity: FlyteControlPlaneEntity, + settings: SerializationSettings, + version: str, + create_default_launchplan: bool = True, + options: Options = None, + og_entity: FlyteLocalEntity = None, + ) -> typing.Optional[Identifier]: + """ + Raw register method; can be used to register control plane entities. If you have a Flyte entity like a + WorkflowBase, Task, or LaunchPlan, use the other methods instead. This should be used only if you already have serialized entities. +
+ :param cp_entity: The controlplane "serializable" version of a flyte entity. This is in the form that FlyteAdmin + understands.
+ :param settings: SerializationSettings to be used for registration - especially to identify the id + :param version: Version to be registered + :param create_default_launchplan: boolean that indicates if a default launch plan should be created + :param options: Options to be used if registering a default launch plan + :param og_entity: Pass in the original workflow (flytekit type) if create_default_launchplan is true + :return: Identifier of the created entity + """ + if isinstance(cp_entity, RemoteEntity): + if isinstance(cp_entity, (FlyteWorkflow, FlyteTask)): + if not cp_entity.should_register: + logger.debug(f"Skipping registration of remote entity: {cp_entity.name}") + raise RegistrationSkipped(f"Remote task/Workflow {cp_entity.name} is not registrable.") + else: + logger.debug(f"Skipping registration of remote entity: {cp_entity.name}") + raise RegistrationSkipped(f"Remote task/Workflow {cp_entity.name} is not registrable.") +
+ if isinstance( + cp_entity, + ( + workflow_model.Node, + workflow_model.WorkflowNode, + workflow_model.BranchNode, + workflow_model.TaskNode, + ), + ): + logger.debug("Ignoring nodes for registration.") + return None +
+ elif isinstance(cp_entity, ReferenceSpec): + logger.debug(f"Skipping registration of Reference entity, name: {cp_entity.template.id.name}") + return None +
+ if isinstance(cp_entity, task_models.TaskSpec): + if isinstance(cp_entity, FlyteTask): + version = cp_entity.id.version + ident = self._resolve_identifier(ResourceType.TASK, cp_entity.template.id.name, version, settings) + try: + self.client.create_task(task_identifer=ident, task_spec=cp_entity) + except FlyteEntityAlreadyExistsException: + logger.info(f" {ident} Already Exists!") + return ident +
+ if isinstance(cp_entity, admin_workflow_models.WorkflowSpec): + if isinstance(cp_entity, FlyteWorkflow): + version = cp_entity.id.version + ident = self._resolve_identifier(ResourceType.WORKFLOW, cp_entity.template.id.name, version, settings) + try: + self.client.create_workflow(workflow_identifier=ident, workflow_spec=cp_entity) + except FlyteEntityAlreadyExistsException: + logger.info(f" {ident} Already Exists!") +
+ if create_default_launchplan: + if not og_entity: + raise user_exceptions.FlyteValueException( + "To create default launch plan, please pass in the original flytekit workflow `og_entity`" + ) +
+ # Also create a default launch plan. Ideally it would be added to the + # OrderedDict above, but it is not.
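+ # Pre-bind project/domain so signed upload URLs can be requested from the
+ # data proxy without passing them on every call.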
+ self.file_access._get_upload_signed_url_fn = functools.partial( + self.client.get_upload_signed_url, project=settings.project, domain=settings.domain + ) + default_lp = LaunchPlan.get_default_launch_plan(self.context, og_entity) + lp_entity = get_serializable_launch_plan( + OrderedDict(), + settings, + default_lp, + recurse_downstream=False, + options=options, + ) + try: + self.client.create_launch_plan(lp_entity.id, lp_entity.spec) + except FlyteEntityAlreadyExistsException: + logger.info(f" {lp_entity.id} Already Exists!") + return ident +
+ if isinstance(cp_entity, launch_plan_models.LaunchPlan): + ident = self._resolve_identifier(ResourceType.LAUNCH_PLAN, cp_entity.id.name, version, settings) + try: + self.client.create_launch_plan(launch_plan_identifer=ident, launch_plan_spec=cp_entity.spec) + except FlyteEntityAlreadyExistsException: + logger.info(f" {ident} Already Exists!") + return ident +
+ raise AssertionError(f"Unknown entity of type {type(cp_entity)}") +
+ def _serialize_and_register( + self, + entity: FlyteLocalEntity, + settings: typing.Optional[SerializationSettings], + version: str, + options: typing.Optional[Options] = None, + create_default_launchplan: bool = True, + ) -> Identifier: + """ + This method serializes and registers the given Flyte entity. + :return: Identifier of the registered entity + """ + m = OrderedDict() + # Create dummy serialization settings for now. + # TODO: Clean this up by using lazy usage of serialization settings in translator.py + serialization_settings = settings + is_dummy_serialization_setting = False + if not settings: + serialization_settings = SerializationSettings( + ImageConfig.auto_default_image(), + project=self.default_project, + domain=self.default_domain, + version=version, + ) + is_dummy_serialization_setting = True +
+ if serialization_settings.version is None: + serialization_settings.version = version +
+ _ = get_serializable(m, settings=serialization_settings, entity=entity, options=options) +
+ ident = None + for entity, cp_entity in m.items(): + if not isinstance(cp_entity, admin_workflow_models.WorkflowSpec) and is_dummy_serialization_setting: + # Only in the case of workflows can we use the dummy serialization settings. + raise user_exceptions.FlyteValueException( + settings, + f"No serialization settings set, but workflow contains entities that need to be registered. {cp_entity.id.name}", + ) +
+ try: + ident = self.raw_register( + cp_entity, + settings=settings, + version=version, + create_default_launchplan=create_default_launchplan, + options=options, + og_entity=entity, + ) + except RegistrationSkipped: + pass +
+ return ident +
+ def register_task( + self, + entity: PythonTask, + serialization_settings: typing.Optional[SerializationSettings] = None, + version: typing.Optional[str] = None, + ) -> FlyteTask: + """ + Register a qualified task (PythonTask) with Remote. + For any conflicting parameters, method arguments are regarded as overrides. +
+ :param entity: PythonTask can be either @task or an instance of a Task class + :param serialization_settings: Settings that will be used to override various serialization parameters. + :param version: version that will be used to register.
If not specified, will default to using the serialization settings default + :return: + """ + # Create a default serialization settings object if not provided + # It makes registration easier for the user + if serialization_settings is None: + _, _, _, module_file = extract_task_module(entity) + project_root = _find_project_root(module_file) + serialization_settings = SerializationSettings( + image_config=ImageConfig.auto_default_image(), + source_root=project_root, + ) +
+ ident = self._serialize_and_register(entity=entity, settings=serialization_settings, version=version) + ft = self.fetch_task( + ident.project, + ident.domain, + ident.name, + ident.version, + ) + ft._python_interface = entity.python_interface + return ft +
+ def register_workflow( + self, + entity: WorkflowBase, + serialization_settings: typing.Optional[SerializationSettings] = None, + version: typing.Optional[str] = None, + default_launch_plan: typing.Optional[bool] = True, + options: typing.Optional[Options] = None, + ) -> FlyteWorkflow: + """ + Use this method to register a workflow. + :param version: version for the entity to be registered as + :param entity: The workflow to be registered + :param serialization_settings: The serialization settings to be used + :param default_launch_plan: This should be true if a default launch plan should be created for the workflow + :param options: Additional execution options that can be configured for the default launchplan + :return: + """ + ident = self._resolve_identifier(ResourceType.WORKFLOW, entity.name, version, serialization_settings) + if serialization_settings: + b = serialization_settings.new_builder() + b.project = ident.project + b.domain = ident.domain + b.version = ident.version + serialization_settings = b.build() + ident = self._serialize_and_register(entity, serialization_settings, version, options, default_launch_plan) + fwf = self.fetch_workflow(ident.project, ident.domain, ident.name, ident.version) + fwf._python_interface = entity.python_interface + return fwf +
+ def fast_package(self, root: os.PathLike, deref_symlinks: bool = True, output: str = None) -> (bytes, str): + """ + Packages the given paths into an installable zip and returns the md5_bytes and the URL of the uploaded location + :param root: path to the root of the package system that should be uploaded + :param output: output path. Optional, will default to a tempdir + :param deref_symlinks: if symlinks should be dereferenced. Defaults to True + :return: md5_bytes, url + """ + # Create a zip file containing all the entries. + zip_file = fast_package(root, output, deref_symlinks) + md5_bytes, _, _ = hash_file(pathlib.Path(zip_file)) +
+ # Upload zip file to Admin using FlyteRemote. + return self.upload_file(pathlib.Path(zip_file)) +
+ def upload_file( + self, + to_upload: pathlib.Path, + project: typing.Optional[str] = None, + domain: typing.Optional[str] = None, + filename_root: typing.Optional[str] = None, + ) -> typing.Tuple[bytes, str]: + """ + Function will use remote's client to hash and then upload the file using Admin's data proxy service. +
+ :param to_upload: Must be a single file + :param project: Project to upload under, if not supplied will use the remote's default + :param domain: Domain to upload under, if not specified will use the remote's default + :param filename_root: If provided will be used as the root of the filename. If not, Admin will use a hash + :return: The md5 bytes and the uploaded location.
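+
+ Example, a sketch (the path is hypothetical)::
+
+ md5_bytes, native_url = remote.upload_file(pathlib.Path("dist/app.tar.gz"))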
+ """ + if not to_upload.is_file(): + raise ValueError(f"{to_upload} is not a single file, upload arg must be a single file.") + md5_bytes, str_digest, _ = hash_file(to_upload) + logger.debug(f"Text hash of file to upload is {str_digest}") + + upload_location = self.client.get_upload_signed_url( + project=project or self.default_project, + domain=domain or self.default_domain, + content_md5=md5_bytes, + filename=to_upload.name, + filename_root=filename_root, + ) + + extra_headers = self.get_extra_headers_for_protocol(upload_location.native_url) + extra_headers.update(upload_location.headers) + encoded_md5 = b64encode(md5_bytes) + with open(str(to_upload), "+rb") as local_file: + content = local_file.read() + content_length = len(content) + headers = {"Content-Length": str(content_length), "Content-MD5": encoded_md5} + headers.update(extra_headers) + rsp = requests.put( + upload_location.signed_url, + data=content, + headers=headers, + verify=False + if self._config.platform.insecure_skip_verify is True + else self._config.platform.ca_cert_file_path, + ) + + # Check both HTTP 201 and 200, because some storage backends (e.g. Azure) return 201 instead of 200. + if rsp.status_code not in (requests.codes["OK"], requests.codes["created"]): + raise FlyteValueException( + rsp.status_code, + f"Request to send data {upload_location.signed_url} failed.\nResponse: {rsp.text}", + ) + + logger.debug(f"Uploading {to_upload} to {upload_location.signed_url} native url {upload_location.native_url}") + + return md5_bytes, upload_location.native_url + + @staticmethod + def _version_from_hash( + md5_bytes: bytes, + serialization_settings: SerializationSettings, + *additional_context: str, + ) -> str: + """ + The md5 version that we send to S3/GCS has to match the file contents exactly, + but we don't have to use it when registering with the Flyte backend. + To avoid changes in the For that add the hash of the compilation settings to hash of file + + :param md5_bytes: + :param serialization_settings: + :param additional_context: This is for additional context to factor into the version computation, + meant for objects (like Options for instance) that don't easily consistently stringify. + :return: + """ + from flytekit import __version__ + + additional_context = additional_context or [] + + h = hashlib.md5(md5_bytes) + h.update(bytes(serialization_settings.to_json(), "utf-8")) + h.update(bytes(__version__, "utf-8")) + + for s in additional_context: + h.update(bytes(s, "utf-8")) + + # Omit the character '=' from the version as that's essentially padding used by the base64 encoding + # and does not increase entropy of the hash while making it very inconvenient to copy-and-paste. + return base64.urlsafe_b64encode(h.digest()).decode("ascii").rstrip("=") + + def register_script( + self, + entity: typing.Union[WorkflowBase, PythonTask], + image_config: typing.Optional[ImageConfig] = None, + version: typing.Optional[str] = None, + project: typing.Optional[str] = None, + domain: typing.Optional[str] = None, + destination_dir: str = ".", + copy_all: bool = False, + default_launch_plan: bool = True, + options: typing.Optional[Options] = None, + source_path: typing.Optional[str] = None, + module_name: typing.Optional[str] = None, + envs: typing.Optional[typing.Dict[str, str]] = None, + ) -> typing.Union[FlyteWorkflow, FlyteTask]: + """ + Use this method to register a workflow via script mode. + :param destination_dir: The destination directory where the workflow will be copied to. 
+ :param copy_all: If true, the entire source directory will be copied over to the destination directory. + :param domain: The domain to register the workflow in. + :param project: The project to register the workflow in. + :param image_config: The image config to use for the workflow. + :param version: version for the entity to be registered as + :param entity: The workflow to be registered or the task to be registered + :param default_launch_plan: This should be true if a default launch plan should be created for the workflow + :param options: Additional execution options that can be configured for the default launchplan + :param source_path: The root of the project path + :param module_name: the name of the module + :param envs: Environment variables to be passed to the serialization + :return: + """ + if image_config is None: + image_config = ImageConfig.auto_default_image() + + with tempfile.TemporaryDirectory() as tmp_dir: + if copy_all: + md5_bytes, upload_native_url = self.fast_package(pathlib.Path(source_path), False, tmp_dir) + else: + archive_fname = pathlib.Path(os.path.join(tmp_dir, "script_mode.tar.gz")) + compress_scripts(source_path, str(archive_fname), module_name) + md5_bytes, upload_native_url = self.upload_file( + archive_fname, project or self.default_project, domain or self.default_domain + ) + + serialization_settings = SerializationSettings( + project=project, + domain=domain, + image_config=image_config, + git_repo=_get_git_repo_url(source_path), + env=envs, + fast_serialization_settings=FastSerializationSettings( + enabled=True, + destination_dir=destination_dir, + distribution_location=upload_native_url, + ), + source_root=source_path, + ) + + if version is None: + + def _get_image_names(entity: typing.Union[PythonAutoContainerTask, WorkflowBase]) -> typing.List[str]: + if isinstance(entity, PythonAutoContainerTask) and isinstance(entity.container_image, ImageSpec): + return [entity.container_image.image_name()] + if isinstance(entity, WorkflowBase): + image_names = [] + for n in entity.nodes: + image_names.extend(_get_image_names(n.flyte_entity)) + return image_names + return [] + + # The md5 version that we send to S3/GCS has to match the file contents exactly, + # but we don't have to use it when registering with the Flyte backend. + # For that add the hash of the compilation settings to hash of file + version = self._version_from_hash(md5_bytes, serialization_settings, *_get_image_names(entity)) + + if isinstance(entity, PythonTask): + return self.register_task(entity, serialization_settings, version) + return self.register_workflow(entity, serialization_settings, version, default_launch_plan, options) + + def register_launch_plan( + self, + entity: LaunchPlan, + version: str, + project: typing.Optional[str] = None, + domain: typing.Optional[str] = None, + options: typing.Optional[Options] = None, + ) -> FlyteLaunchPlan: + """ + Register a given launchplan, possibly applying overrides from the provided options. 
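+
+        A hedged usage sketch (the workflow, launch plan name, and version are hypothetical):
+
+        .. code-block:: python
+
+            from flytekit import LaunchPlan
+
+            lp = LaunchPlan.get_or_create(my_wf, name="my_wf_lp")  # my_wf defined elsewhere
+            registered_lp = remote.register_launch_plan(lp, version="v1", project="flytesnacks")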
+        :param entity: Launchplan to be registered
+        :param version:
+        :param project: Optionally provide a project, if not already provided in the FlyteRemote constructor or a separate one
+        :param domain: Optionally provide a domain, if not already provided in the FlyteRemote constructor or a separate one
+        :param options:
+        :return:
+        """
+        ss = SerializationSettings(
+            image_config=ImageConfig(),
+            project=project or self.default_project,
+            domain=domain or self.default_domain,
+            version=version,
+        )
+
+        ident = self._resolve_identifier(ResourceType.LAUNCH_PLAN, entity.name, version, ss)
+        m = OrderedDict()
+        idl_lp = get_serializable_launch_plan(m, ss, entity, recurse_downstream=False, options=options)
+        try:
+            self.client.create_launch_plan(ident, idl_lp.spec)
+        except FlyteEntityAlreadyExistsException:
+            logger.debug("Launchplan already exists, ignoring")
+        flp = self.fetch_launch_plan(ident.project, ident.domain, ident.name, ident.version)
+        flp._python_interface = entity.python_interface
+        return flp
+
+    ####################
+    # Execute Entities #
+    ####################
+
+    def _execute(
+        self,
+        entity: typing.Union[FlyteTask, FlyteWorkflow, FlyteLaunchPlan],
+        inputs: typing.Dict[str, typing.Any],
+        project: str = None,
+        domain: str = None,
+        execution_name: typing.Optional[str] = None,
+        execution_name_prefix: typing.Optional[str] = None,
+        options: typing.Optional[Options] = None,
+        wait: bool = False,
+        type_hints: typing.Optional[typing.Dict[str, typing.Type]] = None,
+        overwrite_cache: typing.Optional[bool] = None,
+        envs: typing.Optional[typing.Dict[str, str]] = None,
+        tags: typing.Optional[typing.List[str]] = None,
+        cluster_pool: typing.Optional[str] = None,
+    ) -> FlyteWorkflowExecution:
+        """Common method for execution across all entities.
+
+        :param entity: entity to execute
+        :param inputs: dictionary mapping argument names to values
+        :param project: project on which to execute the given entity
+        :param domain: domain on which to execute the given entity
+        :param execution_name: name of the execution
+        :param wait: if True, waits for execution to complete
+        :param type_hints: map of python types to inputs so that the TypeEngine knows how to convert the input values
+          into Flyte Literals.
+        :param overwrite_cache: Allows for all cached values of a workflow and its tasks to be overwritten
+          for a single execution. If enabled, all calculations are performed even if cached results would
+          be available, overwriting the stored data once execution finishes successfully.
+        :param envs: Environment variables to set for the execution.
+        :param tags: Tags to set for the execution.
+        :param cluster_pool: Specify cluster pool on which newly created execution should be placed.
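+
+        Only one of ``execution_name`` and ``execution_name_prefix`` may be set; a sketch of
+        the naming rule implemented below:
+
+        .. code-block:: python
+
+            import uuid
+
+            if execution_name and execution_name_prefix:
+                raise ValueError("only one of the two may be set")
+            name = execution_name or (
+                (execution_name_prefix + "-" if execution_name_prefix else "f") + uuid.uuid4().hex[:19]
+            )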
+ :returns: :class:`~flytekit.remote.workflow_execution.FlyteWorkflowExecution` + """ + if execution_name is not None and execution_name_prefix is not None: + raise ValueError("Only one of execution_name and execution_name_prefix can be set, but got both set") + execution_name_prefix = execution_name_prefix + "-" if execution_name_prefix is not None else None + execution_name = execution_name or (execution_name_prefix or "f") + uuid.uuid4().hex[:19] + if not options: + options = Options() + if options.disable_notifications is not None: + if options.disable_notifications: + notifications = None + else: + notifications = NotificationList(options.notifications) + else: + notifications = NotificationList([]) + + type_hints = type_hints or {} + literal_map = {} + with self.remote_context() as ctx: + input_flyte_type_map = entity.interface.inputs + + for k, v in inputs.items(): + if input_flyte_type_map.get(k) is None: + raise user_exceptions.FlyteValueException( + k, f"The {entity.__class__.__name__} doesn't have this input key." + ) + if isinstance(v, Literal): + lit = v + elif isinstance(v, Artifact): + raise user_exceptions.FlyteValueException(v, "Running with an artifact object is not yet possible.") + else: + if k not in type_hints: + try: + type_hints[k] = TypeEngine.guess_python_type(input_flyte_type_map[k].type) + except ValueError: + logger.debug(f"Could not guess type for {input_flyte_type_map[k].type}, skipping...") + variable = entity.interface.inputs.get(k) + hint = type_hints[k] + self.file_access._get_upload_signed_url_fn = functools.partial( + self.client.get_upload_signed_url, + project=project or self.default_project, + domain=domain or self.default_domain, + ) + lit = TypeEngine.to_literal(ctx, v, hint, variable.type) + literal_map[k] = lit + + literal_inputs = literal_models.LiteralMap(literals=literal_map) + + try: + # Currently, this will only execute the flyte entity referenced by + # flyte_id in the same project and domain. However, it is possible to execute it in a different project + # and domain, which is specified in the first two arguments of client.create_execution. This is useful + # in the case that I want to use a flyte entity from e.g. project "A" but actually execute the entity on a + # different project "B". For now, this method doesn't support this use case. + exec_id = self.client.create_execution( + project or self.default_project, + domain or self.default_domain, + execution_name, + ExecutionSpec( + entity.id, + ExecutionMetadata( + ExecutionMetadata.ExecutionMode.MANUAL, + "placeholder", # Admin replaces this from oidc token if auth is enabled. + 0, + ), + overwrite_cache=overwrite_cache, + notifications=notifications, + disable_all=options.disable_notifications, + labels=options.labels, + annotations=options.annotations, + raw_output_data_config=options.raw_output_data_config, + auth_role=None, + max_parallelism=options.max_parallelism, + security_context=options.security_context, + envs=common_models.Envs(envs) if envs else None, + tags=tags, + cluster_assignment=ClusterAssignment(cluster_pool=cluster_pool) if cluster_pool else None, + ), + literal_inputs, + ) + except user_exceptions.FlyteEntityAlreadyExistsException: + logger.warning( + f"Execution with Execution ID {execution_name} already exists. " + f"Assuming this is the same execution, returning!" 
+ ) + exec_id = WorkflowExecutionIdentifier( + project=project or self.default_project, domain=domain or self.default_domain, name=execution_name + ) + execution = FlyteWorkflowExecution.promote_from_model(self.client.get_execution(exec_id)) + + if wait: + return self.wait(execution) + return execution + + def _resolve_identifier_kwargs( + self, + entity: typing.Any, + project: str, + domain: str, + name: str, + version: str, + ) -> ResolvedIdentifiers: + """ + Resolves the identifier attributes based on user input, falling back on the default project/domain and + auto-generated version, and ultimately the entity project/domain if entity is a remote flyte entity. + """ + ident = ResolvedIdentifiers( + project=project or self.default_project, + domain=domain or self.default_domain, + name=name or entity.name, + version=version, + ) + if not (ident.project and ident.domain and ident.name): + raise ValueError( + f"Cannot launch an execution with missing project/domain/name {ident} for entity type {type(entity)}." + f" Specify them in the execute method or when initializing FlyteRemote" + ) + return ident + + def execute( + self, + entity: typing.Union[FlyteTask, FlyteLaunchPlan, FlyteWorkflow, PythonTask, WorkflowBase, LaunchPlan], + inputs: typing.Dict[str, typing.Any], + project: str = None, + domain: str = None, + name: str = None, + version: str = None, + execution_name: typing.Optional[str] = None, + execution_name_prefix: typing.Optional[str] = None, + image_config: typing.Optional[ImageConfig] = None, + options: typing.Optional[Options] = None, + wait: bool = False, + type_hints: typing.Optional[typing.Dict[str, typing.Type]] = None, + overwrite_cache: typing.Optional[bool] = None, + envs: typing.Optional[typing.Dict[str, str]] = None, + tags: typing.Optional[typing.List[str]] = None, + cluster_pool: typing.Optional[str] = None, + ) -> FlyteWorkflowExecution: + """ + Execute a task, workflow, or launchplan, either something that's been declared locally, or a fetched entity. + + This method supports: + - ``Flyte{Task, Workflow, LaunchPlan}`` remote module objects. + - ``@task``-decorated functions and ``TaskTemplate`` tasks. + - ``@workflow``-decorated functions. + - ``LaunchPlan`` objects. + + For local entities, this code will attempt to find the entity first, and if missing, will compile and register + the object. + + Not all arguments are relevant in all circumstances. For example, there's no reason to use the serialization + settings for entities that have already been registered on Admin. + + :param options: + :param entity: entity to execute + :param inputs: dictionary mapping argument names to values + :param project: execute entity in this project. If entity doesn't exist in the project, register the entity + first before executing. + :param domain: execute entity in this domain. If entity doesn't exist in the domain, register the entity + first before executing. + :param name: execute entity using this name. If not None, use this value instead of ``entity.name`` + :param version: execute entity using this version. If None, uses auto-generated value. + :param execution_name: name of the execution. If None, uses auto-generated value. + :param image_config: + :param wait: if True, waits for execution to complete + :param type_hints: Python types to be passed to the TypeEngine so that it knows how to properly convert the + input values for the execution into Flyte literals. If missing, will default to first guessing the type + using the type engine, and then to ``type(v)``. 
Providing the correct Python types is particularly important + if the inputs are containers like lists or maps, or if the Python type is one of the more complex Flyte + provided classes (like a StructuredDataset that's annotated with columns). + :param overwrite_cache: Allows for all cached values of a workflow and its tasks to be overwritten + for a single execution. If enabled, all calculations are performed even if cached results would + be available, overwriting the stored data once execution finishes successfully. + :param envs: Environment variables to be set for the execution. + :param tags: Tags to be set for the execution. + :param cluster_pool: Specify cluster pool on which newly created execution should be placed. + + .. note: + + The ``name`` and ``version`` arguments do not apply to ``FlyteTask``, ``FlyteLaunchPlan``, and + ``FlyteWorkflow`` entity inputs. These values are determined by referencing the entity identifier values. + """ + if entity.python_interface: + type_hints = type_hints or entity.python_interface.inputs + if isinstance(entity, FlyteTask) or isinstance(entity, FlyteLaunchPlan): + return self.execute_remote_task_lp( + entity=entity, + inputs=inputs, + project=project, + domain=domain, + execution_name=execution_name, + execution_name_prefix=execution_name_prefix, + options=options, + wait=wait, + type_hints=type_hints, + overwrite_cache=overwrite_cache, + envs=envs, + tags=tags, + cluster_pool=cluster_pool, + ) + if isinstance(entity, FlyteWorkflow): + return self.execute_remote_wf( + entity=entity, + inputs=inputs, + project=project, + domain=domain, + execution_name=execution_name, + execution_name_prefix=execution_name_prefix, + options=options, + wait=wait, + type_hints=type_hints, + overwrite_cache=overwrite_cache, + envs=envs, + tags=tags, + cluster_pool=cluster_pool, + ) + if isinstance(entity, ReferenceTask): + return self.execute_reference_task( + entity=entity, + inputs=inputs, + execution_name=execution_name, + execution_name_prefix=execution_name_prefix, + options=options, + wait=wait, + type_hints=type_hints, + overwrite_cache=overwrite_cache, + envs=envs, + tags=tags, + cluster_pool=cluster_pool, + ) + if isinstance(entity, ReferenceWorkflow): + return self.execute_reference_workflow( + entity=entity, + inputs=inputs, + execution_name=execution_name, + execution_name_prefix=execution_name_prefix, + options=options, + wait=wait, + type_hints=type_hints, + overwrite_cache=overwrite_cache, + envs=envs, + tags=tags, + cluster_pool=cluster_pool, + ) + if isinstance(entity, ReferenceLaunchPlan): + return self.execute_reference_launch_plan( + entity=entity, + inputs=inputs, + execution_name=execution_name, + execution_name_prefix=execution_name_prefix, + options=options, + wait=wait, + type_hints=type_hints, + overwrite_cache=overwrite_cache, + envs=envs, + tags=tags, + cluster_pool=cluster_pool, + ) + if isinstance(entity, PythonTask): + return self.execute_local_task( + entity=entity, + inputs=inputs, + project=project, + domain=domain, + name=name, + version=version, + execution_name=execution_name, + execution_name_prefix=execution_name_prefix, + image_config=image_config, + wait=wait, + overwrite_cache=overwrite_cache, + envs=envs, + tags=tags, + cluster_pool=cluster_pool, + ) + if isinstance(entity, WorkflowBase): + return self.execute_local_workflow( + entity=entity, + inputs=inputs, + project=project, + domain=domain, + name=name, + version=version, + execution_name=execution_name, + execution_name_prefix=execution_name_prefix, + 
image_config=image_config,
+                options=options,
+                wait=wait,
+                overwrite_cache=overwrite_cache,
+                envs=envs,
+                tags=tags,
+                cluster_pool=cluster_pool,
+            )
+        if isinstance(entity, LaunchPlan):
+            return self.execute_local_launch_plan(
+                entity=entity,
+                inputs=inputs,
+                version=version,
+                project=project,
+                domain=domain,
+                name=name,
+                execution_name=execution_name,
+                execution_name_prefix=execution_name_prefix,
+                options=options,
+                wait=wait,
+                overwrite_cache=overwrite_cache,
+                envs=envs,
+                tags=tags,
+                cluster_pool=cluster_pool,
+            )
+        raise NotImplementedError(f"entity type {type(entity)} not recognized for execution")
+
+    # Flyte Remote Entities
+    # ---------------------
+
+    def execute_remote_task_lp(
+        self,
+        entity: typing.Union[FlyteTask, FlyteLaunchPlan],
+        inputs: typing.Dict[str, typing.Any],
+        project: str = None,
+        domain: str = None,
+        execution_name: typing.Optional[str] = None,
+        execution_name_prefix: typing.Optional[str] = None,
+        options: typing.Optional[Options] = None,
+        wait: bool = False,
+        type_hints: typing.Optional[typing.Dict[str, typing.Type]] = None,
+        overwrite_cache: typing.Optional[bool] = None,
+        envs: typing.Optional[typing.Dict[str, str]] = None,
+        tags: typing.Optional[typing.List[str]] = None,
+        cluster_pool: typing.Optional[str] = None,
+    ) -> FlyteWorkflowExecution:
+        """Execute a FlyteTask or FlyteLaunchPlan.
+
+        NOTE: the name and version arguments are currently not used and are only there for consistency in the function signature
+        """
+        return self._execute(
+            entity,
+            inputs,
+            project=project,
+            domain=domain,
+            execution_name=execution_name,
+            execution_name_prefix=execution_name_prefix,
+            wait=wait,
+            options=options,
+            type_hints=type_hints,
+            overwrite_cache=overwrite_cache,
+            envs=envs,
+            tags=tags,
+            cluster_pool=cluster_pool,
+        )
+
+    def execute_remote_wf(
+        self,
+        entity: FlyteWorkflow,
+        inputs: typing.Dict[str, typing.Any],
+        project: str = None,
+        domain: str = None,
+        execution_name: typing.Optional[str] = None,
+        execution_name_prefix: typing.Optional[str] = None,
+        options: typing.Optional[Options] = None,
+        wait: bool = False,
+        type_hints: typing.Optional[typing.Dict[str, typing.Type]] = None,
+        overwrite_cache: typing.Optional[bool] = None,
+        envs: typing.Optional[typing.Dict[str, str]] = None,
+        tags: typing.Optional[typing.List[str]] = None,
+        cluster_pool: typing.Optional[str] = None,
+    ) -> FlyteWorkflowExecution:
+        """Execute a FlyteWorkflow.
+
+        NOTE: the name and version arguments are currently not used and are only there for consistency in the function signature
+        """
+        launch_plan = self.fetch_launch_plan(entity.id.project, entity.id.domain, entity.id.name, entity.id.version)
+        return self.execute_remote_task_lp(
+            launch_plan,
+            inputs,
+            project=project,
+            domain=domain,
+            execution_name=execution_name,
+            execution_name_prefix=execution_name_prefix,
+            options=options,
+            wait=wait,
+            type_hints=type_hints,
+            overwrite_cache=overwrite_cache,
+            envs=envs,
+            tags=tags,
+            cluster_pool=cluster_pool,
+        )
+
+    # Flyte Reference Entities
+    # ---------------------
+    def execute_reference_task(
+        self,
+        entity: ReferenceTask,
+        inputs: typing.Dict[str, typing.Any],
+        execution_name: typing.Optional[str] = None,
+        execution_name_prefix: typing.Optional[str] = None,
+        options: typing.Optional[Options] = None,
+        wait: bool = False,
+        type_hints: typing.Optional[typing.Dict[str, typing.Type]] = None,
+        overwrite_cache: typing.Optional[bool] = None,
+        envs: typing.Optional[typing.Dict[str, str]] = None,
+        tags: typing.Optional[typing.List[str]] = None,
+        cluster_pool: typing.Optional[str] = None,
+    ) -> FlyteWorkflowExecution:
+        """Execute a ReferenceTask."""
+        resolved_identifiers = ResolvedIdentifiers(
+            project=entity.reference.project,
+            domain=entity.reference.domain,
+            name=entity.reference.name,
+            version=entity.reference.version,
+        )
+        resolved_identifiers_dict = asdict(resolved_identifiers)
+        try:
+            flyte_task: FlyteTask = self.fetch_task(**resolved_identifiers_dict)
+        except FlyteEntityNotExistException:
+            raise ValueError(
+                f'missing entity of type ReferenceTask with identifier project:"{entity.reference.project}" domain:"{entity.reference.domain}" name:"{entity.reference.name}" version:"{entity.reference.version}"'
+            )
+
+        return self.execute(
+            flyte_task,
+            inputs,
+            project=resolved_identifiers.project,
+            domain=resolved_identifiers.domain,
+            execution_name=execution_name,
+            execution_name_prefix=execution_name_prefix,
+            options=options,
+            wait=wait,
+            type_hints=type_hints,
+            overwrite_cache=overwrite_cache,
+            envs=envs,
+            tags=tags,
+            cluster_pool=cluster_pool,
+        )
+
+    def execute_reference_workflow(
+        self,
+        entity: ReferenceWorkflow,
+        inputs: typing.Dict[str, typing.Any],
+        execution_name: typing.Optional[str] = None,
+        execution_name_prefix: typing.Optional[str] = None,
+        options: typing.Optional[Options] = None,
+        wait: bool = False,
+        type_hints: typing.Optional[typing.Dict[str, typing.Type]] = None,
+        overwrite_cache: typing.Optional[bool] = None,
+        envs: typing.Optional[typing.Dict[str, str]] = None,
+        tags: typing.Optional[typing.List[str]] = None,
+        cluster_pool: typing.Optional[str] = None,
+    ) -> FlyteWorkflowExecution:
+        """Execute a ReferenceWorkflow."""
+        resolved_identifiers = ResolvedIdentifiers(
+            project=entity.reference.project,
+            domain=entity.reference.domain,
+            name=entity.reference.name,
+            version=entity.reference.version,
+        )
+        resolved_identifiers_dict = asdict(resolved_identifiers)
+        try:
+            self.fetch_workflow(**resolved_identifiers_dict)
+        except FlyteEntityNotExistException:
+            raise ValueError(
+                f'missing entity of type ReferenceWorkflow with identifier project:"{entity.reference.project}" domain:"{entity.reference.domain}" name:"{entity.reference.name}" version:"{entity.reference.version}"'
+            )
+
+        try:
+            flyte_lp = self.fetch_launch_plan(**resolved_identifiers_dict)
+        except FlyteEntityNotExistException:
+            remote_logger.info("Try to register default launch plan because it wasn't found in Flyte
Admin!") + default_lp = LaunchPlan.get_default_launch_plan(self.context, entity) + self.register_launch_plan( + default_lp, + project=resolved_identifiers.project, + domain=resolved_identifiers.domain, + version=resolved_identifiers.version, + options=options, + ) + flyte_lp = self.fetch_launch_plan(**resolved_identifiers_dict) + + return self.execute( + flyte_lp, + inputs, + project=resolved_identifiers.project, + domain=resolved_identifiers.domain, + execution_name=execution_name, + execution_name_prefix=execution_name_prefix, + wait=wait, + options=options, + type_hints=type_hints, + overwrite_cache=overwrite_cache, + envs=envs, + tags=tags, + cluster_pool=cluster_pool, + ) + + def execute_reference_launch_plan( + self, + entity: ReferenceLaunchPlan, + inputs: typing.Dict[str, typing.Any], + execution_name: typing.Optional[str] = None, + execution_name_prefix: typing.Optional[str] = None, + options: typing.Optional[Options] = None, + wait: bool = False, + type_hints: typing.Optional[typing.Dict[str, typing.Type]] = None, + overwrite_cache: typing.Optional[bool] = None, + envs: typing.Optional[typing.Dict[str, str]] = None, + tags: typing.Optional[typing.List[str]] = None, + cluster_pool: typing.Optional[str] = None, + ) -> FlyteWorkflowExecution: + """Execute a ReferenceLaunchPlan.""" + resolved_identifiers = ResolvedIdentifiers( + project=entity.reference.project, + domain=entity.reference.domain, + name=entity.reference.name, + version=entity.reference.version, + ) + resolved_identifiers_dict = asdict(resolved_identifiers) + try: + flyte_launchplan: FlyteLaunchPlan = self.fetch_launch_plan(**resolved_identifiers_dict) + except FlyteEntityNotExistException: + raise ValueError( + f'missing entity of type ReferenceLaunchPlan with identifier project:"{entity.reference.project}" domain:"{entity.reference.domain}" name:"{entity.reference.name}" version:"{entity.reference.version}"' + ) + + return self.execute( + flyte_launchplan, + inputs, + project=resolved_identifiers.project, + domain=resolved_identifiers.domain, + execution_name=execution_name, + execution_name_prefix=execution_name_prefix, + options=options, + wait=wait, + type_hints=type_hints, + overwrite_cache=overwrite_cache, + envs=envs, + tags=tags, + cluster_pool=cluster_pool, + ) + + # Flytekit Entities + # ----------------- + + def execute_local_task( + self, + entity: PythonTask, + inputs: typing.Dict[str, typing.Any], + project: str = None, + domain: str = None, + name: str = None, + version: str = None, + execution_name: typing.Optional[str] = None, + execution_name_prefix: typing.Optional[str] = None, + image_config: typing.Optional[ImageConfig] = None, + wait: bool = False, + overwrite_cache: typing.Optional[bool] = None, + envs: typing.Optional[typing.Dict[str, str]] = None, + tags: typing.Optional[typing.List[str]] = None, + cluster_pool: typing.Optional[str] = None, + ) -> FlyteWorkflowExecution: + """ + Execute a @task-decorated function or TaskTemplate task. + + :param entity: local task entity. + :param inputs: register the task, which requires compiling the task, before running it. + :param project: The execution project, will default to the Remote's default project. + :param domain: The execution domain, will default to the Remote's default domain. + :param name: specific name of the task to run. + :param version: specific version of the task to run. + :param execution_name: If provided, will use this name for the execution. + :param image_config: If provided, will use this image config in the pod. 
+ :param wait: If True, will wait for the execution to complete before returning. + :param overwrite_cache: If True, will overwrite the cache. + :param envs: Environment variables to set for the execution. + :param tags: Tags to set for the execution. + :param cluster_pool: Specify cluster pool on which newly created execution should be placed. + :return: FlyteWorkflowExecution object. + """ + resolved_identifiers = self._resolve_identifier_kwargs(entity, project, domain, name, version) + resolved_identifiers_dict = asdict(resolved_identifiers) + try: + flyte_task: FlyteTask = self.fetch_task(**resolved_identifiers_dict) + except FlyteEntityNotExistException: + if isinstance(entity, PythonAutoContainerTask): + if not image_config: + raise ValueError(f"PythonTask {entity.name} not already registered, but image_config missing") + ss = SerializationSettings( + image_config=image_config, + project=project or self.default_project, + domain=domain or self._default_domain, + version=version, + ) + flyte_task: FlyteTask = self.register_task(entity, ss) + + return self.execute( + flyte_task, + inputs, + project=resolved_identifiers.project, + domain=resolved_identifiers.domain, + execution_name=execution_name, + execution_name_prefix=execution_name_prefix, + wait=wait, + type_hints=entity.python_interface.inputs, + overwrite_cache=overwrite_cache, + envs=envs, + tags=tags, + cluster_pool=cluster_pool, + ) + + def execute_local_workflow( + self, + entity: WorkflowBase, + inputs: typing.Dict[str, typing.Any], + project: str = None, + domain: str = None, + name: str = None, + version: str = None, + execution_name: typing.Optional[str] = None, + execution_name_prefix: typing.Optional[str] = None, + image_config: typing.Optional[ImageConfig] = None, + options: typing.Optional[Options] = None, + wait: bool = False, + overwrite_cache: typing.Optional[bool] = None, + envs: typing.Optional[typing.Dict[str, str]] = None, + tags: typing.Optional[typing.List[str]] = None, + cluster_pool: typing.Optional[str] = None, + ) -> FlyteWorkflowExecution: + """ + Execute an @workflow decorated function. + :param entity: + :param inputs: + :param project: + :param domain: + :param name: + :param version: + :param execution_name: + :param image_config: + :param options: + :param wait: + :param overwrite_cache: + :param envs: + :param tags: + :param cluster_pool: + :return: + """ + resolved_identifiers = self._resolve_identifier_kwargs(entity, project, domain, name, version) + resolved_identifiers_dict = asdict(resolved_identifiers) + ss = SerializationSettings( + image_config=image_config, + project=resolved_identifiers.project, + domain=resolved_identifiers.domain, + version=resolved_identifiers.version, + ) + try: + # Just fetch to see if it already exists + # todo: Add logic to check that the fetched workflow is functionally equivalent. 
+ self.fetch_workflow(**resolved_identifiers_dict) + except FlyteEntityNotExistException: + logger.info("Registering workflow because it wasn't found in Flyte Admin.") + if not image_config: + raise ValueError("Need image config since we are registering") + self.register_workflow(entity, ss, version=version, options=options) + + try: + flyte_lp = self.fetch_launch_plan(**resolved_identifiers_dict) + except FlyteEntityNotExistException: + logger.info("Try to register default launch plan because it wasn't found in Flyte Admin!") + default_lp = LaunchPlan.get_default_launch_plan(self.context, entity) + self.register_launch_plan( + default_lp, + project=resolved_identifiers.project, + domain=resolved_identifiers.domain, + version=version, + options=options, + ) + flyte_lp = self.fetch_launch_plan(**resolved_identifiers_dict) + + return self.execute( + flyte_lp, + inputs, + project=project, + domain=domain, + execution_name=execution_name, + execution_name_prefix=execution_name_prefix, + wait=wait, + options=options, + type_hints=entity.python_interface.inputs, + overwrite_cache=overwrite_cache, + envs=envs, + tags=tags, + cluster_pool=cluster_pool, + ) + + def execute_local_launch_plan( + self, + entity: LaunchPlan, + inputs: typing.Dict[str, typing.Any], + version: str, + project: typing.Optional[str] = None, + domain: typing.Optional[str] = None, + name: typing.Optional[str] = None, + execution_name: typing.Optional[str] = None, + execution_name_prefix: typing.Optional[str] = None, + options: typing.Optional[Options] = None, + wait: bool = False, + overwrite_cache: typing.Optional[bool] = None, + envs: typing.Optional[typing.Dict[str, str]] = None, + tags: typing.Optional[typing.List[str]] = None, + cluster_pool: typing.Optional[str] = None, + ) -> FlyteWorkflowExecution: + """ + + :param entity: The locally defined launch plan object + :param inputs: Inputs to be passed into the execution as a dict with Python native values. + :param version: The version to look up/register the launch plan (if not already exists) + :param project: The same as version, but will default to the Remote object's project + :param domain: The same as version, but will default to the Remote object's domain + :param name: The same as version, but will default to the entity's name + :param execution_name: If specified, will be used as the execution name instead of randomly generating. + :param options: Options to be passed into the execution. + :param wait: If True, will wait for the execution to complete before returning. + :param overwrite_cache: If True, will overwrite the cache. + :param envs: Environment variables to be passed into the execution. + :param tags: Tags to be passed into the execution. + :param cluster_pool: Specify cluster pool on which newly created execution should be placed. 
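+
+        A sketch of the lookup-then-register behaviour (``remote`` and ``my_lp`` are hypothetical):
+
+        .. code-block:: python
+
+            try:
+                flp = remote.fetch_launch_plan(name=my_lp.name, version="v1")
+            except FlyteEntityNotExistException:
+                flp = remote.register_launch_plan(my_lp, version="v1")
+            execution = remote.execute_remote_task_lp(flp, inputs={}, wait=True)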
+        :return: FlyteWorkflowExecution object
+        """
+        resolved_identifiers = self._resolve_identifier_kwargs(entity, project, domain, name, version)
+        resolved_identifiers_dict = asdict(resolved_identifiers)
+        project = resolved_identifiers.project
+        domain = resolved_identifiers.domain
+        try:
+            flyte_launchplan: FlyteLaunchPlan = self.fetch_launch_plan(**resolved_identifiers_dict)
+        except FlyteEntityNotExistException:
+            flyte_launchplan: FlyteLaunchPlan = self.register_launch_plan(
+                entity,
+                version=version,
+                project=project,
+                domain=domain,
+            )
+        return self.execute_remote_task_lp(
+            flyte_launchplan,
+            inputs,
+            project=project,
+            domain=domain,
+            execution_name=execution_name,
+            execution_name_prefix=execution_name_prefix,
+            options=options,
+            wait=wait,
+            type_hints=entity.python_interface.inputs,
+            overwrite_cache=overwrite_cache,
+            envs=envs,
+            tags=tags,
+            cluster_pool=cluster_pool,
+        )
+
+    ###################################
+    # Wait for Executions to Complete #
+    ###################################
+
+    def wait(
+        self,
+        execution: FlyteWorkflowExecution,
+        timeout: typing.Optional[timedelta] = None,
+        poll_interval: typing.Optional[timedelta] = None,
+        sync_nodes: bool = True,
+    ) -> FlyteWorkflowExecution:
+        """Wait for an execution to finish.
+
+        :param execution: execution object to wait on
+        :param timeout: maximum amount of time to wait
+        :param poll_interval: sync workflow execution at this interval
+        :param sync_nodes: passed along to the sync call for the workflow execution
+        """
+        poll_interval = poll_interval or timedelta(seconds=30)
+        time_to_give_up = datetime.max if timeout is None else datetime.now() + timeout
+
+        while datetime.now() < time_to_give_up:
+            execution = self.sync_execution(execution, sync_nodes=sync_nodes)
+            if execution.is_done:
+                return execution
+            time.sleep(poll_interval.total_seconds())
+
+        raise user_exceptions.FlyteTimeout(f"Execution {execution.id} did not complete before the timeout.")
+
+    ########################
+    # Sync Execution State #
+    ########################
+
+    def sync(
+        self,
+        execution: FlyteWorkflowExecution,
+        entity_definition: typing.Union[FlyteWorkflow, FlyteTask] = None,
+        sync_nodes: bool = False,
+    ) -> FlyteWorkflowExecution:
+        """
+        This function was previously a singledispatchmethod. We've removed that but this function remains
+        so that we don't break people.
+
+        :param execution:
+        :param entity_definition:
+        :param sync_nodes: By default sync will fetch data on all underlying node executions (recursively,
+          so subworkflows will also get picked up). Set this to False in order to prevent that (which
+          will make this call faster).
+        :return: Returns the same execution object, but with additional information pulled in.
+        """
+        if not isinstance(execution, FlyteWorkflowExecution):
+            raise ValueError(f"remote.sync should only be called on workflow executions, got {type(execution)}")
+        return self.sync_execution(execution, entity_definition, sync_nodes)
+
+    def sync_execution(
+        self,
+        execution: FlyteWorkflowExecution,
+        entity_definition: typing.Union[FlyteWorkflow, FlyteTask] = None,
+        sync_nodes: bool = False,
+    ) -> FlyteWorkflowExecution:
+        """
+        Sync a FlyteWorkflowExecution object with its corresponding remote state.
+        """
+        if entity_definition is not None:
+            raise ValueError("Entity definition arguments aren't supported when syncing workflow executions")
+
+        # Update the closure first and the data second: if the execution finished in between the two calls,
+        # we would otherwise risk pairing a closure whose is_done is true with data fetched before completion.
+ execution._closure = self.client.get_execution(execution.id).closure + execution_data = self.client.get_execution_data(execution.id) + lp_id = execution.spec.launch_plan + underlying_node_executions = [] + if sync_nodes: + underlying_node_executions = [ + FlyteNodeExecution.promote_from_model(n) for n in iterate_node_executions(self.client, execution.id) + ] + + # This condition is only true for single-task executions + if execution.spec.launch_plan.resource_type == ResourceType.TASK: + flyte_entity = self.fetch_task(lp_id.project, lp_id.domain, lp_id.name, lp_id.version) + node_interface = flyte_entity.interface + if sync_nodes: + # Need to construct the mapping. There should've been returned exactly three nodes, a start, + # an end, and a task node. + task_node_exec = [ + x + for x in filter( + lambda x: x.id.node_id != constants.START_NODE_ID and x.id.node_id != constants.END_NODE_ID, + underlying_node_executions, + ) + ] + # We need to manually make a map of the nodes since there is none for single task executions + # Assume the first one is the only one. + node_mapping = ( + { + task_node_exec[0].id.node_id: FlyteNode( + id=flyte_entity.id, + upstream_nodes=[], + bindings=[], + metadata=NodeMetadata(name=""), + task_node=FlyteTaskNode(flyte_entity), + ) + } + if len(task_node_exec) >= 1 + else {} # This is for the case where node executions haven't appeared yet + ) + # This is the default case, an execution of a normal workflow through a launch plan + else: + fetched_lp = self.fetch_launch_plan(lp_id.project, lp_id.domain, lp_id.name, lp_id.version) + node_interface = fetched_lp.flyte_workflow.interface + execution._flyte_workflow = fetched_lp.flyte_workflow + node_mapping = fetched_lp.flyte_workflow._node_map + + # update node executions (if requested), and inputs/outputs + if sync_nodes: + node_execs = {} + for n in underlying_node_executions: + node_execs[n.id.node_id] = self.sync_node_execution(n, node_mapping) # noqa + execution._node_executions = node_execs + return self._assign_inputs_and_outputs(execution, execution_data, node_interface) + + def sync_node_execution( + self, + execution: FlyteNodeExecution, + node_mapping: typing.Dict[str, FlyteNode], + ) -> FlyteNodeExecution: + """ + Get data backing a node execution. These FlyteNodeExecution objects should've come from Admin with the model + fields already populated correctly. For purposes of the remote experience, we'd like to supplement the object + with some additional fields: + - inputs/outputs + - task/workflow executions, and/or underlying node executions in the case of parent nodes + - TypedInterface (remote wrapper type) + + A node can have several different types of executions behind it. That is, the node could've run (perhaps + multiple times because of retries): + - A task + - A static subworkflow + - A dynamic subworkflow (which in turn may have run additional tasks, subwfs, and/or launch plans) + - A launch plan + + The data model is complicated, so ascertaining which of these happened is a bit tricky. That logic is + encapsulated in this function. + """ + # For single task execution - the metadata spec node id is missing. In these cases, revert to regular node id + node_id = execution.metadata.spec_node_id + # This case supports single-task execution compiled workflows. 
+ if node_id and node_id not in node_mapping and execution.id.node_id in node_mapping: + node_id = execution.id.node_id + logger.debug( + f"Using node execution ID {node_id} instead of spec node id " + f"{execution.metadata.spec_node_id}, single-task execution likely." + ) + # This case supports single-task execution compiled workflows with older versions of admin/propeller + if not node_id: + node_id = execution.id.node_id + logger.debug(f"No metadata spec_node_id found, using {node_id}") + + # First see if it's a dummy node, if it is, we just skip it. + if constants.START_NODE_ID in node_id or constants.END_NODE_ID in node_id: + return execution + + # Look for the Node object in the mapping supplied + if node_id in node_mapping: + execution._node = node_mapping[node_id] + else: + raise Exception(f"Missing node from mapping: {node_id}") + + # Get the node execution data + node_execution_get_data_response = self.client.get_node_execution_data(execution.id) + + # Calling a launch plan directly case + # If a node ran a launch plan directly (i.e. not through a dynamic task or anything) then + # the closure should have a workflow_node_metadata populated with the launched execution id. + # The parent node flag should not be populated here + # This is the simplest case + if not execution.metadata.is_parent_node and execution.closure.workflow_node_metadata: + launched_exec_id = execution.closure.workflow_node_metadata.execution_id + # This is a recursive call, basically going through the same process that brought us here in the first + # place, but on the launched execution. + launched_exec = self.fetch_execution( + project=launched_exec_id.project, domain=launched_exec_id.domain, name=launched_exec_id.name + ) + self.sync_execution(launched_exec) + if launched_exec.is_done: + # The synced underlying execution should've had these populated. + execution._inputs = launched_exec.inputs + execution._outputs = launched_exec.outputs + execution._workflow_executions.append(launched_exec) + execution._interface = launched_exec._flyte_workflow.interface + return execution + + # If a node ran a static subworkflow or a dynamic subworkflow then the parent flag will be set. 
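+        # The dispatch below first handles parent nodes: a dynamic subworkflow is signalled by a
+        # CompiledWorkflowClosure in the node-execution data, a static subworkflow by the node's
+        # entity being a FlyteWorkflow, and branch nodes are skipped for now. Gate nodes are then
+        # also skipped, and anything else is treated as a plain task execution.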
+ if execution.metadata.is_parent_node: + # We'll need to query child node executions regardless since this is a parent node + child_node_executions = iterate_node_executions( + self.client, + workflow_execution_identifier=execution.id.execution_id, + unique_parent_id=execution.id.node_id, + ) + child_node_executions = [x for x in child_node_executions] + + # If this was a dynamic task, then there should be a CompiledWorkflowClosure inside the + # NodeExecutionGetDataResponse + if node_execution_get_data_response.dynamic_workflow is not None: + compiled_wf = node_execution_get_data_response.dynamic_workflow.compiled_workflow + node_launch_plans = {} + # TODO: Inspect branch nodes for launch plans + for node in FlyteWorkflow.get_non_system_nodes(compiled_wf.primary.template.nodes): + if ( + node.workflow_node is not None + and node.workflow_node.launchplan_ref is not None + and node.workflow_node.launchplan_ref not in node_launch_plans + ): + node_launch_plans[node.workflow_node.launchplan_ref] = self.client.get_launch_plan( + node.workflow_node.launchplan_ref + ).spec + + dynamic_flyte_wf = FlyteWorkflow.promote_from_closure(compiled_wf, node_launch_plans) + execution._underlying_node_executions = [ + self.sync_node_execution(FlyteNodeExecution.promote_from_model(cne), dynamic_flyte_wf._node_map) + for cne in child_node_executions + ] + execution._task_executions = [ + node_exes.task_executions for node_exes in execution.subworkflow_node_executions.values() + ] + + execution._interface = dynamic_flyte_wf.interface + + # Handle the case where it's a static subworkflow + elif isinstance(execution._node.flyte_entity, FlyteWorkflow): + sub_flyte_workflow = execution._node.flyte_entity + sub_node_mapping = {n.id: n for n in sub_flyte_workflow.flyte_nodes} + execution._underlying_node_executions = [ + self.sync_node_execution(FlyteNodeExecution.promote_from_model(cne), sub_node_mapping) + for cne in child_node_executions + ] + execution._interface = sub_flyte_workflow.interface + + # Handle the case where it's a branch node + elif execution._node.branch_node is not None: + logger.info( + "Skipping branch node execution for now - branch nodes will " + "not have inputs and outputs filled in" + ) + return execution + else: + logger.error(f"NE {execution} undeterminable, {type(execution._node)}, {execution._node}") + raise Exception(f"Node execution undeterminable, entity has type {type(execution._node)}") + + # Handle the case for gate nodes + elif execution._node.gate_node is not None: + logger.info("Skipping gate node execution for now - gate nodes don't have inputs and outputs filled in") + return execution + + # This is the plain ol' task execution case + else: + execution._task_executions = [ + self.sync_task_execution( + FlyteTaskExecution.promote_from_model(t), node_mapping[node_id].task_node.flyte_task + ) + for t in iterate_task_executions(self.client, execution.id) + ] + execution._interface = execution._node.flyte_entity.interface + + self._assign_inputs_and_outputs( + execution, + node_execution_get_data_response, + execution.interface, + ) + + return execution + + def sync_task_execution( + self, execution: FlyteTaskExecution, entity_definition: typing.Optional[FlyteTask] = None + ) -> FlyteTaskExecution: + """Sync a FlyteTaskExecution object with its corresponding remote state.""" + execution._closure = self.client.get_task_execution(execution.id).closure + execution_data = self.client.get_task_execution_data(execution.id) + task_id = execution.id.task_id + if entity_definition is 
None: + entity_definition = self.fetch_task(task_id.project, task_id.domain, task_id.name, task_id.version) + return self._assign_inputs_and_outputs(execution, execution_data, entity_definition.interface) + + ############################# + # Terminate Execution State # + ############################# + + def terminate(self, execution: FlyteWorkflowExecution, cause: str): + """Terminate a workflow execution. + + :param execution: workflow execution to terminate + :param cause: reason for termination + """ + self.client.terminate_execution(execution.id, cause) + + ################## + # Helper Methods # + ################## + + def _assign_inputs_and_outputs( + self, + execution: typing.Union[FlyteWorkflowExecution, FlyteNodeExecution, FlyteTaskExecution], + execution_data, + interface: TypedInterface, + ): + """Helper for assigning synced inputs and outputs to an execution object.""" + input_literal_map = self._get_input_literal_map(execution_data) + execution._inputs = LiteralsResolver(input_literal_map.literals, interface.inputs, self.context) + + if execution.is_done and not execution.error: + output_literal_map = self._get_output_literal_map(execution_data) + execution._outputs = LiteralsResolver(output_literal_map.literals, interface.outputs, self.context) + return execution + + def _get_input_literal_map(self, execution_data: ExecutionDataResponse) -> literal_models.LiteralMap: + # Inputs are returned inline unless they are too big, in which case a url blob pointing to them is returned. + if bool(execution_data.full_inputs.literals): + return execution_data.full_inputs + elif execution_data.inputs.bytes > 0: + with self.remote_context() as ctx: + tmp_name = os.path.join(ctx.file_access.local_sandbox_dir, "inputs.pb") + ctx.file_access.get_data(execution_data.inputs.url, tmp_name) + return literal_models.LiteralMap.from_flyte_idl( + utils.load_proto_from_file(literals_pb2.LiteralMap, tmp_name) + ) + return literal_models.LiteralMap({}) + + def _get_output_literal_map(self, execution_data: ExecutionDataResponse) -> literal_models.LiteralMap: + # Outputs are returned inline unless they are too big, in which case a url blob pointing to them is returned. + if bool(execution_data.full_outputs.literals): + return execution_data.full_outputs + elif execution_data.outputs.bytes > 0: + with self.remote_context() as ctx: + tmp_name = os.path.join(ctx.file_access.local_sandbox_dir, "outputs.pb") + ctx.file_access.get_data(execution_data.outputs.url, tmp_name) + return literal_models.LiteralMap.from_flyte_idl( + utils.load_proto_from_file(literals_pb2.LiteralMap, tmp_name) + ) + return literal_models.LiteralMap({}) + + def generate_console_http_domain(self) -> str: + """ + This should generate the domain where console is hosted. + + :return: + """ + # If the console endpoint is explicitly set, return it, else derive it from the admin config + if self.config.platform.console_endpoint: + return self.config.platform.console_endpoint + protocol = "http" if self.config.platform.insecure else "https" + endpoint = self.config.platform.endpoint + # N.B.: this assumes that in case we have an identical configuration as the sandbox default config we are running single binary. The intent here is + # to ensure that the urls produced in the getting started guide point to the correct place. 
+        if self.config.platform == Config.for_sandbox().platform:
+            endpoint = "localhost:30080"
+        return protocol + f"://{endpoint}"
+
+    def generate_console_url(
+        self,
+        entity: typing.Union[
+            FlyteWorkflowExecution, FlyteNodeExecution, FlyteTaskExecution, FlyteWorkflow, FlyteTask, FlyteLaunchPlan
+        ],
+    ):
+        """
+        Generate a Flyteconsole URL for the given Flyte remote endpoint.
+        This will automatically determine if this is an execution or an entity and change the type automatically
+        """
+        if isinstance(entity, (FlyteWorkflowExecution, FlyteNodeExecution, FlyteTaskExecution)):
+            return f"{self.generate_console_http_domain()}/console/projects/{entity.id.project}/domains/{entity.id.domain}/executions/{entity.id.name}"  # noqa
+
+        if not isinstance(entity, (FlyteWorkflow, FlyteTask, FlyteLaunchPlan)):
+            raise ValueError(f"Only remote entities can be looked at in the console, got type {type(entity)}")
+        rt = "workflow"
+        if entity.id.resource_type == ResourceType.TASK:
+            rt = "task"
+        elif entity.id.resource_type == ResourceType.LAUNCH_PLAN:
+            rt = "launch_plan"
+        return f"{self.generate_console_http_domain()}/console/projects/{entity.id.project}/domains/{entity.id.domain}/{rt}/{entity.name}/version/{entity.id.version}"  # noqa
+
+    def launch_backfill(
+        self,
+        project: str,
+        domain: str,
+        from_date: datetime,
+        to_date: datetime,
+        launchplan: str,
+        launchplan_version: str = None,
+        execution_name: str = None,
+        version: str = None,
+        dry_run: bool = False,
+        execute: bool = True,
+        parallel: bool = False,
+        failure_policy: typing.Optional[WorkflowFailurePolicy] = None,
+        overwrite_cache: typing.Optional[bool] = None,
+    ) -> typing.Optional[typing.Union[FlyteWorkflowExecution, FlyteWorkflow, WorkflowBase]]:
+        """
+        Creates and launches a backfill workflow for the given launchplan. If launchplan version is not specified,
+        then the latest launchplan is retrieved.
+        The from_date is exclusive and end_date is inclusive, and the backfill runs for all instances in between. ::
+            -> (start_date - exclusive, end_date inclusive)
+
+        If dry_run is specified, the workflow is created and returned.
+        If execute==False is specified then the workflow is created and registered.
+        In the default case, the workflow is created, registered and executed.
+
+        The `parallel` flag can be used to generate a workflow where all launchplans can be run in parallel. The
+        default is to run the backfill sequentially.
+
+        :param project: str project name
+        :param domain: str domain name
+        :param from_date: datetime generate a backfill starting at this datetime (exclusive)
+        :param to_date: datetime generate a backfill ending at this datetime (inclusive)
+        :param launchplan: str launchplan name in the flyte backend
+        :param launchplan_version: str (optional) version for the launchplan. If not specified the most recent will be retrieved
+        :param execution_name: str (optional) the generated execution will be named so. this can help in ensuring idempotency
+        :param version: str (optional) version to be used for the newly created workflow.
+        :param dry_run: bool do not register or execute the workflow
+        :param execute: bool Register and execute the workflow.
+        :param parallel: if the backfill should be run in parallel. False (default) will run each backfill sequentially.
+        :param failure_policy: WorkflowFailurePolicy (optional) to be used for the newly created workflow. This can
+          control failure behavior - whether to continue on failure or stop immediately on failure
+        :param overwrite_cache: if True, will overwrite the cache.
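+
+        A hedged usage sketch (the launch plan name and dates are hypothetical):
+
+        .. code-block:: python
+
+            from datetime import datetime
+
+            wf = remote.launch_backfill(
+                project="flytesnacks",
+                domain="development",
+                from_date=datetime(2024, 1, 1),
+                to_date=datetime(2024, 1, 11),
+                launchplan="daily_report_lp",
+                dry_run=True,  # just build and return the backfill workflow
+            )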
+ :return: In case of dry-run, return WorkflowBase, else if no_execute return FlyteWorkflow else in the default + case return a FlyteWorkflowExecution + """ + lp = self.fetch_launch_plan(project=project, domain=domain, name=launchplan, version=launchplan_version) + wf, start, end = create_backfill_workflow( + start_date=from_date, end_date=to_date, for_lp=lp, parallel=parallel, failure_policy=failure_policy + ) + if dry_run: + logger.warning("Dry Run enabled. Workflow will not be registered and or executed.") + return wf + + unique_fingerprint = f"{start}-{end}-{launchplan}-{launchplan_version}" + h = hashlib.md5() + h.update(unique_fingerprint.encode("utf-8")) + unique_fingerprint_encoded = base64.urlsafe_b64encode(h.digest()).decode("ascii") + if not version: + version = unique_fingerprint_encoded + ss = SerializationSettings( + image_config=ImageConfig.auto(), + project=project, + domain=domain, + version=version, + ) + remote_wf = self.register_workflow(wf, serialization_settings=ss) + + if not execute: + return remote_wf + + return self.execute( + remote_wf, + inputs={}, + project=project, + domain=domain, + execution_name=execution_name, + overwrite_cache=overwrite_cache, + ) + + @staticmethod + def get_extra_headers_for_protocol(native_url): + if native_url.startswith("abfs://"): + return {"x-ms-blob-type": "BlockBlob"} + return {} + + def activate_launchplan(self, ident: Identifier): + """ + Given a launchplan, activate it, all previous versions are deactivated. + """ + self.client.update_launch_plan(id=ident, state=LaunchPlanState.ACTIVE) + + def download( + self, data: typing.Union[LiteralsResolver, Literal, LiteralMap], download_to: str, recursive: bool = True + ): + """ + Download the data to the specified location. If the data is a LiteralsResolver, LiteralMap and if recursive is + specified, then all file like objects will be recursively downloaded (e.g. FlyteFile/Dir (blob), + StructuredDataset etc). + + Note: That it will use your sessions credentials to access the remote location. For sandbox, this should be + automatically configured, assuming you are running sandbox locally. For other environments, you will need to + configure your credentials appropriately. 
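+
+        A hedged example, pulling all outputs of a finished execution (the execution name is
+        hypothetical):
+
+        .. code-block:: python
+
+            execution = remote.fetch_execution(name="f8c4e1d2b3a4f5e6d7c8")
+            execution = remote.sync_execution(execution)
+            remote.download(execution.outputs, download_to="./outputs", recursive=True)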
+ + :param data: data to be downloaded + :param download_to: location to download to (str) that should be a valid path + :param recursive: if the data is a LiteralsResolver or LiteralMap, then this flag will recursively download + """ + download_to = pathlib.Path(download_to) + if isinstance(data, Literal): + download_literal(self.file_access, "data", data, download_to) + else: + if not recursive: + raise click.UsageError("Please specify --recursive to download all variables in a literal map.") + if isinstance(data, LiteralsResolver): + lm = data.literals + else: + lm = data + for var, literal in lm.items(): + download_literal(self.file_access, var, literal, download_to) diff --git a/flyrs/src/lib.rs b/flyrs/src/lib.rs index 106bc8c760..be92117e93 100644 --- a/flyrs/src/lib.rs +++ b/flyrs/src/lib.rs @@ -1,4 +1,4 @@ -use prost::Message; +use prost::{Message}; use pyo3::prelude::*; use pyo3::types::PyBytes; use tokio::runtime::{Builder, Runtime}; @@ -43,7 +43,7 @@ impl FlyteClient { // fn serialize_tobytes(proto) { // } - pub fn get_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyObject { + pub fn get_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { let bytes = bytes_obj.as_bytes(); let decoded: ObjectGetRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); let req = tonic::Request::new(decoded); @@ -54,23 +54,24 @@ impl FlyteClient { let mut buf = vec![]; res.encode(&mut buf).unwrap(); - PyBytes::new(py, &buf).into() + Ok(PyBytes::new_bound(py, &buf).into()) } - pub fn list_tasks_paginated(&mut self, py: Python, bytes_obj: &PyBytes) -> PyObject { + pub fn list_tasks_paginated(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { let bytes = bytes_obj.as_bytes(); let decoded: ResourceListRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); let req = tonic::Request::new(decoded); + // Interacting with the gRPC server: flyteadmin - let res = (self.runtime.block_on(self.admin_service.list_tasks(req))).unwrap().into_inner(); + let res = self.runtime.block_on(self.admin_service.list_tasks(req)).unwrap().into_inner(); let mut buf = vec![]; res.encode(&mut buf).unwrap(); - PyBytes::new(py, &buf).into() + Ok(PyBytes::new_bound(py, &buf).into()) } - pub fn echo_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyObject { // PyResult> + pub fn echo_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { // PyResult> let bytes = bytes_obj.as_bytes(); println!("Received bytes: {:?}", bytes); let decoded: Task = Message::decode(&bytes.to_vec()[..]).unwrap(); @@ -79,7 +80,7 @@ impl FlyteClient { decoded.encode(&mut buf).unwrap(); println!("Serialized Task: {:?}", decoded); // Returning bytes buffer - PyBytes::new(py, &buf).into() + Ok(PyBytes::new_bound(py, &buf).into()) } } diff --git a/flyrs/test_flytekit_remote.py b/flyrs/test_FlyteRemote.py similarity index 90% rename from flyrs/test_flytekit_remote.py rename to flyrs/test_FlyteRemote.py index 303aa6dab7..5a31a868cd 100644 --- a/flyrs/test_flytekit_remote.py +++ b/flyrs/test_FlyteRemote.py @@ -1,5 +1,6 @@ from flytekit.configuration import Config from flytekit.remote import FlyteRemote +from remote import RustFlyteRemote PROJECT = "flytesnacks" DOMAIN = "development" @@ -16,4 +17,4 @@ ) print(task_rs) -assert task_py == task_rs \ No newline at end of file +assert task_py == task_rs diff --git a/flytekit/remote/remote.py b/flytekit/remote/remote.py index 9bd073d1c1..aa95d3cac1 100644 --- a/flytekit/remote/remote.py +++ b/flytekit/remote/remote.py @@ -29,7 +29,6 @@ from flytekit import ImageSpec from 
flytekit.clients.friendly import SynchronousFlyteClient -from flytekit.clients.friendly_rs import RustSynchronousFlyteClient from flytekit.clients.helpers import iterate_node_executions, iterate_task_executions from flytekit.configuration import Config, FastSerializationSettings, ImageConfig, SerializationSettings from flytekit.core import constants, utils @@ -240,10 +239,7 @@ def context(self) -> FlyteContext: def client(self): """Return a SynchronousFlyteClient for additional operations.""" if not self._client_initialized: - if self._enable_rs: - self._client = RustSynchronousFlyteClient() - else: - self._client = SynchronousFlyteClient(self.config.platform, **self._kwargs) + self._client = SynchronousFlyteClient(self.config.platform, **self._kwargs) self._client_initialized = True return self._client From b132c892ee09c1e91d8eeeae8e3d8dd2857a990a Mon Sep 17 00:00:00 2001 From: Austin Liu Date: Fri, 12 Apr 2024 16:23:32 +0800 Subject: [PATCH 08/16] cleanup Signed-off-by: Austin Liu cleanup Signed-off-by: Austin Liu cleanup Signed-off-by: Austin Liu cleanup Signed-off-by: Austin Liu --- flyrs/perf.py | 5 +++-- flyrs/test_FlyteRemote.py | 2 +- flytekit/remote/remote.py | 4 +--- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/flyrs/perf.py b/flyrs/perf.py index 7c58d9076a..8f4247bfd8 100644 --- a/flyrs/perf.py +++ b/flyrs/perf.py @@ -3,11 +3,12 @@ setup = """ from flytekit.remote import FlyteRemote; +from remote import RustFlyteRemote; from flytekit.configuration import Config; PROJECT = "flytesnacks"; DOMAIN = "development"; remote_py = FlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN); -remote_rs = FlyteRemote(Config.auto(), enable_rs=True, default_project=PROJECT, default_domain=DOMAIN); +remote_rs = RustFlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN); """ fetch_task_in_py = """task_py = remote_py.fetch_task(project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw")""" @@ -28,4 +29,4 @@ plt.plot(Xs, py_elpased,'r-',label='Python gRPC') plt.plot(Xs, rs_elpased,'b-',label='Rust gRPC') plt.legend() -plt.savefig("perf.png") \ No newline at end of file +plt.savefig("perf.png") diff --git a/flyrs/test_FlyteRemote.py b/flyrs/test_FlyteRemote.py index 5a31a868cd..05fc32a814 100644 --- a/flyrs/test_FlyteRemote.py +++ b/flyrs/test_FlyteRemote.py @@ -11,7 +11,7 @@ ) print(task_py) -remote_rs = FlyteRemote(Config.auto(), enable_rs=True, default_project=PROJECT, default_domain=DOMAIN) +remote_rs = FlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN) task_rs = remote_rs.fetch_task( project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw" ) diff --git a/flytekit/remote/remote.py b/flytekit/remote/remote.py index aa95d3cac1..0948de5065 100644 --- a/flytekit/remote/remote.py +++ b/flytekit/remote/remote.py @@ -192,7 +192,6 @@ class FlyteRemote(object): def __init__( self, config: Config, - enable_rs: bool = False, default_project: typing.Optional[str] = None, default_domain: typing.Optional[str] = None, data_upload_location: str = "flyte://my-s3-bucket/", @@ -213,7 +212,6 @@ def __init__( if data_upload_location is None: data_upload_location = FlyteContext.current_context().file_access.raw_output_prefix self._kwargs = kwargs - self._enable_rs = enable_rs self._client_initialized = False self._config = config # read config files, env vars, host, ssl options for admin client @@ -236,7 +234,7 @@ def context(self) -> FlyteContext: return self._ctx @property - 
def client(self): + def client(self) -> SynchronousFlyteClient: """Return a SynchronousFlyteClient for additional operations.""" if not self._client_initialized: self._client = SynchronousFlyteClient(self.config.platform, **self._kwargs) From e06f35ec518ce1eaf8c2f0d65d23865378978627 Mon Sep 17 00:00:00 2001 From: Austin Liu Date: Mon, 15 Apr 2024 14:29:43 +0800 Subject: [PATCH 09/16] refactor list_task Signed-off-by: Austin Liu --- flyrs/remote/backfill.py | 107 ++++ flyrs/remote/data.py | 55 +++ flyrs/remote/entities.py | 839 ++++++++++++++++++++++++++++++++ flyrs/remote/executions.py | 212 ++++++++ flyrs/{ => remote}/friendly.py | 30 +- flyrs/remote/interface.py | 11 + flyrs/remote/lazy_entity.py | 67 +++ flyrs/{ => remote}/remote.py | 18 +- flyrs/remote/remote_callable.py | 75 +++ flyrs/remote/remote_fs.py | 266 ++++++++++ flyrs/src/lib.rs | 36 +- flyrs/test_FlyteRemote.py | 23 +- 12 files changed, 1706 insertions(+), 33 deletions(-) create mode 100644 flyrs/remote/backfill.py create mode 100644 flyrs/remote/data.py create mode 100644 flyrs/remote/entities.py create mode 100644 flyrs/remote/executions.py rename flyrs/{ => remote}/friendly.py (98%) create mode 100644 flyrs/remote/interface.py create mode 100644 flyrs/remote/lazy_entity.py rename flyrs/{ => remote}/remote.py (99%) create mode 100644 flyrs/remote/remote_callable.py create mode 100644 flyrs/remote/remote_fs.py diff --git a/flyrs/remote/backfill.py b/flyrs/remote/backfill.py new file mode 100644 index 0000000000..d3d3604e37 --- /dev/null +++ b/flyrs/remote/backfill.py @@ -0,0 +1,107 @@ +import logging +import typing +from datetime import datetime, timedelta + +from croniter import croniter + +from flytekit import LaunchPlan +from flytekit.core.workflow import ImperativeWorkflow, WorkflowBase, WorkflowFailurePolicy +from remote.entities import FlyteLaunchPlan + + +def create_backfill_workflow( + start_date: datetime, + end_date: datetime, + for_lp: typing.Union[LaunchPlan, FlyteLaunchPlan], + parallel: bool = False, + per_node_timeout: timedelta = None, + per_node_retries: int = 0, + failure_policy: typing.Optional[WorkflowFailurePolicy] = None, +) -> typing.Tuple[WorkflowBase, datetime, datetime]: + """ + Generates a new imperative workflow for the launchplan that can be used to backfill the given launchplan. + This can only be used to generate a backfill workflow for schedulable launchplans. + + The backfill plan is generated over the interval (start_date exclusive, end_date inclusive). + + .. code-block:: python + :caption: Correct usage for dates example + + lp = Launchplan.get_or_create(...) + start_date = datetime.datetime(2023, 1, 1) + end_date = start_date + datetime.timedelta(days=10) + wf = create_backfill_workflow(start_date, end_date, for_lp=lp) + + + .. code-block:: python + :caption: Incorrect date example + + wf = create_backfill_workflow(end_date, start_date, for_lp=lp) # end_date is before start_date + # OR + wf = create_backfill_workflow(start_date, start_date, for_lp=lp) # start and end date are same + + + :param start_date: datetime generate a backfill starting at this datetime (exclusive) + :param end_date: datetime generate a backfill ending at this datetime (inclusive) + :param for_lp: typing.Union[LaunchPlan, FlyteLaunchPlan] the backfill is generated for this launchplan + :param parallel: if the backfill should be run in parallel. False (default) will run each backfill sequentially + :param per_node_timeout: timedelta Timeout to use per node + :param per_node_retries: int Retries to use per node + :param failure_policy: WorkflowFailurePolicy Failure policy to use for the backfill workflow + :return: WorkflowBase, datetime, datetime -> New generated workflow, datetime for first instance of backfill, datetime for last instance of backfill + """ + if not for_lp: + raise ValueError("Launch plan is required!") + + if start_date >= end_date: + raise ValueError( + f"for a backfill start date should be earlier than end date. Received {start_date} -> {end_date}" + ) + + schedule = for_lp.entity_metadata.schedule if isinstance(for_lp, FlyteLaunchPlan) else for_lp.schedule + + if schedule is None: + raise ValueError("Backfill can only be created for scheduled launch plans") + + if schedule.cron_schedule is not None: + cron_schedule = schedule.cron_schedule + else: + raise NotImplementedError("Currently backfilling only supports cron schedules.") + + logging.info( + f"Generating backfill from {start_date} -> {end_date}. " + f"Parallel?[{parallel}] FailurePolicy[{str(failure_policy)}]" + ) + wf = ImperativeWorkflow(name=f"backfill-{for_lp.name}", failure_policy=failure_policy) + + input_name = schedule.kickoff_time_input_arg + date_iter = croniter(cron_schedule.schedule, start_time=start_date, ret_type=datetime) + prev_node = None + actual_start = None + actual_end = None + while True: + next_start_date = date_iter.get_next() + if not actual_start: + actual_start = next_start_date + if next_start_date >= end_date: + break + actual_end = next_start_date + inputs = {} + if input_name: + inputs[input_name] = next_start_date + next_node = wf.add_launch_plan(for_lp, **inputs) + next_node = next_node.with_overrides( + name=f"b-{next_start_date}", retries=per_node_retries, timeout=per_node_timeout + ) + if not parallel: + if prev_node: + prev_node.runs_before(next_node) + prev_node = next_node + + if actual_end is None: + raise StopIteration( + f"The time window is too small for any backfill instances, first instance after start" + f" date is {actual_start}" + ) + + return wf, actual_start, actual_end diff --git a/flyrs/remote/data.py b/flyrs/remote/data.py new file mode 100644 index 0000000000..84fcff1420 --- /dev/null +++ b/flyrs/remote/data.py @@ -0,0 +1,55 @@ +import os +import pathlib +import typing + +from google.protobuf.json_format import MessageToJson +from rich import print + +from flytekit import BlobType, Literal +from flytekit.core.data_persistence import FileAccessProvider +from flytekit.interaction.rich_utils import RichCallback +from flytekit.interaction.string_literals import literal_string_repr + + +def download_literal( + file_access: FileAccessProvider, var: str, data: Literal, download_to: typing.Optional[pathlib.Path] = None +): + """ + Download a single literal to a file, if it is a blob or structured dataset.
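+
+ Example (an illustrative sketch; the provider arguments, literal, and paths are placeholders):
+
+ .. code-block:: python
+
+     fa = FileAccessProvider(local_sandbox_dir="/tmp/sandbox", raw_output_prefix="/tmp/raw")
+     download_literal(fa, "out", some_literal, pathlib.Path("/tmp/outputs"))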
+ """ + if data is None: + print(f"Skipping {var} as it is None.") + return + if data.scalar: + if data.scalar and (data.scalar.blob or data.scalar.structured_dataset): + uri = data.scalar.blob.uri if data.scalar.blob else data.scalar.structured_dataset.uri + if uri is None: + print("No data to download.") + return + is_multipart = False + if data.scalar.blob: + is_multipart = data.scalar.blob.metadata.type.dimensionality == BlobType.BlobDimensionality.MULTIPART + elif data.scalar.structured_dataset: + is_multipart = True + file_access.get_data( + uri, str(download_to / var) + os.sep, is_multipart=is_multipart, callback=RichCallback() + ) + elif data.scalar.union is not None: + download_literal(file_access, var, data.scalar.union.value, download_to) + elif data.scalar.generic is not None: + with open(download_to / f"{var}.json", "w") as f: + f.write(MessageToJson(data.scalar.generic)) + else: + print( + f"[dim]Skipping {var} val {literal_string_repr(data)} as it is not a blob, structured dataset," + f" or generic type.[/dim]" + ) + return + elif data.collection: + for i, v in enumerate(data.collection.literals): + download_literal(file_access, f"{i}", v, download_to / var) + elif data.map: + download_to = pathlib.Path(download_to) + for k, v in data.map.literals.items(): + download_literal(file_access, f"{k}", v, download_to / var) + print(f"Downloaded f{var} to {download_to}") diff --git a/flyrs/remote/entities.py b/flyrs/remote/entities.py new file mode 100644 index 0000000000..1ffbdff7a2 --- /dev/null +++ b/flyrs/remote/entities.py @@ -0,0 +1,839 @@ +""" +This module contains shadow entities for all Flyte entities as represented in Flyte Admin / Control Plane. +The goal is to enable easy access, manipulation of these entities. +""" +from __future__ import annotations + +from typing import Dict, List, Optional, Tuple, Union + +from flytekit import FlyteContext +from flytekit.core import constants as _constants +from flytekit.core import hash as _hash_mixin +from flytekit.core import hash as hash_mixin +from flytekit.core.promise import create_and_link_node_from_remote +from flytekit.exceptions import system as _system_exceptions +from flytekit.exceptions import user as _user_exceptions +from flytekit.loggers import logger +from flytekit.models import interface as _interface_models +from flytekit.models import launch_plan as _launch_plan_model +from flytekit.models import launch_plan as _launch_plan_models +from flytekit.models import launch_plan as launch_plan_models +from flytekit.models import task as _task_model +from flytekit.models import task as _task_models +from flytekit.models.admin.workflow import WorkflowSpec +from flytekit.models.core import compiler as compiler_models +from flytekit.models.core import identifier as _identifier_model +from flytekit.models.core import identifier as id_models +from flytekit.models.core import workflow as _workflow_model +from flytekit.models.core import workflow as _workflow_models +from flytekit.models.core.identifier import Identifier +from flytekit.models.core.workflow import Node, WorkflowMetadata, WorkflowMetadataDefaults +from flytekit.models.interface import TypedInterface +from flytekit.models.literals import Binding +from flytekit.models.task import TaskSpec +import remote.interface as _interfaces +from remote.remote_callable import RemoteEntity + + +class FlyteTask(hash_mixin.HashOnReferenceMixin, RemoteEntity, TaskSpec): + """A class encapsulating a remote Flyte task.""" + + def __init__( + self, + id, + type, + metadata, + 
interface, + custom, + container=None, + task_type_version: int = 0, + config=None, + should_register: bool = False, + ): + super(FlyteTask, self).__init__( + template=_task_model.TaskTemplate( + id, + type, + metadata, + interface, + custom, + container=container, + task_type_version=task_type_version, + config=config, + ) + ) + self._should_register = should_register + + @property + def id(self): + """ + This is generated by the system and uniquely identifies the task. + + :rtype: flytekit.models.core.identifier.Identifier + """ + return self.template.id + + @property + def type(self): + """ + This is used to identify additional extensions for use by Propeller or SDK. + + :rtype: Text + """ + return self.template.type + + @property + def metadata(self): + """ + This contains information needed at runtime to determine behavior such as whether or not outputs are + discoverable, timeouts, and retries. + + :rtype: TaskMetadata + """ + return self.template.metadata + + @property + def interface(self): + """ + The interface definition for this task. + + :rtype: flytekit.models.interface.TypedInterface + """ + return self.template.interface + + @property + def custom(self): + """ + Arbitrary dictionary containing metadata for custom plugins. + + :rtype: dict[Text, T] + """ + return self.template.custom + + @property + def task_type_version(self): + return self.template.task_type_version + + @property + def container(self): + """ + If not None, the target of execution should be a container. + + :rtype: Container + """ + return self.template.container + + @property + def config(self): + """ + Arbitrary dictionary containing metadata for parsing and handling custom plugins. + + :rtype: dict[Text, T] + """ + return self.template.config + + @property + def security_context(self): + return self.template.security_context + + @property + def k8s_pod(self): + return self.template.k8s_pod + + @property + def sql(self): + return self.template.sql + + @property + def should_register(self) -> bool: + return self._should_register + + @property + def name(self) -> str: + return self.template.id.name + + @property + def resource_type(self) -> _identifier_model.ResourceType: + return _identifier_model.ResourceType.TASK + + @property + def entity_type_text(self) -> str: + return "Task" + + @classmethod + def promote_from_model(cls, base_model: _task_model.TaskTemplate) -> FlyteTask: + t = cls( + id=base_model.id, + type=base_model.type, + metadata=base_model.metadata, + interface=_interfaces.TypedInterface.promote_from_model(base_model.interface), + custom=base_model.custom, + container=base_model.container, + task_type_version=base_model.task_type_version, + ) + # Override the newly generated name if one exists in the base model + if not base_model.id.is_empty: + t._id = base_model.id + + return t + + +class FlyteTaskNode(_workflow_model.TaskNode): + """A class encapsulating a task that a Flyte node needs to execute.""" + + def __init__(self, flyte_task: FlyteTask): + super(FlyteTaskNode, self).__init__(None) + self._flyte_task = flyte_task + + @property + def reference_id(self) -> id_models.Identifier: + """A globally unique identifier for the task.""" + return self._flyte_task.id + + @property + def flyte_task(self) -> FlyteTask: + return self._flyte_task + + @classmethod + def promote_from_model(cls, task: FlyteTask) -> FlyteTaskNode: + """ + Takes the idl wrapper for a TaskNode, + and returns the hydrated Flytekit object for it by fetching it with the FlyteTask control plane. 
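+
+ For example (an illustrative sketch, where ``fetched_flyte_task`` is a previously promoted FlyteTask):
+
+ .. code-block:: python
+
+     task_node = FlyteTaskNode.promote_from_model(fetched_flyte_task)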
+ """ + return cls(flyte_task=task) + + +class FlyteWorkflowNode(_workflow_model.WorkflowNode): + """A class encapsulating a workflow that a Flyte node needs to execute.""" + + def __init__( + self, + flyte_workflow: FlyteWorkflow = None, + flyte_launch_plan: FlyteLaunchPlan = None, + ): + if flyte_workflow and flyte_launch_plan: + raise _system_exceptions.FlyteSystemException( + "FlyteWorkflowNode cannot be called with both a workflow and a launchplan specified, please pick " + f"one. workflow: {flyte_workflow} launchPlan: {flyte_launch_plan}", + ) + + self._flyte_workflow = flyte_workflow + self._flyte_launch_plan = flyte_launch_plan + super(FlyteWorkflowNode, self).__init__( + launchplan_ref=self._flyte_launch_plan.id if self._flyte_launch_plan else None, + sub_workflow_ref=self._flyte_workflow.id if self._flyte_workflow else None, + ) + + def __repr__(self) -> str: + if self.flyte_workflow is not None: + return f"FlyteWorkflowNode with workflow: {self.flyte_workflow}" + return f"FlyteWorkflowNode with launch plan: {self.flyte_launch_plan}" + + @property + def launchplan_ref(self) -> id_models.Identifier: + """A globally unique identifier for the launch plan, which should map to Admin.""" + return self._flyte_launch_plan.id if self._flyte_launch_plan else None + + @property + def sub_workflow_ref(self): + return self._flyte_workflow.id if self._flyte_workflow else None + + @property + def flyte_launch_plan(self) -> FlyteLaunchPlan: + return self._flyte_launch_plan + + @property + def flyte_workflow(self) -> FlyteWorkflow: + return self._flyte_workflow + + @classmethod + def _promote_workflow( + cls, + wf: _workflow_models.WorkflowTemplate, + sub_workflows: Optional[Dict[Identifier, _workflow_models.WorkflowTemplate]] = None, + tasks: Optional[Dict[Identifier, FlyteTask]] = None, + node_launch_plans: Optional[Dict[Identifier, launch_plan_models.LaunchPlanSpec]] = None, + ) -> FlyteWorkflow: + return FlyteWorkflow.promote_from_model( + wf, + sub_workflows=sub_workflows, + node_launch_plans=node_launch_plans, + tasks=tasks, + ) + + @classmethod + def promote_from_model( + cls, + base_model: _workflow_model.WorkflowNode, + sub_workflows: Dict[id_models.Identifier, _workflow_model.WorkflowTemplate], + node_launch_plans: Dict[id_models.Identifier, _launch_plan_model.LaunchPlanSpec], + tasks: Dict[Identifier, FlyteTask], + converted_sub_workflows: Dict[id_models.Identifier, FlyteWorkflow], + ) -> Tuple[FlyteWorkflowNode, Dict[id_models.Identifier, FlyteWorkflow]]: + if base_model.launchplan_ref is not None: + return ( + cls( + flyte_launch_plan=FlyteLaunchPlan.promote_from_model( + base_model.launchplan_ref, node_launch_plans[base_model.launchplan_ref] + ) + ), + converted_sub_workflows, + ) + elif base_model.sub_workflow_ref is not None: + # the workflow templates for sub-workflows should have been included in the original response + if base_model.reference in sub_workflows: + wf = None + if base_model.reference not in converted_sub_workflows: + wf = cls._promote_workflow( + sub_workflows[base_model.reference], + sub_workflows=sub_workflows, + node_launch_plans=node_launch_plans, + tasks=tasks, + ) + converted_sub_workflows[base_model.reference] = wf + else: + wf = converted_sub_workflows[base_model.reference] + return cls(flyte_workflow=wf), converted_sub_workflows + raise _system_exceptions.FlyteSystemException(f"Subworkflow {base_model.reference} not found.") + + raise _system_exceptions.FlyteSystemException( + "Bad workflow node model, neither subworkflow nor launchplan specified." 
+ ) + + +class FlyteBranchNode(_workflow_model.BranchNode): + def __init__(self, if_else: _workflow_model.IfElseBlock): + super().__init__(if_else) + + @classmethod + def promote_from_model( + cls, + base_model: _workflow_model.BranchNode, + sub_workflows: Dict[id_models.Identifier, _workflow_model.WorkflowTemplate], + node_launch_plans: Dict[id_models.Identifier, _launch_plan_model.LaunchPlanSpec], + tasks: Dict[id_models.Identifier, FlyteTask], + converted_sub_workflows: Dict[id_models.Identifier, FlyteWorkflow], + ) -> Tuple[FlyteBranchNode, Dict[id_models.Identifier, FlyteWorkflow]]: + block = base_model.if_else + block.case._then_node, converted_sub_workflows = FlyteNode.promote_from_model( + block.case.then_node, + sub_workflows, + node_launch_plans, + tasks, + converted_sub_workflows, + ) + + for o in block.other: + o._then_node, converted_sub_workflows = FlyteNode.promote_from_model( + o.then_node, sub_workflows, node_launch_plans, tasks, converted_sub_workflows + ) + + else_node = None + if block.else_node: + else_node, converted_sub_workflows = FlyteNode.promote_from_model( + block.else_node, sub_workflows, node_launch_plans, tasks, converted_sub_workflows + ) + + new_if_else_block = _workflow_model.IfElseBlock(block.case, block.other, else_node, block.error) + + return cls(new_if_else_block), converted_sub_workflows + + +class FlyteGateNode(_workflow_model.GateNode): + @classmethod + def promote_from_model(cls, model: _workflow_model.GateNode): + return cls(model.signal, model.sleep, model.approve) + + +class FlyteArrayNode(_workflow_model.ArrayNode): + @classmethod + def promote_from_model(cls, model: _workflow_model.ArrayNode): + return cls(model._parallelism, model._node, model._min_success_ratio, model._min_successes) + + +class FlyteNode(_hash_mixin.HashOnReferenceMixin, _workflow_model.Node): + """A class encapsulating a remote Flyte node.""" + + def __init__( + self, + id, + upstream_nodes, + bindings, + metadata, + task_node: Optional[FlyteTaskNode] = None, + workflow_node: Optional[FlyteWorkflowNode] = None, + branch_node: Optional[FlyteBranchNode] = None, + gate_node: Optional[FlyteGateNode] = None, + array_node: Optional[FlyteArrayNode] = None, + ): + if not task_node and not workflow_node and not branch_node and not gate_node and not array_node: + raise _user_exceptions.FlyteAssertion( + "A Flyte node must have exactly one of task|workflow|branch|gate|array entity specified" + ) + # TODO: Revisit flyte_branch_node and flyte_gate_node, should they be another type like Condition instead + # of a node?
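+ # Resolve the user-facing entity from whichever node kind is present: task and
+ # workflow nodes wrap an inner entity (a task, or a workflow / launch plan),
+ # while branch, gate, and array nodes are used directly.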
+ self._flyte_task_node = task_node + if task_node: + self._flyte_entity = task_node.flyte_task + elif workflow_node: + self._flyte_entity = workflow_node.flyte_workflow or workflow_node.flyte_launch_plan + else: + self._flyte_entity = branch_node or gate_node or array_node + + super(FlyteNode, self).__init__( + id=id, + metadata=metadata, + inputs=bindings, + upstream_node_ids=[n.id for n in upstream_nodes], + output_aliases=[], + task_node=task_node, + workflow_node=workflow_node, + branch_node=branch_node, + gate_node=gate_node, + array_node=array_node, + ) + self._upstream = upstream_nodes + + @property + def task_node(self) -> Optional[FlyteTaskNode]: + return self._flyte_task_node + + @property + def flyte_entity(self) -> Union[FlyteTask, FlyteWorkflow, FlyteLaunchPlan, FlyteBranchNode]: + return self._flyte_entity + + @classmethod + def _promote_task_node(cls, t: FlyteTask) -> FlyteTaskNode: + return FlyteTaskNode.promote_from_model(t) + + @classmethod + def _promote_workflow_node( + cls, + wn: _workflow_model.WorkflowNode, + sub_workflows: Dict[id_models.Identifier, _workflow_model.WorkflowTemplate], + node_launch_plans: Dict[id_models.Identifier, _launch_plan_model.LaunchPlanSpec], + tasks: Dict[Identifier, FlyteTask], + converted_sub_workflows: Dict[id_models.Identifier, FlyteWorkflow], + ) -> Tuple[FlyteWorkflowNode, Dict[id_models.Identifier, FlyteWorkflow]]: + return FlyteWorkflowNode.promote_from_model( + wn, + sub_workflows, + node_launch_plans, + tasks, + converted_sub_workflows, + ) + + @classmethod + def promote_from_model( + cls, + model: _workflow_model.Node, + sub_workflows: Optional[Dict[id_models.Identifier, _workflow_model.WorkflowTemplate]], + node_launch_plans: Optional[Dict[id_models.Identifier, _launch_plan_model.LaunchPlanSpec]], + tasks: Dict[id_models.Identifier, FlyteTask], + converted_sub_workflows: Dict[id_models.Identifier, FlyteWorkflow], + ) -> Tuple[Optional[FlyteNode], Dict[id_models.Identifier, FlyteWorkflow]]: + node_model_id = model.id + # TODO: Consider removing + if node_model_id in {_constants.START_NODE_ID, _constants.END_NODE_ID}: + logger.warning(f"Should not call promote from model on a start node or end node {model}") + return None, converted_sub_workflows + + flyte_task_node, flyte_workflow_node, flyte_branch_node, flyte_gate_node, flyte_array_node = ( + None, + None, + None, + None, + None, + ) + if model.task_node is not None: + if model.task_node.reference_id not in tasks: + raise RuntimeError( + f"Remote Workflow closure does not have task with id {model.task_node.reference_id}."
+ ) + flyte_task_node = cls._promote_task_node(tasks[model.task_node.reference_id]) + elif model.workflow_node is not None: + flyte_workflow_node, converted_sub_workflows = cls._promote_workflow_node( + model.workflow_node, + sub_workflows, + node_launch_plans, + tasks, + converted_sub_workflows, + ) + elif model.branch_node is not None: + flyte_branch_node, converted_sub_workflows = FlyteBranchNode.promote_from_model( + model.branch_node, + sub_workflows, + node_launch_plans, + tasks, + converted_sub_workflows, + ) + elif model.gate_node is not None: + flyte_gate_node = FlyteGateNode.promote_from_model(model.gate_node) + elif model.array_node is not None: + flyte_array_node = FlyteArrayNode.promote_from_model(model.array_node) + # TODO: validate task in tasks + else: + raise _system_exceptions.FlyteSystemException( + f"Bad Node model, neither task nor workflow detected, node: {model}" + ) + + # When WorkflowTemplate models (containing node models) are returned by Admin, they've been compiled with a + # start node. In order to make the promoted FlyteWorkflow look the same, we strip the start-node text back out. + # TODO: Consider removing + for model_input in model.inputs: + if ( + model_input.binding.promise is not None + and model_input.binding.promise.node_id == _constants.START_NODE_ID + ): + model_input.binding.promise._node_id = _constants.GLOBAL_INPUT_NODE_ID + + return ( + cls( + id=node_model_id, + upstream_nodes=[], # set downstream, model doesn't contain this information + bindings=model.inputs, + metadata=model.metadata, + task_node=flyte_task_node, + workflow_node=flyte_workflow_node, + branch_node=flyte_branch_node, + gate_node=flyte_gate_node, + array_node=flyte_array_node, + ), + converted_sub_workflows, + ) + + @property + def upstream_nodes(self) -> List[FlyteNode]: + return self._upstream + + @property + def upstream_node_ids(self) -> List[str]: + return list(sorted(n.id for n in self.upstream_nodes)) + + def __repr__(self) -> str: + return f"Node(ID: {self.id})" + + +class FlyteWorkflow(_hash_mixin.HashOnReferenceMixin, RemoteEntity, WorkflowSpec): + """A class encapsulating a remote Flyte workflow.""" + + def __init__( + self, + id: id_models.Identifier, + nodes: List[FlyteNode], + interface, + output_bindings, + metadata, + metadata_defaults, + subworkflows: Optional[List[FlyteWorkflow]] = None, + tasks: Optional[List[FlyteTask]] = None, + launch_plans: Optional[Dict[id_models.Identifier, launch_plan_models.LaunchPlanSpec]] = None, + compiled_closure: Optional[compiler_models.CompiledWorkflowClosure] = None, + should_register: bool = False, + ): + # TODO: Remove check + for node in nodes: + for upstream in node.upstream_nodes: + if upstream.id is None: + raise _user_exceptions.FlyteAssertion( + "Some nodes contained in the workflow were not found in the workflow description. Please " + "ensure all nodes are either assigned to attributes within the class or an element in a " + "list, dict, or tuple which is stored as an attribute in the class." 
+ ) + + self._flyte_sub_workflows = subworkflows + template_subworkflows = [] + if subworkflows: + template_subworkflows = [swf.template for swf in subworkflows] + + super(FlyteWorkflow, self).__init__( + template=_workflow_models.WorkflowTemplate( + id=id, + metadata=metadata, + metadata_defaults=metadata_defaults, + interface=interface, + nodes=nodes, + outputs=output_bindings, + ), + sub_workflows=template_subworkflows, + ) + self._flyte_nodes = nodes + + # Optional things that we save for ease of access when promoting from a model or CompiledWorkflowClosure + self._tasks = tasks + self._launch_plans = launch_plans + self._compiled_closure = compiled_closure + self._node_map = None + self._name = id.name + self._should_register = should_register + + @property + def name(self) -> str: + return self._name + + @property + def flyte_tasks(self) -> Optional[List[FlyteTask]]: + return self._tasks + + @property + def should_register(self) -> bool: + return self._should_register + + @property + def flyte_sub_workflows(self) -> List[FlyteWorkflow]: + return self._flyte_sub_workflows + + @property + def entity_type_text(self) -> str: + return "Workflow" + + @property + def resource_type(self): + return id_models.ResourceType.WORKFLOW + + @property + def flyte_nodes(self) -> List[FlyteNode]: + return self._flyte_nodes + + @property + def id(self) -> Identifier: + """ + This is an autogenerated id by the system. The id is globally unique across Flyte. + """ + return self.template.id + + @property + def metadata(self) -> WorkflowMetadata: + """ + This contains information on how to run the workflow. + """ + return self.template.metadata + + @property + def metadata_defaults(self) -> WorkflowMetadataDefaults: + """ + This contains information on how to run the workflow. + :rtype: WorkflowMetadataDefaults + """ + return self.template.metadata_defaults + + @property + def interface(self) -> TypedInterface: + """ + Defines a strongly typed interface for the Workflow (inputs, outputs). This can include some optional + parameters. + """ + return self.template.interface + + @property + def nodes(self) -> List[Node]: + """ + A list of nodes. In addition, "globals" is a special reserved node id that can be used to consume + workflow inputs + """ + return self.template.nodes + + @property + def outputs(self) -> List[Binding]: + """ + A list of output bindings that specify how to construct workflow outputs. Bindings can + pull node outputs or specify literals. All workflow outputs specified in the interface field must be bound + in order for the workflow to be validated. A workflow has an implicit dependency on all of its nodes + to execute successfully in order to bind final outputs. + """ + return self.template.outputs + + @property + def failure_node(self) -> Node: + """ + Node failure_node: A catch-all node. This node is executed whenever the execution engine determines the + workflow has failed. The interface of this node must match the Workflow interface with an additional input + named "error" of type pb.lyft.flyte.core.Error. 
+ """ + return self.template.failure_node + + @classmethod + def get_non_system_nodes(cls, nodes: List[_workflow_models.Node]) -> List[_workflow_models.Node]: + return [n for n in nodes if n.id not in {_constants.START_NODE_ID, _constants.END_NODE_ID}] + + @classmethod + def _promote_node( + cls, + model: _workflow_model.Node, + sub_workflows: Optional[Dict[id_models.Identifier, _workflow_model.WorkflowTemplate]], + node_launch_plans: Optional[Dict[id_models.Identifier, _launch_plan_model.LaunchPlanSpec]], + tasks: Dict[id_models.Identifier, FlyteTask], + converted_sub_workflows: Dict[id_models.Identifier, FlyteWorkflow], + ) -> Tuple[Optional[FlyteNode], Dict[id_models.Identifier, FlyteWorkflow]]: + return FlyteNode.promote_from_model(model, sub_workflows, node_launch_plans, tasks, converted_sub_workflows) + + @classmethod + def promote_from_model( + cls, + base_model: _workflow_models.WorkflowTemplate, + sub_workflows: Optional[Dict[Identifier, _workflow_models.WorkflowTemplate]] = None, + tasks: Optional[Dict[Identifier, FlyteTask]] = None, + node_launch_plans: Optional[Dict[Identifier, launch_plan_models.LaunchPlanSpec]] = None, + ) -> FlyteWorkflow: + base_model_non_system_nodes = cls.get_non_system_nodes(base_model.nodes) + + node_map = {} + converted_sub_workflows = {} + for node in base_model_non_system_nodes: + flyte_node, converted_sub_workflows = cls._promote_node( + node, sub_workflows, node_launch_plans, tasks, converted_sub_workflows + ) + node_map[node.id] = flyte_node + + # Set upstream nodes for each node + for n in base_model_non_system_nodes: + current = node_map[n.id] + for upstream_id in n.upstream_node_ids: + upstream_node = node_map[upstream_id] + current._upstream.append(upstream_node) + + subworkflow_list = [] + if converted_sub_workflows: + subworkflow_list = [v for _, v in converted_sub_workflows.items()] + + task_list = [] + if tasks: + task_list = [t for _, t in tasks.items()] + + # No inputs/outputs specified, see the constructor for more information on the overrides. + wf = cls( + id=base_model.id, + nodes=list(node_map.values()), + metadata=base_model.metadata, + metadata_defaults=base_model.metadata_defaults, + interface=_interfaces.TypedInterface.promote_from_model(base_model.interface), + output_bindings=base_model.outputs, + subworkflows=subworkflow_list, + tasks=task_list, + launch_plans=node_launch_plans, + ) + + wf._node_map = node_map + + return wf + + @classmethod + def _promote_task(cls, t: _task_models.TaskTemplate) -> FlyteTask: + return FlyteTask.promote_from_model(t) + + @classmethod + def promote_from_closure( + cls, + closure: compiler_models.CompiledWorkflowClosure, + node_launch_plans: Optional[Dict[id_models, launch_plan_models.LaunchPlanSpec]] = None, + ): + """ + Extracts out the relevant portions of a FlyteWorkflow from a closure from the control plane. + + :param closure: This is the closure returned by Admin + :param node_launch_plans: The reason this exists is because the compiled closure doesn't have launch plans. + It only has subworkflows and tasks. Why this is unclear. 
If supplied, this map of launch plans will be used to resolve any launch plan nodes in the workflow. + """ + sub_workflows = {sw.template.id: sw.template for sw in closure.sub_workflows} + tasks = {} + if closure.tasks: + tasks = {t.template.id: cls._promote_task(t.template) for t in closure.tasks} + + flyte_wf = cls.promote_from_model( + base_model=closure.primary.template, + sub_workflows=sub_workflows, + node_launch_plans=node_launch_plans, + tasks=tasks, + ) + flyte_wf._compiled_closure = closure + return flyte_wf + + +class FlyteLaunchPlan(hash_mixin.HashOnReferenceMixin, RemoteEntity, _launch_plan_models.LaunchPlanSpec): + """A class encapsulating a remote Flyte launch plan.""" + + def __init__(self, id, *args, **kwargs): + super(FlyteLaunchPlan, self).__init__(*args, **kwargs) + # Set all the attributes we expect this class to have + self._id = id + self._name = id.name + + # The interface is not set explicitly unless fetched in an engine context + self._interface = None + # If fetched when creating this object, can store it here. + self._flyte_workflow = None + + @property + def name(self) -> str: + return self._name + + @property + def flyte_workflow(self) -> Optional[FlyteWorkflow]: + return self._flyte_workflow + + @classmethod + def promote_from_model(cls, id: id_models.Identifier, model: _launch_plan_models.LaunchPlanSpec) -> FlyteLaunchPlan: + lp = cls( + id=id, + workflow_id=model.workflow_id, + default_inputs=_interface_models.ParameterMap(model.default_inputs.parameters), + fixed_inputs=model.fixed_inputs, + entity_metadata=model.entity_metadata, + labels=model.labels, + annotations=model.annotations, + auth_role=model.auth_role, + raw_output_data_config=model.raw_output_data_config, + max_parallelism=model.max_parallelism, + security_context=model.security_context, + ) + return lp + + @property + def id(self) -> id_models.Identifier: + return self._id + + @property + def is_scheduled(self) -> bool: + if self.entity_metadata.schedule.cron_expression: + return True + elif self.entity_metadata.schedule.rate and self.entity_metadata.schedule.rate.value: + return True + elif self.entity_metadata.schedule.cron_schedule and self.entity_metadata.schedule.cron_schedule.schedule: + return True + else: + return False + + @property + def workflow_id(self) -> id_models.Identifier: + return self._workflow_id + + @property + def interface(self) -> Optional[_interface.TypedInterface]: + """ + The interface is not technically part of the admin.LaunchPlanSpec in the IDL, however the workflow ID is, and + from the workflow ID, fetch will fill in the interface. This is nice because then you can __call__ the + object and get a node.
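+
+ For example (an illustrative sketch), once the interface has been filled in by a fetch,
+ calling the fetched launch plan inside a workflow body yields a node:
+
+ .. code-block:: python
+
+     node = fetched_lp(x=3)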
+ """ + return self._interface + + @property + def resource_type(self) -> id_models.ResourceType: + return id_models.ResourceType.LAUNCH_PLAN + + @property + def entity_type_text(self) -> str: + return "Launch Plan" + + def compile(self, ctx: FlyteContext, *args, **kwargs): + fixed_input_lits = self.fixed_inputs.literals or {} + default_input_params = self.default_inputs.parameters or {} + return create_and_link_node_from_remote( + ctx, + entity=self, + _inputs_not_allowed=set(fixed_input_lits.keys()), + _ignorable_inputs=set(default_input_params.keys()), + **kwargs, + ) # noqa + + def __repr__(self) -> str: + return f"FlyteLaunchPlan(ID: {self.id} Interface: {self.interface}) - Spec {super().__repr__()})" diff --git a/flyrs/remote/executions.py b/flyrs/remote/executions.py new file mode 100644 index 0000000000..c77acc69e9 --- /dev/null +++ b/flyrs/remote/executions.py @@ -0,0 +1,212 @@ +from __future__ import annotations + +from abc import abstractmethod +from typing import Dict, List, Optional, Union + +from flytekit.core.type_engine import LiteralsResolver +from flytekit.exceptions import user as user_exceptions +from flytekit.models import execution as execution_models +from flytekit.models import node_execution as node_execution_models +from flytekit.models.admin import task_execution as admin_task_execution_models +from flytekit.models.core import execution as core_execution_models +from remote.entities import FlyteTask, FlyteWorkflow + + +class RemoteExecutionBase(object): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._inputs: Optional[LiteralsResolver] = None + self._outputs: Optional[LiteralsResolver] = None + + @property + def inputs(self) -> Optional[LiteralsResolver]: + return self._inputs + + @property + @abstractmethod + def error(self) -> core_execution_models.ExecutionError: + ... + + @property + @abstractmethod + def is_done(self) -> bool: + ... + + @property + def outputs(self) -> Optional[LiteralsResolver]: + """ + :return: Returns the outputs LiteralsResolver to the execution + :raises: ``FlyteAssertion`` error if execution is in progress or execution ended in error. + """ + if not self.is_done: + raise user_exceptions.FlyteAssertion( + "Please wait until the execution has completed before requesting the outputs." + ) + if self.error: + raise user_exceptions.FlyteAssertion("Outputs could not be found because the execution ended in failure.") + + return self._outputs + + +class FlyteTaskExecution(RemoteExecutionBase, admin_task_execution_models.TaskExecution): + """A class encapsulating a task execution being run on a Flyte remote backend.""" + + def __init__(self, *args, **kwargs): + super(FlyteTaskExecution, self).__init__(*args, **kwargs) + self._flyte_task = None + + @property + def task(self) -> Optional[FlyteTask]: + return self._flyte_task + + @property + def is_done(self) -> bool: + """Whether or not the execution is complete.""" + return self.closure.phase in { + core_execution_models.TaskExecutionPhase.ABORTED, + core_execution_models.TaskExecutionPhase.FAILED, + core_execution_models.TaskExecutionPhase.SUCCEEDED, + } + + @property + def error(self) -> Optional[core_execution_models.ExecutionError]: + """ + If execution is in progress, raise an exception. Otherwise, return None if no error was present upon + reaching completion. + """ + if not self.is_done: + raise user_exceptions.FlyteAssertion( + "Please what until the task execution has completed before requesting error information." 
+ ) + return self.closure.error + + @classmethod + def promote_from_model(cls, base_model: admin_task_execution_models.TaskExecution) -> "FlyteTaskExecution": + return cls( + closure=base_model.closure, + id=base_model.id, + input_uri=base_model.input_uri, + is_parent=base_model.is_parent, + ) + + +class FlyteWorkflowExecution(RemoteExecutionBase, execution_models.Execution): + """A class encapsulating a workflow execution being run on a Flyte remote backend.""" + + def __init__(self, *args, **kwargs): + super(FlyteWorkflowExecution, self).__init__(*args, **kwargs) + self._node_executions = None + self._flyte_workflow: Optional[FlyteWorkflow] = None + + @property + def flyte_workflow(self) -> Optional[FlyteWorkflow]: + return self._flyte_workflow + + @property + def node_executions(self) -> Dict[str, "FlyteNodeExecution"]: + """Get a dictionary of node executions that are a part of this workflow execution.""" + return self._node_executions or {} + + @property + def error(self) -> core_execution_models.ExecutionError: + """ + If execution is in progress, raise an exception. Otherwise, return None if no error was present upon + reaching completion. + """ + if not self.is_done: + raise user_exceptions.FlyteAssertion( + "Please wait until a workflow has completed before checking for an error." + ) + return self.closure.error + + @property + def is_done(self) -> bool: + """ + Whether or not the execution is complete. + """ + return self.closure.phase in { + core_execution_models.WorkflowExecutionPhase.ABORTED, + core_execution_models.WorkflowExecutionPhase.FAILED, + core_execution_models.WorkflowExecutionPhase.SUCCEEDED, + core_execution_models.WorkflowExecutionPhase.TIMED_OUT, + } + + @classmethod + def promote_from_model(cls, base_model: execution_models.Execution) -> "FlyteWorkflowExecution": + return cls( + closure=base_model.closure, + id=base_model.id, + spec=base_model.spec, + ) + + +class FlyteNodeExecution(RemoteExecutionBase, node_execution_models.NodeExecution): + """A class encapsulating a node execution being run on a Flyte remote backend.""" + + def __init__(self, *args, **kwargs): + super(FlyteNodeExecution, self).__init__(*args, **kwargs) + self._task_executions = None + self._workflow_executions = [] + self._underlying_node_executions = None + self._interface = None + self._flyte_node = None + + @property + def task_executions(self) -> List[FlyteTaskExecution]: + return self._task_executions or [] + + @property + def workflow_executions(self) -> List[FlyteWorkflowExecution]: + return self._workflow_executions + + @property + def subworkflow_node_executions(self) -> Dict[str, FlyteNodeExecution]: + """ + This returns underlying node executions in instances where the current node execution is + a parent node. This happens when it's either a static or dynamic subworkflow. + """ + return ( + {} + if self._underlying_node_executions is None + else {n.id.node_id: n for n in self._underlying_node_executions} + ) + + @property + def executions(self) -> List[Union[FlyteTaskExecution, FlyteWorkflowExecution]]: + return self.task_executions or self._underlying_node_executions or [] + + @property + def error(self) -> core_execution_models.ExecutionError: + """ + If execution is in progress, raise an exception. Otherwise, return None if no error was present upon + reaching completion. + """ + if not self.is_done: + raise user_exceptions.FlyteAssertion( + "Please wait until the node execution has completed before requesting error information." 
+ ) + return self.closure.error + + @property + def is_done(self) -> bool: + """Whether or not the execution is complete.""" + return self.closure.phase in { + core_execution_models.NodeExecutionPhase.ABORTED, + core_execution_models.NodeExecutionPhase.FAILED, + core_execution_models.NodeExecutionPhase.SKIPPED, + core_execution_models.NodeExecutionPhase.SUCCEEDED, + core_execution_models.NodeExecutionPhase.TIMED_OUT, + } + + @classmethod + def promote_from_model(cls, base_model: node_execution_models.NodeExecution) -> "FlyteNodeExecution": + return cls( + closure=base_model.closure, id=base_model.id, input_uri=base_model.input_uri, metadata=base_model.metadata + ) + + @property + def interface(self) -> "flytekit.remote.interface.TypedInterface": + """ + Return the interface of the task or subworkflow associated with this node execution. + """ + return self._interface diff --git a/flyrs/friendly.py b/flyrs/remote/friendly.py similarity index 98% rename from flyrs/friendly.py rename to flyrs/remote/friendly.py index 048155bb65..8db699358c 100644 --- a/flyrs/friendly.py +++ b/flyrs/remote/friendly.py @@ -29,7 +29,6 @@ from flytekit.models.core import identifier as _identifier # Lots of refactor jobs need to be done. -# Currently only refactored `get_task()` used by fetch_task() at FlyteRemote. class RustSynchronousFlyteClient(flyrs.FlyteClient): @@ -78,7 +77,7 @@ def create_task(self, task_identifer, task_spec): :raises grpc.RpcError: """ super(RustSynchronousFlyteClient, self).create_task( - _task_pb2.TaskCreateRequest(id=task_identifer.to_flyte_idl(), spec=task_spec.to_flyte_idl()) + _task_pb2.TaskCreateRequest(id=task_identifer.to_flyte_idl(), spec=task_spec.to_flyte_idl()).SerializeToString() ) def list_task_ids_paginated(self, project, domain, limit=100, token=None, sort_by=None): @@ -115,11 +114,13 @@ def list_task_ids_paginated(self, project, domain, limit=100, token=None, sort_b limit=limit, token=token, sort_by=None if sort_by is None else sort_by.to_flyte_idl(), - ) + ).SerializeToString() ) + ids = _task_pb2.IdentifierList() + ids.ParseFromString(identifier_list) return ( - [_common.NamedEntityIdentifier.from_flyte_idl(identifier_pb) for identifier_pb in identifier_list.entities], - str(identifier_list.token), + [_common.NamedEntityIdentifier.from_flyte_idl(identifier_pb) for identifier_pb in ids.entities], + str(ids.token), ) def list_tasks_paginated(self, identifier, limit=100, token=None, filters=None, sort_by=None): @@ -151,20 +152,23 @@ def list_tasks_paginated(self, identifier, limit=100, token=None, filters=None, :rtype: list[flytekit.models.task.Task], Text """ task_list = super(RustSynchronousFlyteClient, self).list_tasks_paginated( - resource_list_request=_common_pb2.ResourceListRequest( + _common_pb2.ResourceListRequest( id=identifier.to_flyte_idl(), limit=limit, token=token, filters=_filters.FilterList(filters or []).to_flyte_idl(), sort_by=None if sort_by is None else sort_by.to_flyte_idl(), - ) + ).SerializeToString() ) # TODO: tmp workaround - for pb in task_list.tasks: + tasks = _task_pb2.TaskList() + tasks.ParseFromString(task_list) + + for pb in tasks.tasks: pb.id.resource_type = _identifier.ResourceType.TASK return ( - [_task.Task.from_flyte_idl(task_pb2) for task_pb2 in task_list.tasks], - str(task_list.token), + [_task.Task.from_flyte_idl(task_pb2) for task_pb2 in tasks.tasks], + str(tasks.token), ) def get_task(self, id): @@ -175,13 +179,13 @@ def get_task(self, id): :raises: TODO :rtype: flytekit.models.task.Task """ - t = _task_pb2.Task() - 
t.ParseFromString( + task = _task_pb2.Task() + task.ParseFromString( super(RustSynchronousFlyteClient, self).get_task( _common_pb2.ObjectGetRequest(id=id.to_flyte_idl()).SerializeToString() ) ) - return _task.Task.from_flyte_idl(t) + return _task.Task.from_flyte_idl(task) #################################################################################################################### # diff --git a/flyrs/remote/interface.py b/flyrs/remote/interface.py new file mode 100644 index 0000000000..df61c8e336 --- /dev/null +++ b/flyrs/remote/interface.py @@ -0,0 +1,11 @@ +from flytekit.models import interface as _interface_models + + +class TypedInterface(_interface_models.TypedInterface): + @classmethod + def promote_from_model(cls, model): + """ + :param flytekit.models.interface.TypedInterface model: + :rtype: TypedInterface + """ + return cls(model.inputs, model.outputs) diff --git a/flyrs/remote/lazy_entity.py b/flyrs/remote/lazy_entity.py new file mode 100644 index 0000000000..abfcd7da06 --- /dev/null +++ b/flyrs/remote/lazy_entity.py @@ -0,0 +1,67 @@ +import typing +from threading import Lock + +from flytekit import FlyteContext +from remote.remote_callable import RemoteEntity + +T = typing.TypeVar("T", bound=RemoteEntity) + + +class LazyEntity(RemoteEntity, typing.Generic[T]): + """ + Fetches the entity when the entity is called or when the entity is retrieved. + The entity is derived from RemoteEntity so that it behaves exactly like the mimicked entity. + """ + + def __init__(self, name: str, getter: typing.Callable[[], T], *args, **kwargs): + super().__init__(*args, **kwargs) + self._entity = None + self._getter = getter + self._name = name + if not self._getter: + raise ValueError("getter method is required to create a Lazy loadable Remote Entity.") + self._mutex = Lock() + + @property + def name(self) -> str: + return self._name + + def entity_fetched(self) -> bool: + with self._mutex: + return self._entity is not None + + @property + def entity(self) -> T: + """ + If not already fetched / available, then the entity will be force fetched. + """ + with self._mutex: + if self._entity is None: + try: + self._entity = self._getter() + except AttributeError as e: + raise RuntimeError( + f"Error downloading the entity {self._name}, (check original exception...)" + ) from e + return self._entity + + def __getattr__(self, item: str) -> typing.Any: + """ + Forwards all other attributes to entity, causing the entity to be fetched! + """ + return getattr(self.entity, item) + + def compile(self, ctx: FlyteContext, *args, **kwargs): + return self.entity.compile(ctx, *args, **kwargs) + + def __call__(self, *args, **kwargs): + """ + Forwards the call to the underlying entity. 
The entity will be fetched if not already present + """ + return self.entity(*args, **kwargs) + + def __repr__(self) -> str: + return str(self) + + def __str__(self) -> str: + return f"Promise for entity [{self._name}]" diff --git a/flyrs/remote.py b/flyrs/remote/remote.py similarity index 99% rename from flyrs/remote.py rename to flyrs/remote/remote.py index 8d3587ee24..12c87c007f 100644 --- a/flyrs/remote.py +++ b/flyrs/remote/remote.py @@ -28,7 +28,7 @@ from flyteidl.core import literals_pb2 from flytekit import ImageSpec -from friendly import RustSynchronousFlyteClient +from remote.friendly import RustSynchronousFlyteClient from flytekit.clients.helpers import iterate_node_executions, iterate_task_executions from flytekit.configuration import Config, FastSerializationSettings, ImageConfig, SerializationSettings from flytekit.core import constants, utils @@ -73,14 +73,14 @@ ) from flytekit.models.launch_plan import LaunchPlanState from flytekit.models.literals import Literal, LiteralMap -from flytekit.remote.backfill import create_backfill_workflow -from flytekit.remote.data import download_literal -from flytekit.remote.entities import FlyteLaunchPlan, FlyteNode, FlyteTask, FlyteTaskNode, FlyteWorkflow -from flytekit.remote.executions import FlyteNodeExecution, FlyteTaskExecution, FlyteWorkflowExecution -from flytekit.remote.interface import TypedInterface -from flytekit.remote.lazy_entity import LazyEntity -from flytekit.remote.remote_callable import RemoteEntity -from flytekit.remote.remote_fs import get_flyte_fs +from remote.backfill import create_backfill_workflow +from remote.data import download_literal +from remote.entities import FlyteLaunchPlan, FlyteNode, FlyteTask, FlyteTaskNode, FlyteWorkflow +from remote.executions import FlyteNodeExecution, FlyteTaskExecution, FlyteWorkflowExecution +from remote.interface import TypedInterface +from remote.lazy_entity import LazyEntity +from remote.remote_callable import RemoteEntity +from remote.remote_fs import get_flyte_fs from flytekit.tools.fast_registration import fast_package from flytekit.tools.interactive import ipython_check from flytekit.tools.script_mode import _find_project_root, compress_scripts, hash_file diff --git a/flyrs/remote/remote_callable.py b/flyrs/remote/remote_callable.py new file mode 100644 index 0000000000..5b177bf7c4 --- /dev/null +++ b/flyrs/remote/remote_callable.py @@ -0,0 +1,75 @@ +from abc import ABC, abstractmethod +from typing import Any, Dict, Optional, Tuple, Type, Union + +from flytekit.core.context_manager import BranchEvalMode, ExecutionState, FlyteContext +from flytekit.core.promise import Promise, VoidPromise, create_and_link_node_from_remote, extract_obj_name +from flytekit.exceptions import user as user_exceptions +from flytekit.loggers import logger +from flytekit.models.core.workflow import NodeMetadata + + +class RemoteEntity(ABC): + def __init__(self, *args, **kwargs): + # In cases where we make a FlyteTask/Workflow/LaunchPlan from a locally created Python object (i.e. an @task + # or an @workflow decorated function), we actually have the Python interface, so + self._python_interface: Optional[Dict[str, Type]] = None + + super().__init__(*args, **kwargs) + + @property + @abstractmethod + def name(self) -> str: + ... + + def construct_node_metadata(self) -> NodeMetadata: + """ + Used when constructing the node that encapsulates this task as part of a broader workflow definition. 
+ """ + return NodeMetadata( + name=extract_obj_name(self.name), + ) + + def compile(self, ctx: FlyteContext, *args, **kwargs): + return create_and_link_node_from_remote(ctx, entity=self, **kwargs) # noqa + + def __call__(self, *args, **kwargs): + # When a Task is () aka __called__, there are three things we may do: + # a. Plain execution Mode - just run the execute function. If not overridden, we should raise an exception + # b. Compilation Mode - this happens when the function is called as part of a workflow (potentially + # dynamic task). Produce promise objects and create a node. + # c. Workflow Execution Mode - when a workflow is being run locally. Even though workflows are functions + # and everything should be able to be passed through naturally, we'll want to wrap output values of the + # function into objects, so that potential .with_cpu or other ancillary functions can be attached to do + # nothing. Subsequent tasks will have to know how to unwrap these. If by chance a non-Flyte task uses a + # task output as an input, things probably will fail pretty obviously. + # Since this is a reference entity, it still needs to be mocked otherwise an exception will be raised. + if len(args) > 0: + raise user_exceptions.FlyteAssertion( + f"Cannot call remotely fetched entity with args - detected {len(args)} positional args {args}" + ) + + ctx = FlyteContext.current_context() + if ctx.compilation_state is not None and ctx.compilation_state.mode == 1: + return self.compile(ctx, *args, **kwargs) + elif ( + ctx.execution_state is not None and ctx.execution_state.mode == ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION + ): + if ctx.execution_state.branch_eval_mode == BranchEvalMode.BRANCH_SKIPPED: + return + return self.local_execute(ctx, **kwargs) + else: + logger.debug("Fetched entity, running raw execute.") + return self.execute(**kwargs) + + def local_execute(self, ctx: FlyteContext, **kwargs) -> Optional[Union[Tuple[Promise], Promise, VoidPromise]]: + return self.execute(**kwargs) + + def local_execution_mode(self) -> ExecutionState.Mode: + return ExecutionState.Mode.LOCAL_TASK_EXECUTION + + def execute(self, **kwargs) -> Any: + raise AssertionError(f"Remotely fetched entities cannot be run locally. Please mock the {self.name}.execute.") + + @property + def python_interface(self) -> Optional[Dict[str, Type]]: + return self._python_interface diff --git a/flyrs/remote/remote_fs.py b/flyrs/remote/remote_fs.py new file mode 100644 index 0000000000..10131f63fa --- /dev/null +++ b/flyrs/remote/remote_fs.py @@ -0,0 +1,266 @@ +from __future__ import annotations + +import base64 +import hashlib +import os +import pathlib +import random +import threading +import typing +from uuid import UUID + +import fsspec +import requests +from fsspec.callbacks import NoOpCallback +from fsspec.implementations.http import HTTPFileSystem +from fsspec.utils import get_protocol + +from flytekit.loggers import logger +from flytekit.tools.script_mode import hash_file + +if typing.TYPE_CHECKING: + from flytekit.remote.remote import FlyteRemote + +_DEFAULT_CALLBACK = NoOpCallback() +_PREFIX_KEY = "upload_prefix" +_HASHES_KEY = "hashes" +# This file system is not really a filesystem, so users aren't really able to specify the remote path, +# at least not yet. 
+REMOTE_PLACEHOLDER = "flyte://data" + +HashStructure = typing.Dict[str, typing.Tuple[bytes, int]] + + +class FlytePathResolver: + protocol = "flyte://" + _flyte_path_to_remote_map: typing.Dict[str, str] = {} + _lock = threading.Lock() + + @classmethod + def resolve_remote_path(cls, flyte_uri: str) -> typing.Optional[str]: + """ + Given a flyte uri, return the remote path if it exists or was created in current session, otherwise return None + """ + with cls._lock: + if flyte_uri in cls._flyte_path_to_remote_map: + return cls._flyte_path_to_remote_map[flyte_uri] + return None + + @classmethod + def add_mapping(cls, flyte_uri: str, remote_path: str): + """ + Thread safe method to add a mapping from a flyte uri to a remote path + """ + with cls._lock: + cls._flyte_path_to_remote_map[flyte_uri] = remote_path + + +class HttpFileWriter(fsspec.spec.AbstractBufferedFile): + def __init__(self, remote: FlyteRemote, filename: str, **kwargs): + super().__init__(**kwargs) + self._remote = remote + self._filename = filename + + def _upload_chunk(self, final=False): + """Uploads the whole file at once from the buffer. + Not suitable for large files, since the entire file is buffered in memory. + Suitable for default values or local dataframes being uploaded all at once. + """ + if final is False: + return False + self.buffer.seek(0) + data = self.buffer.read() + + try: + res = self._remote.client.get_upload_signed_url( + self._remote.default_project, + self._remote.default_domain, + None, + None, + filename_root=self._filename, + ) + FlytePathResolver.add_mapping(self.path, res.native_url) + resp = requests.put(res.signed_url, data=data) + if not resp.ok: + raise AssertionError(f"Failed to upload file {self._filename} to {res.signed_url} reason {resp.reason}") + except Exception as e: + raise AssertionError(f"Failed to upload file {self._filename} reason {e}") + + +def get_flyte_fs(remote: FlyteRemote) -> typing.Type[FlyteFS]: + class _FlyteFS(FlyteFS): + def __init__(self, **storage_options): + super().__init__(remote=remote, **storage_options) + + return _FlyteFS + + +class FlyteFS(HTTPFileSystem): + """ + Want this to behave mostly just like the HTTP file system. + """ + + sep = "/" + protocol = "flyte" + + def __init__( + self, + remote: FlyteRemote, + asynchronous: bool = False, + **storage_options, + ): + super().__init__(asynchronous=asynchronous, **storage_options) + self._remote = remote + + @property + def fsid(self) -> str: + return "flyte" + + async def _get_file(self, rpath, lpath, **kwargs): + """ + Don't do anything special. If it's a flyte url, then create a download link and write to lpath, + otherwise default to parent. + """ + raise NotImplementedError("FlyteFS currently doesn't support downloading files.") + + async def _put_file( + self, + lpath, + rpath, + chunk_size=5 * 2**20, + callback=_DEFAULT_CALLBACK, + method="put", + **kwargs, + ): + """ + fsspec will call this method to upload a file. If recursive, rpath will already be individual files. + Make the request and upload, but then how do we get the s3 paths back to the user? + """ + prefix = kwargs.pop(_PREFIX_KEY) + _, native_url = self._remote.upload_file( + pathlib.Path(lpath), self._remote.default_project, self._remote.default_domain, prefix + ) + return native_url + + @staticmethod + def extract_common(native_urls: typing.List[str]) -> str: + """ + This function takes a list of strings and returns the longest prefix that they all have in common.
+    @staticmethod
+    def extract_common(native_urls: typing.List[str]) -> str:
+        """
+        Takes a list of strings and returns the longest prefix that they all have in common.
+        That is, if you have
+        ['s3://my-s3-bucket/flytesnacks/development/ABCYZWMPACZAJ2MABGMOZ6CCPY======/source/empty.md',
+        's3://my-s3-bucket/flytesnacks/development/ABCXKL5ZZWXY3PDLM3OONUHHME======/source/nested/more.txt',
+        's3://my-s3-bucket/flytesnacks/development/ABCXBAPBKONMADXVW5Q3J6YBWM======/source/original.txt']
+        this will return 's3://my-s3-bucket/flytesnacks/development'
+        Note that trailing characters after a separator that just happen to be the same will also be stripped.
+        """
+        if len(native_urls) == 0:
+            return ""
+        if len(native_urls) == 1:
+            return native_urls[0]
+
+        common_prefix = ""
+        shortest = min([len(x) for x in native_urls])
+        x = [[native_urls[j][i] for j in range(len(native_urls))] for i in range(shortest)]
+        for i in x:
+            if len(set(i)) == 1:
+                common_prefix += i[0]
+            else:
+                break
+
+        fs = fsspec.filesystem(get_protocol(native_urls[0]))
+        sep = fs.sep
+        # split the common prefix on the last separator so we don't get any trailing characters.
+        common_prefix = common_prefix.rsplit(sep, 1)[0]
+        logger.debug(f"Returning {common_prefix} from {native_urls}")
+        return common_prefix
+
+    def get_hashes_and_lengths(self, p: pathlib.Path) -> HashStructure:
+        """
+        Returns a flat mapping from absolute file paths to their hashes and content lengths.
+        This output is used both for the file upload request and to consistently create a filename
+        root for uploaded folders. We also use it for single files, just for consistency.
+        If given a directory, all the files in the directory will be hashed.
+        If given a single file, just that file will be hashed.
+        Symlinks are skipped.
+        """
+        if p.is_symlink():
+            return {}
+        if p.is_dir():
+            hashes = {}
+            for f in p.iterdir():
+                hashes.update(self.get_hashes_and_lengths(f))
+            return hashes
+        else:
+            md5_bytes, _, content_length = hash_file(p.resolve())
+            return {str(p.absolute()): (md5_bytes, content_length)}
+
+    @staticmethod
+    def get_filename_root(file_info: HashStructure) -> str:
+        """
+        Given a dictionary of file paths to hashes and content lengths, return a consistent filename root.
+        This is done by hashing the individual file hashes, in sorted path order, and then
+        base32-encoding the result. If the input is empty, a random string is generated instead.
+        """
+        if len(file_info) == 0:
+            return UUID(int=random.getrandbits(128)).hex
+        sorted_paths = sorted(file_info.keys())
+        h = hashlib.md5()
+        for p in sorted_paths:
+            h.update(file_info[p][0])
+        return base64.b32encode(h.digest()).decode("utf-8")
+
+    async def _put(
+        self,
+        lpath,
+        rpath,
+        recursive=False,
+        callback=_DEFAULT_CALLBACK,
+        batch_size=None,
+        **kwargs,
+    ):
+        """
+        cp file.txt flyte://data/...
+        The upload destination ignores rpath (REMOTE_PLACEHOLDER is used instead); rpath is only
+        kept as the key for the flyte uri -> remote path mapping.
+        """
+        # Hash everything at the top level
+        file_info = self.get_hashes_and_lengths(pathlib.Path(lpath))
+        prefix = self.get_filename_root(file_info)
+
+        kwargs[_PREFIX_KEY] = prefix
+        kwargs[_HASHES_KEY] = file_info
+        res = await super()._put(lpath, REMOTE_PLACEHOLDER, recursive, callback, batch_size, **kwargs)
+        if isinstance(res, list):
+            res = self.extract_common(res)
+        FlytePathResolver.add_mapping(rpath.strip(os.path.sep), res)
+        return res
+
+    async def _isdir(self, path):
+        return True
+
+    def exists(self, path, **kwargs):
+        raise NotImplementedError("flyte file system currently can't check if a file exists.")
+
+    def _open(
+        self,
+        path,
+        mode="wb",
+        block_size=None,
+        autocommit=None,  # XXX: This differs from the base class.
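+        # (presumably relative to fsspec's AbstractFileSystem._open, which defaults autocommit=True)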
+ cache_type=None, + cache_options=None, + size=None, + **kwargs, + ): + if mode != "wb": + raise ValueError("Only wb mode is supported") + + # Dataframes are written as multiple files, default is the first file with 00000 suffix, we should drop + # that suffix and use the parent directory as the remote path. + + return HttpFileWriter( + self._remote, os.path.basename(path), fs=self, path=os.path.dirname(path), mode=mode, **kwargs + ) + + def __str__(self): + p = super().__str__() + return f"FlyteFS({self._remote}): {p}" diff --git a/flyrs/src/lib.rs b/flyrs/src/lib.rs index be92117e93..3be3debb9a 100644 --- a/flyrs/src/lib.rs +++ b/flyrs/src/lib.rs @@ -5,7 +5,7 @@ use tokio::runtime::{Builder, Runtime}; use tonic::transport::Channel; use flyteidl::flyteidl::service::admin_service_client::AdminServiceClient; -use flyteidl::flyteidl::admin::{Task, ObjectGetRequest, ResourceListRequest, TaskExecutionGetRequest}; +use flyteidl::flyteidl::admin;//::{Task, ObjectGetRequest, ResourceListRequest, NamedEntityIdentifierListRequest, TaskExecutionGetRequest}; // Unlike the normal use case of PyO3, we don't have to add attribute macros such as #[pyclass] or #[pymethods] to all of our flyteidl structs. // In this case, we only use PyO3 to expose the client class and its methods to Python (FlyteKit). @@ -43,9 +43,23 @@ impl FlyteClient { // fn serialize_tobytes(proto) { // } + pub fn create_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::TaskCreateRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = (self.runtime.block_on(self.admin_service.create_task(req))).unwrap().into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } + pub fn get_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { let bytes = bytes_obj.as_bytes(); - let decoded: ObjectGetRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let decoded: admin::ObjectGetRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); let req = tonic::Request::new(decoded); // Interacting with the gRPC server: flyteadmin @@ -57,9 +71,23 @@ impl FlyteClient { Ok(PyBytes::new_bound(py, &buf).into()) } + pub fn list_task_ids_paginated(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::NamedEntityIdentifierListRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = self.runtime.block_on(self.admin_service.list_task_ids(req)).unwrap().into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } + pub fn list_tasks_paginated(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { let bytes = bytes_obj.as_bytes(); - let decoded: ResourceListRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let decoded: admin::ResourceListRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); let req = tonic::Request::new(decoded); // Interacting with the gRPC server: flyteadmin @@ -74,7 +102,7 @@ impl FlyteClient { pub fn echo_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { // PyResult> let bytes = bytes_obj.as_bytes(); println!("Received bytes: {:?}", bytes); - let decoded: Task = Message::decode(&bytes.to_vec()[..]).unwrap(); + let decoded: admin::Task = 
Message::decode(&bytes.to_vec()[..]).unwrap(); println!("Parsed Task: {:?}", decoded); let mut buf = vec![]; decoded.encode(&mut buf).unwrap(); diff --git a/flyrs/test_FlyteRemote.py b/flyrs/test_FlyteRemote.py index 05fc32a814..b033629587 100644 --- a/flyrs/test_FlyteRemote.py +++ b/flyrs/test_FlyteRemote.py @@ -1,20 +1,29 @@ from flytekit.configuration import Config from flytekit.remote import FlyteRemote -from remote import RustFlyteRemote +from remote.remote import RustFlyteRemote PROJECT = "flytesnacks" DOMAIN = "development" remote_py = FlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN) task_py = remote_py.fetch_task( - project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw" -) -print(task_py) + project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw" + ) +# print(task_py) -remote_rs = FlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN) +remote_rs = RustFlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN) task_rs = remote_rs.fetch_task( project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw" ) -print(task_rs) - +# print(task_rs) assert task_py == task_rs + + +tasks_py = remote_py.list_tasks_by_version( + project=PROJECT, domain=DOMAIN, version="WhIAnhpyjrAdaRvrQ9Cjpw" +) +tasks_rs = remote_rs.list_tasks_by_version( + project=PROJECT, domain=DOMAIN, version="WhIAnhpyjrAdaRvrQ9Cjpw" +) +assert tasks_py == tasks_rs + From d704fcbf612e22af1c0247a80a5505d67753d619 Mon Sep 17 00:00:00 2001 From: Austin Liu Date: Wed, 17 Apr 2024 14:17:26 +0800 Subject: [PATCH 10/16] re-org & init Interceptor Signed-off-by: Austin Liu --- flyrs/Cargo.lock | 8 ++++++ flyrs/Cargo.toml | 1 + flyrs/{remote => clients}/backfill.py | 2 +- flyrs/{remote => clients}/data.py | 0 flyrs/{remote => clients}/entities.py | 4 +-- flyrs/{remote => clients}/executions.py | 2 +- flyrs/{remote => clients}/friendly.py | 0 flyrs/{remote => clients}/interface.py | 0 flyrs/{remote => clients}/lazy_entity.py | 2 +- flyrs/{remote => clients}/remote_callable.py | 0 flyrs/{remote => clients}/remote_fs.py | 0 flyrs/remote/remote.py | 16 +++++------ flyrs/src/lib.rs | 29 ++++++++++++++++---- 13 files changed, 46 insertions(+), 18 deletions(-) rename flyrs/{remote => clients}/backfill.py (98%) rename flyrs/{remote => clients}/data.py (100%) rename flyrs/{remote => clients}/entities.py (99%) rename flyrs/{remote => clients}/executions.py (99%) rename flyrs/{remote => clients}/friendly.py (100%) rename flyrs/{remote => clients}/interface.py (100%) rename flyrs/{remote => clients}/lazy_entity.py (97%) rename flyrs/{remote => clients}/remote_callable.py (100%) rename flyrs/{remote => clients}/remote_fs.py (100%) diff --git a/flyrs/Cargo.lock b/flyrs/Cargo.lock index 6aed12e917..0f37693a4e 100644 --- a/flyrs/Cargo.lock +++ b/flyrs/Cargo.lock @@ -173,6 +173,7 @@ dependencies = [ "pyo3", "tokio", "tonic", + "tower", ] [[package]] @@ -416,6 +417,12 @@ dependencies = [ "scopeguard", ] +[[package]] +name = "log" +version = "0.4.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" + [[package]] name = "matchit" version = "0.7.3" @@ -938,6 +945,7 @@ version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ + "log", "pin-project-lite", "tracing-attributes", 
"tracing-core", diff --git a/flyrs/Cargo.toml b/flyrs/Cargo.toml index 125fbf5b90..bdbee85224 100644 --- a/flyrs/Cargo.toml +++ b/flyrs/Cargo.toml @@ -18,6 +18,7 @@ tokio = { version = "1.37.0", features = ["full"] } pyo3 = { version = "0.21", features = ["extension-module", "experimental-async"] } flyteidl = { path="../../flyte/flyteidl" } +tower = "0.4.13" [build-dependencies] diff --git a/flyrs/remote/backfill.py b/flyrs/clients/backfill.py similarity index 98% rename from flyrs/remote/backfill.py rename to flyrs/clients/backfill.py index d3d3604e37..7d06a6d631 100644 --- a/flyrs/remote/backfill.py +++ b/flyrs/clients/backfill.py @@ -6,7 +6,7 @@ from flytekit import LaunchPlan from flytekit.core.workflow import ImperativeWorkflow, WorkflowBase, WorkflowFailurePolicy -from remote.entities import FlyteLaunchPlan +from entities import FlyteLaunchPlan def create_backfill_workflow( diff --git a/flyrs/remote/data.py b/flyrs/clients/data.py similarity index 100% rename from flyrs/remote/data.py rename to flyrs/clients/data.py diff --git a/flyrs/remote/entities.py b/flyrs/clients/entities.py similarity index 99% rename from flyrs/remote/entities.py rename to flyrs/clients/entities.py index 1ffbdff7a2..f1d2069216 100644 --- a/flyrs/remote/entities.py +++ b/flyrs/clients/entities.py @@ -31,8 +31,8 @@ from flytekit.models.interface import TypedInterface from flytekit.models.literals import Binding from flytekit.models.task import TaskSpec -import remote.interface as _interfaces -from remote.remote_callable import RemoteEntity +import interface as _interfaces +from remote_callable import RemoteEntity class FlyteTask(hash_mixin.HashOnReferenceMixin, RemoteEntity, TaskSpec): diff --git a/flyrs/remote/executions.py b/flyrs/clients/executions.py similarity index 99% rename from flyrs/remote/executions.py rename to flyrs/clients/executions.py index c77acc69e9..33c44d5003 100644 --- a/flyrs/remote/executions.py +++ b/flyrs/clients/executions.py @@ -9,7 +9,7 @@ from flytekit.models import node_execution as node_execution_models from flytekit.models.admin import task_execution as admin_task_execution_models from flytekit.models.core import execution as core_execution_models -from remote.entities import FlyteTask, FlyteWorkflow +from entities import FlyteTask, FlyteWorkflow class RemoteExecutionBase(object): diff --git a/flyrs/remote/friendly.py b/flyrs/clients/friendly.py similarity index 100% rename from flyrs/remote/friendly.py rename to flyrs/clients/friendly.py diff --git a/flyrs/remote/interface.py b/flyrs/clients/interface.py similarity index 100% rename from flyrs/remote/interface.py rename to flyrs/clients/interface.py diff --git a/flyrs/remote/lazy_entity.py b/flyrs/clients/lazy_entity.py similarity index 97% rename from flyrs/remote/lazy_entity.py rename to flyrs/clients/lazy_entity.py index abfcd7da06..f0520b5296 100644 --- a/flyrs/remote/lazy_entity.py +++ b/flyrs/clients/lazy_entity.py @@ -2,7 +2,7 @@ from threading import Lock from flytekit import FlyteContext -from remote.remote_callable import RemoteEntity +from remote_callable import RemoteEntity T = typing.TypeVar("T", bound=RemoteEntity) diff --git a/flyrs/remote/remote_callable.py b/flyrs/clients/remote_callable.py similarity index 100% rename from flyrs/remote/remote_callable.py rename to flyrs/clients/remote_callable.py diff --git a/flyrs/remote/remote_fs.py b/flyrs/clients/remote_fs.py similarity index 100% rename from flyrs/remote/remote_fs.py rename to flyrs/clients/remote_fs.py diff --git a/flyrs/remote/remote.py 
b/flyrs/remote/remote.py index 12c87c007f..ac8dab70a9 100644 --- a/flyrs/remote/remote.py +++ b/flyrs/remote/remote.py @@ -73,14 +73,14 @@ ) from flytekit.models.launch_plan import LaunchPlanState from flytekit.models.literals import Literal, LiteralMap -from remote.backfill import create_backfill_workflow -from remote.data import download_literal -from remote.entities import FlyteLaunchPlan, FlyteNode, FlyteTask, FlyteTaskNode, FlyteWorkflow -from remote.executions import FlyteNodeExecution, FlyteTaskExecution, FlyteWorkflowExecution -from remote.interface import TypedInterface -from remote.lazy_entity import LazyEntity -from remote.remote_callable import RemoteEntity -from remote.remote_fs import get_flyte_fs +from clients.backfill import create_backfill_workflow +from clients.data import download_literal +from clients.entities import FlyteLaunchPlan, FlyteNode, FlyteTask, FlyteTaskNode, FlyteWorkflow +from clients.executions import FlyteNodeExecution, FlyteTaskExecution, FlyteWorkflowExecution +from clients.interface import TypedInterface +from clients.lazy_entity import LazyEntity +from clients.remote_callable import RemoteEntity +from clients.remote_fs import get_flyte_fs from flytekit.tools.fast_registration import fast_package from flytekit.tools.interactive import ipython_check from flytekit.tools.script_mode import _find_project_root, compress_scripts, hash_file diff --git a/flyrs/src/lib.rs b/flyrs/src/lib.rs index 3be3debb9a..51a76a1fab 100644 --- a/flyrs/src/lib.rs +++ b/flyrs/src/lib.rs @@ -2,10 +2,17 @@ use prost::{Message}; use pyo3::prelude::*; use pyo3::types::PyBytes; use tokio::runtime::{Builder, Runtime}; -use tonic::transport::Channel; +use tonic::{ + metadata::MetadataValue, + codegen::InterceptedService, + service::Interceptor, + transport::{Channel, Endpoint, Error}, + Request, Status, +}; use flyteidl::flyteidl::service::admin_service_client::AdminServiceClient; -use flyteidl::flyteidl::admin;//::{Task, ObjectGetRequest, ResourceListRequest, NamedEntityIdentifierListRequest, TaskExecutionGetRequest}; +use flyteidl::flyteidl::admin; +use std::option::Option; // Unlike the normal use case of PyO3, we don't have to add attribute macros such as #[pyclass] or #[pymethods] to all of our flyteidl structs. // In this case, we only use PyO3 to expose the client class and its methods to Python (FlyteKit). @@ -16,10 +23,20 @@ use flyteidl::flyteidl::admin;//::{Task, ObjectGetRequest, ResourceListRequest, #[pyclass(subclass)] pub struct FlyteClient { - admin_service: AdminServiceClient, + admin_service: AdminServiceClient>, runtime: Runtime, } +struct AuthUnaryInterceptor; + +impl Interceptor for AuthUnaryInterceptor { + fn call(&mut self, mut request: tonic::Request<()>) -> Result, Status> { + let token: MetadataValue<_> = "Bearer some-auth-token".parse().unwrap(); + request.metadata_mut().insert("authorization", token.clone()); + Ok(request) + } +} + // Using temporary value(e.g., endpoint) in async is tricky w.r.t lifetime. // The compiler will complain that the temporary value does not live long enough. // TODO: figure out how to pass in the required initial args into constructor in a clean and neat way. @@ -29,10 +46,12 @@ impl FlyteClient { pub fn new() -> PyResult { let rt = Builder::new_multi_thread().enable_all().build().unwrap(); // TODO: Create a channel then bind it to every stubs/clients instead of connecting everytime. 
- let stub = rt.block_on(AdminServiceClient::connect("http://localhost:30080")).unwrap(); + let channel = rt.block_on(Endpoint::from_static("http://localhost:30080").connect()).unwrap(); + // let stub = rt.block_on(AdminServiceClient::connect("http://localhost:30080")).unwrap(); + let mut stub = AdminServiceClient::with_interceptor(channel, AuthUnaryInterceptor); // TODO: Add more thoughtful error handling Ok(FlyteClient { - runtime: rt, // The tokio runtime is used in a blocking manner now, left lots of investigation and TODOs behind. + runtime: rt, // The tokio runtime is used in a blocking manner now, leaving lots of investigation and TODOs behind. admin_service: stub, } ) From 15196a12afa45231c0eaed8997601d86678208f2 Mon Sep 17 00:00:00 2001 From: Austin Liu Date: Thu, 18 Apr 2024 22:07:07 +0800 Subject: [PATCH 11/16] port workflow api Signed-off-by: Austin Liu --- flyrs/build.sh | 1 - flyrs/clients/backfill.py | 2 +- flyrs/clients/entities.py | 4 +-- flyrs/clients/executions.py | 2 +- flyrs/clients/friendly.py | 25 +++++++++------- flyrs/clients/lazy_entity.py | 2 +- flyrs/readme.md | 11 +++++++ flyrs/remote/remote.py | 2 +- flyrs/src/lib.rs | 57 ++++++++++++++++++++++++++++++++++++ flyrs/t.py | 11 +++++++ flyrs/test_FlyteRemote.py | 29 ------------------ flyrs/test_flyte_remote.py | 38 ++++++++++++++++++++++++ 12 files changed, 138 insertions(+), 46 deletions(-) delete mode 100644 flyrs/build.sh create mode 100644 flyrs/t.py delete mode 100644 flyrs/test_FlyteRemote.py create mode 100644 flyrs/test_flyte_remote.py diff --git a/flyrs/build.sh b/flyrs/build.sh deleted file mode 100644 index 516f59fc7d..0000000000 --- a/flyrs/build.sh +++ /dev/null @@ -1 +0,0 @@ -maturin develop --release \ No newline at end of file diff --git a/flyrs/clients/backfill.py b/flyrs/clients/backfill.py index 7d06a6d631..98649410d6 100644 --- a/flyrs/clients/backfill.py +++ b/flyrs/clients/backfill.py @@ -6,7 +6,7 @@ from flytekit import LaunchPlan from flytekit.core.workflow import ImperativeWorkflow, WorkflowBase, WorkflowFailurePolicy -from entities import FlyteLaunchPlan +from clients.entities import FlyteLaunchPlan def create_backfill_workflow( diff --git a/flyrs/clients/entities.py b/flyrs/clients/entities.py index f1d2069216..9adba429a6 100644 --- a/flyrs/clients/entities.py +++ b/flyrs/clients/entities.py @@ -31,8 +31,8 @@ from flytekit.models.interface import TypedInterface from flytekit.models.literals import Binding from flytekit.models.task import TaskSpec -import interface as _interfaces -from remote_callable import RemoteEntity +import clients.interface as _interfaces +from clients.remote_callable import RemoteEntity class FlyteTask(hash_mixin.HashOnReferenceMixin, RemoteEntity, TaskSpec): diff --git a/flyrs/clients/executions.py b/flyrs/clients/executions.py index 33c44d5003..05724bb868 100644 --- a/flyrs/clients/executions.py +++ b/flyrs/clients/executions.py @@ -9,7 +9,7 @@ from flytekit.models import node_execution as node_execution_models from flytekit.models.admin import task_execution as admin_task_execution_models from flytekit.models.core import execution as core_execution_models -from entities import FlyteTask, FlyteWorkflow +from clients.entities import FlyteTask, FlyteWorkflow class RemoteExecutionBase(object): diff --git a/flyrs/clients/friendly.py b/flyrs/clients/friendly.py index 8db699358c..157dbe9ffb 100644 --- a/flyrs/clients/friendly.py +++ b/flyrs/clients/friendly.py @@ -163,7 +163,6 @@ def list_tasks_paginated(self, identifier, limit=100, token=None, filters=None, # TODO: 
tmp workaround
         tasks = _task_pb2.TaskList()
         tasks.ParseFromString(task_list)
-
         for pb in tasks.tasks:
             pb.id.resource_type = _identifier.ResourceType.TASK
         return (
@@ -215,7 +214,7 @@ def create_workflow(self, workflow_identifier, workflow_spec):
         super(RustSynchronousFlyteClient, self).create_workflow(
             _workflow_pb2.WorkflowCreateRequest(
                 id=workflow_identifier.to_flyte_idl(), spec=workflow_spec.to_flyte_idl()
-            )
+            ).SerializeToString()
         )
 
     def list_workflow_ids_paginated(self, project, domain, limit=100, token=None, sort_by=None):
@@ -252,11 +251,13 @@ def list_workflow_ids_paginated(self, project, domain, limit=100, token=None, so
             limit=limit,
             token=token,
             sort_by=None if sort_by is None else sort_by.to_flyte_idl(),
-            )
+            ).SerializeToString()
         )
+        ids = _common_pb2.NamedEntityIdentifierList()
+        ids.ParseFromString(identifier_list)
         return (
-            [_common.NamedEntityIdentifier.from_flyte_idl(identifier_pb) for identifier_pb in identifier_list.entities],
-            str(identifier_list.token),
+            [_common.NamedEntityIdentifier.from_flyte_idl(identifier_pb) for identifier_pb in ids.entities],
+            str(ids.token),
         )
 
     def list_workflows_paginated(self, identifier, limit=100, token=None, filters=None, sort_by=None):
@@ -297,11 +298,13 @@ def list_workflows_paginated(self, identifier, limit=100, token=None, filters=No
             )
         )
         # TODO: tmp workaround
-        for pb in wf_list.workflows:
+        workflows = _workflow_pb2.WorkflowList()
+        workflows.ParseFromString(wf_list)
+        for pb in workflows.workflows:
             pb.id.resource_type = _identifier.ResourceType.WORKFLOW
         return (
-            [_workflow.Workflow.from_flyte_idl(wf_pb2) for wf_pb2 in wf_list.workflows],
-            str(wf_list.token),
+            [_workflow.Workflow.from_flyte_idl(wf_pb2) for wf_pb2 in workflows.workflows],
+            str(workflows.token),
         )
 
     def get_workflow(self, id):
@@ -312,9 +315,11 @@ def get_workflow(self, id):
         :raises: TODO
         :rtype: flytekit.models.admin.workflow.Workflow
         """
-        return _workflow.Workflow.from_flyte_idl(
-            super(RustSynchronousFlyteClient, self).get_workflow(_common_pb2.ObjectGetRequest(id=id.to_flyte_idl()))
+        workflow = _workflow_pb2.Workflow()
+        workflow.ParseFromString(
+            super(RustSynchronousFlyteClient, self).get_workflow(_common_pb2.ObjectGetRequest(id=id.to_flyte_idl()).SerializeToString())
         )
+        return _workflow.Workflow.from_flyte_idl(workflow)
 
 ####################################################################################################################
 #
diff --git a/flyrs/clients/lazy_entity.py b/flyrs/clients/lazy_entity.py
index f0520b5296..c9cb803267 100644
--- a/flyrs/clients/lazy_entity.py
+++ b/flyrs/clients/lazy_entity.py
@@ -2,7 +2,7 @@
 from threading import Lock
 
 from flytekit import FlyteContext
-from remote_callable import RemoteEntity
+from clients.remote_callable import RemoteEntity
 
 T = typing.TypeVar("T", bound=RemoteEntity)
diff --git a/flyrs/readme.md b/flyrs/readme.md
index e69de29bb2..46a55a1fdf 100644
--- a/flyrs/readme.md
+++ b/flyrs/readme.md
@@ -0,0 +1,11 @@
+### How to build Rust FlyteRemote Client?
+1. Install `maturin` to build Python packages with Rust `PyO3`.
+    - `pip install maturin`
+2. Compile the Python extension and install it into the local Python virtual environment root.
+    - `maturin develop`
+    - or `maturin develop --release` (passes `--release` to `cargo build`)
+
+### How to test Rust FlyteRemote Client?
+1. `pyflyte register ./t.py` to get a flyte entity `version` id
+2. Set the previously fetched `version` id in `./test_flyte_remote.py`'s `VERSION_ID`
+3. 
`python ./test_flyte_remote.py` in `flyrs/` \ No newline at end of file diff --git a/flyrs/remote/remote.py b/flyrs/remote/remote.py index ac8dab70a9..2b7c390d53 100644 --- a/flyrs/remote/remote.py +++ b/flyrs/remote/remote.py @@ -28,7 +28,7 @@ from flyteidl.core import literals_pb2 from flytekit import ImageSpec -from remote.friendly import RustSynchronousFlyteClient +from clients.friendly import RustSynchronousFlyteClient from flytekit.clients.helpers import iterate_node_executions, iterate_task_executions from flytekit.configuration import Config, FastSerializationSettings, ImageConfig, SerializationSettings from flytekit.core import constants, utils diff --git a/flyrs/src/lib.rs b/flyrs/src/lib.rs index 51a76a1fab..69e2394c1f 100644 --- a/flyrs/src/lib.rs +++ b/flyrs/src/lib.rs @@ -130,6 +130,63 @@ impl FlyteClient { Ok(PyBytes::new_bound(py, &buf).into()) } + pub fn create_workflow(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::WorkflowCreateRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = (self.runtime.block_on(self.admin_service.create_workflow(req))).unwrap().into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } + + pub fn get_workflow(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::ObjectGetRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = (self.runtime.block_on(self.admin_service.get_workflow(req))).unwrap().into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } + + pub fn list_workflow_ids_paginated(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::NamedEntityIdentifierListRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = self.runtime.block_on(self.admin_service.list_workflow_ids(req)).unwrap().into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } + + pub fn list_workflows_paginated(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::ResourceListRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = self.runtime.block_on(self.admin_service.list_workflows(req)).unwrap().into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } + + } diff --git a/flyrs/t.py b/flyrs/t.py new file mode 100644 index 0000000000..9d2fe29a27 --- /dev/null +++ b/flyrs/t.py @@ -0,0 +1,11 @@ +from flytekit import ImageSpec, StructuredDataset, kwtypes, task, workflow + + +@task +def say_hi() -> str: + return "hi" + +@workflow +def say_hi_wf(): + say_hi() + return diff --git a/flyrs/test_FlyteRemote.py b/flyrs/test_FlyteRemote.py deleted file mode 100644 index b033629587..0000000000 --- a/flyrs/test_FlyteRemote.py +++ /dev/null @@ -1,29 +0,0 @@ -from flytekit.configuration import Config -from flytekit.remote import FlyteRemote -from remote.remote 
import RustFlyteRemote - -PROJECT = "flytesnacks" -DOMAIN = "development" - -remote_py = FlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN) -task_py = remote_py.fetch_task( - project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw" - ) -# print(task_py) - -remote_rs = RustFlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN) -task_rs = remote_rs.fetch_task( - project=PROJECT, domain=DOMAIN, name="workflows_.say_1", version="WhIAnhpyjrAdaRvrQ9Cjpw" -) -# print(task_rs) -assert task_py == task_rs - - -tasks_py = remote_py.list_tasks_by_version( - project=PROJECT, domain=DOMAIN, version="WhIAnhpyjrAdaRvrQ9Cjpw" -) -tasks_rs = remote_rs.list_tasks_by_version( - project=PROJECT, domain=DOMAIN, version="WhIAnhpyjrAdaRvrQ9Cjpw" -) -assert tasks_py == tasks_rs - diff --git a/flyrs/test_flyte_remote.py b/flyrs/test_flyte_remote.py new file mode 100644 index 0000000000..fb887792d8 --- /dev/null +++ b/flyrs/test_flyte_remote.py @@ -0,0 +1,38 @@ +from flytekit.configuration import Config +from flytekit.remote import FlyteRemote +from remote.remote import RustFlyteRemote + +PROJECT = "flytesnacks" +DOMAIN = "development" + +TASK_NAME = "t.say_hi" +WF_NAME = "t.say_hi_wf" +VERSION_ID = "kQYNrRsnGenYk-Y2EF-y6A" + +remote_py = FlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN) +remote_rs = RustFlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN) + +task_py = remote_py.fetch_task( + project=PROJECT, domain=DOMAIN, name=TASK_NAME, version=VERSION_ID +) +task_rs = remote_rs.fetch_task( + project=PROJECT, domain=DOMAIN, name=TASK_NAME, version=VERSION_ID +) +assert task_py == task_rs + +tasks_py = remote_py.list_tasks_by_version( + project=PROJECT, domain=DOMAIN, version=VERSION_ID +) +tasks_rs = remote_rs.list_tasks_by_version( + project=PROJECT, domain=DOMAIN, version=VERSION_ID +) +assert tasks_py == tasks_rs + +workflow_py = remote_py.fetch_workflow( + project=PROJECT, domain=DOMAIN, name=WF_NAME, version=VERSION_ID + ) +workflow_rs = remote_rs.fetch_workflow( + project=PROJECT, domain=DOMAIN, name=WF_NAME, version=VERSION_ID +) +assert workflow_py == workflow_rs + From 87aacc99ac9438b2ca7d2ab2e58eb8957e5b5c4a Mon Sep 17 00:00:00 2001 From: Austin Liu Date: Fri, 19 Apr 2024 15:48:28 +0800 Subject: [PATCH 12/16] clippy fmt Signed-off-by: Austin Liu --- flyrs/Cargo.lock | 1 + flyrs/Cargo.toml | 3 +- flyrs/src/lib.rs | 307 +++++++++++++++++++++++++++-------------------- 3 files changed, 176 insertions(+), 135 deletions(-) diff --git a/flyrs/Cargo.lock b/flyrs/Cargo.lock index 0f37693a4e..a166c09349 100644 --- a/flyrs/Cargo.lock +++ b/flyrs/Cargo.lock @@ -179,6 +179,7 @@ dependencies = [ [[package]] name = "flyteidl" version = "0.1.0" +source = "git+https://github.com/austin362667/flyte.git?branch=austin362667/flyteidl/rs#011b58482fc86f50e447b729de91a4b2c4d135ce" dependencies = [ "prost", "prost-types", diff --git a/flyrs/Cargo.toml b/flyrs/Cargo.toml index bdbee85224..03d0a013dd 100644 --- a/flyrs/Cargo.toml +++ b/flyrs/Cargo.toml @@ -16,8 +16,7 @@ prost = "0.12.3" tonic = "0.11.0" tokio = { version = "1.37.0", features = ["full"] } pyo3 = { version = "0.21", features = ["extension-module", "experimental-async"] } - -flyteidl = { path="../../flyte/flyteidl" } +flyteidl = { git="https://github.com/austin362667/flyte.git", branch = "austin362667/flyteidl/rs" } tower = "0.4.13" [build-dependencies] diff --git a/flyrs/src/lib.rs b/flyrs/src/lib.rs index 69e2394c1f..46675a9f10 100644 --- 
a/flyrs/src/lib.rs +++ b/flyrs/src/lib.rs @@ -1,17 +1,17 @@ -use prost::{Message}; +use prost::Message; use pyo3::prelude::*; use pyo3::types::PyBytes; use tokio::runtime::{Builder, Runtime}; use tonic::{ - metadata::MetadataValue, - codegen::InterceptedService, - service::Interceptor, - transport::{Channel, Endpoint, Error}, - Request, Status, + codegen::InterceptedService, + metadata::MetadataValue, + service::Interceptor, + transport::{Channel, Endpoint, Error}, + Request, Status, }; -use flyteidl::flyteidl::service::admin_service_client::AdminServiceClient; use flyteidl::flyteidl::admin; +use flyteidl::flyteidl::service::admin_service_client::AdminServiceClient; use std::option::Option; // Unlike the normal use case of PyO3, we don't have to add attribute macros such as #[pyclass] or #[pymethods] to all of our flyteidl structs. @@ -23,17 +23,19 @@ use std::option::Option; #[pyclass(subclass)] pub struct FlyteClient { - admin_service: AdminServiceClient>, - runtime: Runtime, + admin_service: AdminServiceClient>, + runtime: Runtime, } struct AuthUnaryInterceptor; impl Interceptor for AuthUnaryInterceptor { - fn call(&mut self, mut request: tonic::Request<()>) -> Result, Status> { - let token: MetadataValue<_> = "Bearer some-auth-token".parse().unwrap(); - request.metadata_mut().insert("authorization", token.clone()); - Ok(request) + fn call(&mut self, mut request: tonic::Request<()>) -> Result, Status> { + let token: MetadataValue<_> = "Bearer some-auth-token".parse().unwrap(); + request + .metadata_mut() + .insert("authorization", token.clone()); + Ok(request) } } @@ -42,154 +44,193 @@ impl Interceptor for AuthUnaryInterceptor { // TODO: figure out how to pass in the required initial args into constructor in a clean and neat way. #[pymethods] impl FlyteClient { - #[new] // Without this, you cannot construct the underlying class in Python. - pub fn new() -> PyResult { - let rt = Builder::new_multi_thread().enable_all().build().unwrap(); - // TODO: Create a channel then bind it to every stubs/clients instead of connecting everytime. - let channel = rt.block_on(Endpoint::from_static("http://localhost:30080").connect()).unwrap(); - // let stub = rt.block_on(AdminServiceClient::connect("http://localhost:30080")).unwrap(); - let mut stub = AdminServiceClient::with_interceptor(channel, AuthUnaryInterceptor); - // TODO: Add more thoughtful error handling - Ok(FlyteClient { - runtime: rt, // The tokio runtime is used in a blocking manner now, leaving lots of investigation and TODOs behind. - admin_service: stub, + #[new] // Without this, you cannot construct the underlying class in Python. + pub fn new() -> PyResult { + let rt = Builder::new_multi_thread().enable_all().build().unwrap(); + // TODO: Create a channel then bind it to every stubs/clients instead of connecting everytime. + let channel = rt + .block_on(Endpoint::from_static("http://localhost:30080").connect()) + .unwrap(); + // let stub = rt.block_on(AdminServiceClient::connect("http://localhost:30080")).unwrap(); + let mut stub = AdminServiceClient::with_interceptor(channel, AuthUnaryInterceptor); + // TODO: Add more thoughtful error handling + Ok(FlyteClient { + runtime: rt, // The tokio runtime is used in a blocking manner now, leaving lots of investigation and TODOs behind. 
+ admin_service: stub, + }) } - ) - } - - // fn parse_from_bytes(pb2_type, buf: &[u8]) { - // } - // fn serialize_tobytes(proto) { - // } - - pub fn create_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { - let bytes = bytes_obj.as_bytes(); - let decoded: admin::TaskCreateRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); - let req = tonic::Request::new(decoded); - - // Interacting with the gRPC server: flyteadmin - let res = (self.runtime.block_on(self.admin_service.create_task(req))).unwrap().into_inner(); - - let mut buf = vec![]; - res.encode(&mut buf).unwrap(); - - Ok(PyBytes::new_bound(py, &buf).into()) - } - - pub fn get_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { - let bytes = bytes_obj.as_bytes(); - let decoded: admin::ObjectGetRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); - let req = tonic::Request::new(decoded); - - // Interacting with the gRPC server: flyteadmin - let res = (self.runtime.block_on(self.admin_service.get_task(req))).unwrap().into_inner(); - - let mut buf = vec![]; - res.encode(&mut buf).unwrap(); - - Ok(PyBytes::new_bound(py, &buf).into()) - } - - pub fn list_task_ids_paginated(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { - let bytes = bytes_obj.as_bytes(); - let decoded: admin::NamedEntityIdentifierListRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); - let req = tonic::Request::new(decoded); - // Interacting with the gRPC server: flyteadmin - let res = self.runtime.block_on(self.admin_service.list_task_ids(req)).unwrap().into_inner(); + // fn parse_from_bytes(pb2_type, buf: &[u8]) { + // } + // fn serialize_tobytes(proto) { + // } - let mut buf = vec![]; - res.encode(&mut buf).unwrap(); + pub fn create_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::TaskCreateRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); - Ok(PyBytes::new_bound(py, &buf).into()) - } + // Interacting with the gRPC server: flyteadmin + let res = (self.runtime.block_on(self.admin_service.create_task(req))) + .unwrap() + .into_inner(); - pub fn list_tasks_paginated(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { - let bytes = bytes_obj.as_bytes(); - let decoded: admin::ResourceListRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); - let req = tonic::Request::new(decoded); + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); - // Interacting with the gRPC server: flyteadmin - let res = self.runtime.block_on(self.admin_service.list_tasks(req)).unwrap().into_inner(); - - let mut buf = vec![]; - res.encode(&mut buf).unwrap(); + Ok(PyBytes::new_bound(py, &buf).into()) + } - Ok(PyBytes::new_bound(py, &buf).into()) - } + pub fn get_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::ObjectGetRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); - pub fn echo_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { // PyResult> - let bytes = bytes_obj.as_bytes(); - println!("Received bytes: {:?}", bytes); - let decoded: admin::Task = Message::decode(&bytes.to_vec()[..]).unwrap(); - println!("Parsed Task: {:?}", decoded); - let mut buf = vec![]; - decoded.encode(&mut buf).unwrap(); - println!("Serialized Task: {:?}", decoded); - // Returning bytes buffer - Ok(PyBytes::new_bound(py, &buf).into()) - } + // Interacting with the gRPC server: flyteadmin + let res = 
(self.runtime.block_on(self.admin_service.get_task(req))) + .unwrap() + .into_inner(); - pub fn create_workflow(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { - let bytes = bytes_obj.as_bytes(); - let decoded: admin::WorkflowCreateRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); - let req = tonic::Request::new(decoded); + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); - // Interacting with the gRPC server: flyteadmin - let res = (self.runtime.block_on(self.admin_service.create_workflow(req))).unwrap().into_inner(); + Ok(PyBytes::new_bound(py, &buf).into()) + } - let mut buf = vec![]; - res.encode(&mut buf).unwrap(); + pub fn list_task_ids_paginated( + &mut self, + py: Python, + bytes_obj: &PyBytes, + ) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::NamedEntityIdentifierListRequest = + Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = self + .runtime + .block_on(self.admin_service.list_task_ids(req)) + .unwrap() + .into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } - Ok(PyBytes::new_bound(py, &buf).into()) - } + pub fn list_tasks_paginated(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::ResourceListRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); - pub fn get_workflow(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { - let bytes = bytes_obj.as_bytes(); - let decoded: admin::ObjectGetRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); - let req = tonic::Request::new(decoded); + // Interacting with the gRPC server: flyteadmin + let res = self + .runtime + .block_on(self.admin_service.list_tasks(req)) + .unwrap() + .into_inner(); - // Interacting with the gRPC server: flyteadmin - let res = (self.runtime.block_on(self.admin_service.get_workflow(req))).unwrap().into_inner(); + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); - let mut buf = vec![]; - res.encode(&mut buf).unwrap(); + Ok(PyBytes::new_bound(py, &buf).into()) + } - Ok(PyBytes::new_bound(py, &buf).into()) - } + pub fn echo_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + // PyResult> + let bytes = bytes_obj.as_bytes(); + println!("Received bytes: {:?}", bytes); + let decoded: admin::Task = Message::decode(&bytes.to_vec()[..]).unwrap(); + println!("Parsed Task: {:?}", decoded); + let mut buf = vec![]; + decoded.encode(&mut buf).unwrap(); + println!("Serialized Task: {:?}", decoded); + // Returning bytes buffer + Ok(PyBytes::new_bound(py, &buf).into()) + } - pub fn list_workflow_ids_paginated(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { - let bytes = bytes_obj.as_bytes(); - let decoded: admin::NamedEntityIdentifierListRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); - let req = tonic::Request::new(decoded); + pub fn create_workflow(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::WorkflowCreateRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); - // Interacting with the gRPC server: flyteadmin - let res = self.runtime.block_on(self.admin_service.list_workflow_ids(req)).unwrap().into_inner(); + // Interacting with the gRPC server: flyteadmin + let res = (self + .runtime + .block_on(self.admin_service.create_workflow(req))) + 
.unwrap() + .into_inner(); - let mut buf = vec![]; - res.encode(&mut buf).unwrap(); + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); - Ok(PyBytes::new_bound(py, &buf).into()) - } + Ok(PyBytes::new_bound(py, &buf).into()) + } - pub fn list_workflows_paginated(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { - let bytes = bytes_obj.as_bytes(); - let decoded: admin::ResourceListRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); - let req = tonic::Request::new(decoded); + pub fn get_workflow(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::ObjectGetRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); - // Interacting with the gRPC server: flyteadmin - let res = self.runtime.block_on(self.admin_service.list_workflows(req)).unwrap().into_inner(); + // Interacting with the gRPC server: flyteadmin + let res = (self.runtime.block_on(self.admin_service.get_workflow(req))) + .unwrap() + .into_inner(); - let mut buf = vec![]; - res.encode(&mut buf).unwrap(); + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); - Ok(PyBytes::new_bound(py, &buf).into()) - } + Ok(PyBytes::new_bound(py, &buf).into()) + } + pub fn list_workflow_ids_paginated( + &mut self, + py: Python, + bytes_obj: &PyBytes, + ) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::NamedEntityIdentifierListRequest = + Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = self + .runtime + .block_on(self.admin_service.list_workflow_ids(req)) + .unwrap() + .into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } + pub fn list_workflows_paginated( + &mut self, + py: Python, + bytes_obj: &PyBytes, + ) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::ResourceListRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = self + .runtime + .block_on(self.admin_service.list_workflows(req)) + .unwrap() + .into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } } - // Some trials // fn tokio() -> &'static tokio::runtime::Runtime { // use std::sync::OnceLock; From e4f9a3f9ce9b0f3041f20cc797a87d2120b81127 Mon Sep 17 00:00:00 2001 From: Austin Liu Date: Fri, 19 Apr 2024 16:51:22 +0800 Subject: [PATCH 13/16] port launch_plan api Signed-off-by: Austin Liu --- flyrs/clients/friendly.py | 51 ++++++----- flyrs/src/lib.rs | 168 ++++++++++++++++++++++++++++++++++--- flyrs/test_flyte_remote.py | 8 ++ 3 files changed, 193 insertions(+), 34 deletions(-) diff --git a/flyrs/clients/friendly.py b/flyrs/clients/friendly.py index 157dbe9ffb..c1161ca8fa 100644 --- a/flyrs/clients/friendly.py +++ b/flyrs/clients/friendly.py @@ -295,7 +295,7 @@ def list_workflows_paginated(self, identifier, limit=100, token=None, filters=No token=token, filters=_filters.FilterList(filters or []).to_flyte_idl(), sort_by=None if sort_by is None else sort_by.to_flyte_idl(), - ) + ).SerializeToString() ) # TODO: tmp workaround workflows = _task_pb2.TaskList() @@ -350,7 +350,7 @@ def create_launch_plan(self, launch_plan_identifer, launch_plan_spec): _launch_plan_pb2.LaunchPlanCreateRequest( id=launch_plan_identifer.to_flyte_idl(), spec=launch_plan_spec.to_flyte_idl(), - ) + 
).SerializeToString()
         )
 
     def get_launch_plan(self, id):
@@ -360,10 +360,12 @@ def get_launch_plan(self, id):
         :param flytekit.models.core.identifier.Identifier id: unique identifier for launch plan to retrieve
         :rtype: flytekit.models.launch_plan.LaunchPlan
         """
-        return _launch_plan.LaunchPlan.from_flyte_idl(
-            super(RustSynchronousFlyteClient, self).get_launch_plan(_common_pb2.ObjectGetRequest(id=id.to_flyte_idl()))
+        launch_plan = _launch_plan_pb2.LaunchPlan()
+        launch_plan.ParseFromString(
+            super(RustSynchronousFlyteClient, self).get_launch_plan(_common_pb2.ObjectGetRequest(id=id.to_flyte_idl()).SerializeToString())
         )
-
+        return _launch_plan.LaunchPlan.from_flyte_idl(launch_plan)
+
     def get_active_launch_plan(self, identifier):
         """
         Retrieves the active launch plan entity given a named entity identifier (project, domain, name). Raises an
@@ -372,11 +374,11 @@ def get_active_launch_plan(self, identifier):
         :param flytekit.models.common.NamedEntityIdentifier identifier: NamedEntityIdentifier to list.
         :rtype: flytekit.models.launch_plan.LaunchPlan
         """
-        return _launch_plan.LaunchPlan.from_flyte_idl(
-            super(RustSynchronousFlyteClient, self).get_active_launch_plan(
-                _launch_plan_pb2.ActiveLaunchPlanRequest(id=identifier.to_flyte_idl())
-            )
+        launch_plan = _launch_plan_pb2.LaunchPlan()
+        launch_plan.ParseFromString(
+            super(RustSynchronousFlyteClient, self).get_active_launch_plan(_launch_plan_pb2.ActiveLaunchPlanRequest(id=identifier.to_flyte_idl()).SerializeToString())
         )
+        return _launch_plan.LaunchPlan.from_flyte_idl(launch_plan)
 
     def list_launch_plan_ids_paginated(self, project, domain, limit=100, token=None, sort_by=None):
@@ -412,11 +414,13 @@ def list_launch_plan_ids_paginated(self, project, domain, limit=100, token=None,
             limit=limit,
             token=token,
             sort_by=None if sort_by is None else sort_by.to_flyte_idl(),
-            )
+            ).SerializeToString()
         )
+        ids = _common_pb2.NamedEntityIdentifierList()
+        ids.ParseFromString(identifier_list)
         return (
-            [_common.NamedEntityIdentifier.from_flyte_idl(identifier_pb) for identifier_pb in identifier_list.entities],
-            str(identifier_list.token),
+            [_common.NamedEntityIdentifier.from_flyte_idl(identifier_pb) for identifier_pb in ids.entities],
+            str(ids.token),
         )
 
     def list_launch_plans_paginated(self, identifier, limit=100, token=None, filters=None, sort_by=None):
@@ -454,14 +458,16 @@ def list_launch_plans_paginated(self, identifier, limit=100, token=None, filters
             token=token,
             filters=_filters.FilterList(filters or []).to_flyte_idl(),
             sort_by=None if sort_by is None else sort_by.to_flyte_idl(),
-            )
+            ).SerializeToString()
         )
         # TODO: tmp workaround
-        for pb in lp_list.launch_plans:
+        launchplans = _launch_plan_pb2.LaunchPlanList()
+        launchplans.ParseFromString(lp_list)
+        for pb in launchplans.launch_plans:
             pb.id.resource_type = _identifier.ResourceType.LAUNCH_PLAN
         return (
-            [_launch_plan.LaunchPlan.from_flyte_idl(pb) for pb in lp_list.launch_plans],
-            str(lp_list.token),
+            [_launch_plan.LaunchPlan.from_flyte_idl(pb) for pb in launchplans.launch_plans],
+            str(launchplans.token),
         )
 
     def list_active_launch_plans_paginated(
@@ -500,16 +506,17 @@ def list_active_launch_plans_paginated(
             limit=limit,
             token=token,
             sort_by=None if sort_by is None else sort_by.to_flyte_idl(),
-            )
+            ).SerializeToString()
         )
         # TODO: tmp workaround
-        for pb in lp_list.launch_plans:
+        launchplans = _launch_plan_pb2.LaunchPlanList()
+        launchplans.ParseFromString(lp_list)
+        for pb in launchplans.launch_plans:
             pb.id.resource_type = _identifier.ResourceType.LAUNCH_PLAN
         return (
-            [_launch_plan.LaunchPlan.from_flyte_idl(pb) for pb in lp_list.launch_plans],
-            str(lp_list.token),
+
[_launch_plan.LaunchPlan.from_flyte_idl(pb) for pb in lp_list.launch_plans], - str(lp_list.token), + [_launch_plan.LaunchPlan.from_flyte_idl(pb) for pb in launchplans.launch_plans], + str(launchplans.token), ) - def update_launch_plan(self, id, state): """ Updates a launch plan. Currently, this can only be used to update a given launch plan's state (ACTIVE v. @@ -521,7 +528,7 @@ def update_launch_plan(self, id, state): :param int state: Enum value from flytekit.models.launch_plan.LaunchPlanState """ super(RustSynchronousFlyteClient, self).update_launch_plan( - _launch_plan_pb2.LaunchPlanUpdateRequest(id=id.to_flyte_idl(), state=state) + _launch_plan_pb2.LaunchPlanUpdateRequest(id=id.to_flyte_idl(), state=state).SerializeToString() ) #################################################################################################################### diff --git a/flyrs/src/lib.rs b/flyrs/src/lib.rs index 46675a9f10..3748761442 100644 --- a/flyrs/src/lib.rs +++ b/flyrs/src/lib.rs @@ -138,18 +138,18 @@ impl FlyteClient { Ok(PyBytes::new_bound(py, &buf).into()) } - pub fn echo_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { - // PyResult> - let bytes = bytes_obj.as_bytes(); - println!("Received bytes: {:?}", bytes); - let decoded: admin::Task = Message::decode(&bytes.to_vec()[..]).unwrap(); - println!("Parsed Task: {:?}", decoded); - let mut buf = vec![]; - decoded.encode(&mut buf).unwrap(); - println!("Serialized Task: {:?}", decoded); - // Returning bytes buffer - Ok(PyBytes::new_bound(py, &buf).into()) - } + // pub fn echo_task(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + // // PyResult> + // let bytes = bytes_obj.as_bytes(); + // println!("Received bytes: {:?}", bytes); + // let decoded: admin::Task = Message::decode(&bytes.to_vec()[..]).unwrap(); + // println!("Parsed Task: {:?}", decoded); + // let mut buf = vec![]; + // decoded.encode(&mut buf).unwrap(); + // println!("Serialized Task: {:?}", decoded); + // // Returning bytes buffer + // Ok(PyBytes::new_bound(py, &buf).into()) + // } pub fn create_workflow(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { let bytes = bytes_obj.as_bytes(); @@ -229,6 +229,150 @@ impl FlyteClient { Ok(PyBytes::new_bound(py, &buf).into()) } + + pub fn create_launch_plan(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::LaunchPlanCreateRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = (self + .runtime + .block_on(self.admin_service.create_launch_plan(req))) + .unwrap() + .into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } + + pub fn get_launch_plan(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::ObjectGetRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = (self + .runtime + .block_on(self.admin_service.get_launch_plan(req))) + .unwrap() + .into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } + + pub fn list_launch_plan_ids_paginated( + &mut self, + py: Python, + bytes_obj: &PyBytes, + ) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::NamedEntityIdentifierListRequest = + 
Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = self + .runtime + .block_on(self.admin_service.list_launch_plan_ids(req)) + .unwrap() + .into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } + + pub fn list_launch_plans_paginated( + &mut self, + py: Python, + bytes_obj: &PyBytes, + ) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::ResourceListRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = self + .runtime + .block_on(self.admin_service.list_launch_plans(req)) + .unwrap() + .into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } + + pub fn update_launch_plan(&mut self, py: Python, bytes_obj: &PyBytes) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::LaunchPlanUpdateRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = (self + .runtime + .block_on(self.admin_service.update_launch_plan(req))) + .unwrap() + .into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } + + pub fn get_active_launch_plan( + &mut self, + py: Python, + bytes_obj: &PyBytes, + ) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::ActiveLaunchPlanRequest = Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = (self + .runtime + .block_on(self.admin_service.get_active_launch_plan(req))) + .unwrap() + .into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } + + pub fn list_active_launch_plans_paginated( + &mut self, + py: Python, + bytes_obj: &PyBytes, + ) -> PyResult { + let bytes = bytes_obj.as_bytes(); + let decoded: admin::ActiveLaunchPlanListRequest = + Message::decode(&bytes.to_vec()[..]).unwrap(); + let req = tonic::Request::new(decoded); + + // Interacting with the gRPC server: flyteadmin + let res = self + .runtime + .block_on(self.admin_service.list_active_launch_plans(req)) + .unwrap() + .into_inner(); + + let mut buf = vec![]; + res.encode(&mut buf).unwrap(); + + Ok(PyBytes::new_bound(py, &buf).into()) + } } // Some trials diff --git a/flyrs/test_flyte_remote.py b/flyrs/test_flyte_remote.py index fb887792d8..e2e585d1d8 100644 --- a/flyrs/test_flyte_remote.py +++ b/flyrs/test_flyte_remote.py @@ -36,3 +36,11 @@ ) assert workflow_py == workflow_rs +launchplan_py = remote_py.fetch_launch_plan( + project=PROJECT, domain=DOMAIN, name=WF_NAME, version=VERSION_ID + ) +launchplan_rs = remote_rs.fetch_launch_plan( + project=PROJECT, domain=DOMAIN, name=WF_NAME, version=VERSION_ID +) +assert workflow_py == workflow_rs + From 9f20d3f94b4321d3bd50cc6943d508e04bc606c3 Mon Sep 17 00:00:00 2001 From: Austin Liu Date: Fri, 19 Apr 2024 17:28:55 +0800 Subject: [PATCH 14/16] refine testing Signed-off-by: Austin Liu refine testing Signed-off-by: Austin Liu --- flyrs/readme.md | 2 +- flyrs/test_flyte_remote.py | 70 +++++++++++++++++++++----------------- 2 files changed, 40 insertions(+), 32 deletions(-) diff --git a/flyrs/readme.md b/flyrs/readme.md index 46a55a1fdf..6f7e63ff55 100644 --- 
a/flyrs/readme.md
+++ b/flyrs/readme.md
@@ -8,4 +8,4 @@
 ### How to test Rust FlyteRemote Client?
 1. `pyflyte register ./t.py` to get a flyte entity `version` id
 2. Set the previously fetched `version` id in `./test_flyte_remote.py`'s `VERSION_ID`
-3. `python ./test_flyte_remote.py` in `flyrs/`
\ No newline at end of file
+3. `pytest ./test_flyte_remote.py` inside `flyrs/`
\ No newline at end of file
diff --git a/flyrs/test_flyte_remote.py b/flyrs/test_flyte_remote.py
index e2e585d1d8..9b76f75649 100644
--- a/flyrs/test_flyte_remote.py
+++ b/flyrs/test_flyte_remote.py
@@ -12,35 +12,43 @@
 remote_py = FlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN)
 remote_rs = RustFlyteRemote(Config.auto(), default_project=PROJECT, default_domain=DOMAIN)
 
-task_py = remote_py.fetch_task(
-    project=PROJECT, domain=DOMAIN, name=TASK_NAME, version=VERSION_ID
-)
-task_rs = remote_rs.fetch_task(
-    project=PROJECT, domain=DOMAIN, name=TASK_NAME, version=VERSION_ID
-)
-assert task_py == task_rs
-
-tasks_py = remote_py.list_tasks_by_version(
-    project=PROJECT, domain=DOMAIN, version=VERSION_ID
-)
-tasks_rs = remote_rs.list_tasks_by_version(
-    project=PROJECT, domain=DOMAIN, version=VERSION_ID
-)
-assert tasks_py == tasks_rs
-
-workflow_py = remote_py.fetch_workflow(
-    project=PROJECT, domain=DOMAIN, name=WF_NAME, version=VERSION_ID
-    )
-workflow_rs = remote_rs.fetch_workflow(
-    project=PROJECT, domain=DOMAIN, name=WF_NAME, version=VERSION_ID
-)
-assert workflow_py == workflow_rs
-
-launchplan_py = remote_py.fetch_launch_plan(
-    project=PROJECT, domain=DOMAIN, name=WF_NAME, version=VERSION_ID
-    )
-launchplan_rs = remote_rs.fetch_launch_plan(
-    project=PROJECT, domain=DOMAIN, name=WF_NAME, version=VERSION_ID
-)
-assert workflow_py == workflow_rs
+
+## test remote endpoints
+
+def test_fetch_task():
+    task_py = remote_py.fetch_task(
+        project=PROJECT, domain=DOMAIN, name=TASK_NAME, version=VERSION_ID
+    )
+    task_rs = remote_rs.fetch_task(
+        project=PROJECT, domain=DOMAIN, name=TASK_NAME, version=VERSION_ID
+    )
+    assert task_py == task_rs
+
+def test_list_tasks_by_version():
+    tasks_py = remote_py.list_tasks_by_version(
+        project=PROJECT, domain=DOMAIN, version=VERSION_ID
+    )
+    tasks_rs = remote_rs.list_tasks_by_version(
+        project=PROJECT, domain=DOMAIN, version=VERSION_ID
+    )
+    assert len(tasks_py) == 1
+    assert len(tasks_rs) == 1
+    assert tasks_py == tasks_rs
+
+def test_fetch_workflow():
+    workflow_py = remote_py.fetch_workflow(
+        project=PROJECT, domain=DOMAIN, name=WF_NAME, version=VERSION_ID
+    )
+    workflow_rs = remote_rs.fetch_workflow(
+        project=PROJECT, domain=DOMAIN, name=WF_NAME, version=VERSION_ID
+    )
+    assert workflow_py == workflow_rs
+
+def test_fetch_launch_plan():
+    launchplan_py = remote_py.fetch_launch_plan(
+        project=PROJECT, domain=DOMAIN, name=WF_NAME, version=VERSION_ID
+    )
+    launchplan_rs = remote_rs.fetch_launch_plan(
+        project=PROJECT, domain=DOMAIN, name=WF_NAME, version=VERSION_ID
+    )
+    assert launchplan_py == launchplan_rs

From 86c9a7616a1fd64a6352a40ca350906f19ed1bf1 Mon Sep 17 00:00:00 2001
From: Austin Liu
Date: Sun, 21 Apr 2024 15:33:27 +0800
Subject: [PATCH 15/16] cleanup imports

Signed-off-by: Austin Liu
---
 flyrs/clients/backfill.py        | 107 ----
 flyrs/clients/data.py            |  55 --
 flyrs/clients/entities.py        | 839 -------
 flyrs/clients/executions.py      | 212 --------
 flyrs/clients/interface.py       |  11 -
 flyrs/clients/lazy_entity.py     |  67 ---
 flyrs/clients/remote_callable.py |  75 ---
 flyrs/clients/remote_fs.py       | 266 ----------
 flyrs/remote/remote.py           |  16 +-
 9 files 
changed, 8 insertions(+), 1640 deletions(-) delete mode 100644 flyrs/clients/backfill.py delete mode 100644 flyrs/clients/data.py delete mode 100644 flyrs/clients/entities.py delete mode 100644 flyrs/clients/executions.py delete mode 100644 flyrs/clients/interface.py delete mode 100644 flyrs/clients/lazy_entity.py delete mode 100644 flyrs/clients/remote_callable.py delete mode 100644 flyrs/clients/remote_fs.py diff --git a/flyrs/clients/backfill.py b/flyrs/clients/backfill.py deleted file mode 100644 index 98649410d6..0000000000 --- a/flyrs/clients/backfill.py +++ /dev/null @@ -1,107 +0,0 @@ -import logging -import typing -from datetime import datetime, timedelta - -from croniter import croniter - -from flytekit import LaunchPlan -from flytekit.core.workflow import ImperativeWorkflow, WorkflowBase, WorkflowFailurePolicy -from clients.entities import FlyteLaunchPlan - - -def create_backfill_workflow( - start_date: datetime, - end_date: datetime, - for_lp: typing.Union[LaunchPlan, FlyteLaunchPlan], - parallel: bool = False, - per_node_timeout: timedelta = None, - per_node_retries: int = 0, - failure_policy: typing.Optional[WorkflowFailurePolicy] = None, -) -> typing.Tuple[WorkflowBase, datetime, datetime]: - """ - Generates a new imperative workflow for the launchplan that can be used to backfill the given launchplan. - This can only be used to generate backfilling workflow only for schedulable launchplans - - the Backfill plan is generated as (start_date - exclusive, end_date inclusive) - - .. code-block:: python - :caption: Correct usage for dates example - - lp = Launchplan.get_or_create(...) - start_date = datetime.datetime(2023, 1, 1) - end_date = start_date + datetime.timedelta(days=10) - wf = create_backfill_workflow(start_date, end_date, for_lp=lp) - - - .. code-block:: python - :caption: Incorrect date example - - wf = create_backfill_workflow(end_date, start_date, for_lp=lp) # end_date is before start_date - # OR - wf = create_backfill_workflow(start_date, start_date, for_lp=lp) # start and end date are same - - - :param start_date: datetime generate a backfill starting at this datetime (exclusive) - :param end_date: datetime generate a backfill ending at this datetime (inclusive) - :param for_lp: typing.Union[LaunchPlan, FlyteLaunchPlan] the backfill is generated for this launchplan - :param parallel: if the backfill should be run in parallel. False (default) will run each bacfill sequentially - :param per_node_timeout: timedelta Timeout to use per node - :param per_node_retries: int Retries to user per node - :param failure_policy: WorkflowFailurePolicy Failure policy to use for the backfill workflow - :return: WorkflowBase, datetime datetime -> New generated workflow, datetime for first instance of backfill, datetime for last instance of backfill - """ - if not for_lp: - raise ValueError("Launch plan is required!") - - if start_date >= end_date: - raise ValueError( - f"for a backfill start date should be earlier than end date. Received {start_date} -> {end_date}" - ) - - schedule = for_lp.entity_metadata.schedule if isinstance(for_lp, FlyteLaunchPlan) else for_lp.schedule - - if schedule is None: - raise ValueError("Backfill can only be created for scheduled launch plans") - - if schedule.cron_schedule is not None: - cron_schedule = schedule.cron_schedule - else: - raise NotImplementedError("Currently backfilling only supports cron schedules.") - - logging.info( - f"Generating backfill from {start_date} -> {end_date}. 
" - f"Parallel?[{parallel}] FailurePolicy[{str(failure_policy)}]" - ) - wf = ImperativeWorkflow(name=f"backfill-{for_lp.name}", failure_policy=failure_policy) - - input_name = schedule.kickoff_time_input_arg - date_iter = croniter(cron_schedule.schedule, start_time=start_date, ret_type=datetime) - prev_node = None - actual_start = None - actual_end = None - while True: - next_start_date = date_iter.get_next() - if not actual_start: - actual_start = next_start_date - if next_start_date >= end_date: - break - actual_end = next_start_date - inputs = {} - if input_name: - inputs[input_name] = next_start_date - next_node = wf.add_launch_plan(for_lp, **inputs) - next_node = next_node.with_overrides( - name=f"b-{next_start_date}", retries=per_node_retries, timeout=per_node_timeout - ) - if not parallel: - if prev_node: - prev_node.runs_before(next_node) - prev_node = next_node - - if actual_end is None: - raise StopIteration( - f"The time window is too small for any backfill instances, first instance after start" - f" date is {actual_start}" - ) - - return wf, actual_start, actual_end diff --git a/flyrs/clients/data.py b/flyrs/clients/data.py deleted file mode 100644 index 84fcff1420..0000000000 --- a/flyrs/clients/data.py +++ /dev/null @@ -1,55 +0,0 @@ -import os -import pathlib -import typing - -from google.protobuf.json_format import MessageToJson -from rich import print - -from flytekit import BlobType, Literal -from flytekit.core.data_persistence import FileAccessProvider -from flytekit.interaction.rich_utils import RichCallback -from flytekit.interaction.string_literals import literal_string_repr - - -def download_literal( - file_access: FileAccessProvider, var: str, data: Literal, download_to: typing.Optional[pathlib.Path] = None -): - """ - Download a single literal to a file, if it is a blob or structured dataset. - """ - if data is None: - print(f"Skipping {var} as it is None.") - return - if data.scalar: - if data.scalar and (data.scalar.blob or data.scalar.structured_dataset): - uri = data.scalar.blob.uri if data.scalar.blob else data.scalar.structured_dataset.uri - if uri is None: - print("No data to download.") - return - is_multipart = False - if data.scalar.blob: - is_multipart = data.scalar.blob.metadata.type.dimensionality == BlobType.BlobDimensionality.MULTIPART - elif data.scalar.structured_dataset: - is_multipart = True - file_access.get_data( - uri, str(download_to / var) + os.sep, is_multipart=is_multipart, callback=RichCallback() - ) - elif data.scalar.union is not None: - download_literal(file_access, var, data.scalar.union.value, download_to) - elif data.scalar.generic is not None: - with open(download_to / f"{var}.json", "w") as f: - f.write(MessageToJson(data.scalar.generic)) - else: - print( - f"[dim]Skipping {var} val {literal_string_repr(data)} as it is not a blob, structured dataset," - f" or generic type.[/dim]" - ) - return - elif data.collection: - for i, v in enumerate(data.collection.literals): - download_literal(file_access, f"{i}", v, download_to / var) - elif data.map: - download_to = pathlib.Path(download_to) - for k, v in data.map.literals.items(): - download_literal(file_access, f"{k}", v, download_to / var) - print(f"Downloaded f{var} to {download_to}") diff --git a/flyrs/clients/entities.py b/flyrs/clients/entities.py deleted file mode 100644 index 9adba429a6..0000000000 --- a/flyrs/clients/entities.py +++ /dev/null @@ -1,839 +0,0 @@ -""" -This module contains shadow entities for all Flyte entities as represented in Flyte Admin / Control Plane. 
-The goal is to enable easy access, manipulation of these entities. -""" -from __future__ import annotations - -from typing import Dict, List, Optional, Tuple, Union - -from flytekit import FlyteContext -from flytekit.core import constants as _constants -from flytekit.core import hash as _hash_mixin -from flytekit.core import hash as hash_mixin -from flytekit.core.promise import create_and_link_node_from_remote -from flytekit.exceptions import system as _system_exceptions -from flytekit.exceptions import user as _user_exceptions -from flytekit.loggers import logger -from flytekit.models import interface as _interface_models -from flytekit.models import launch_plan as _launch_plan_model -from flytekit.models import launch_plan as _launch_plan_models -from flytekit.models import launch_plan as launch_plan_models -from flytekit.models import task as _task_model -from flytekit.models import task as _task_models -from flytekit.models.admin.workflow import WorkflowSpec -from flytekit.models.core import compiler as compiler_models -from flytekit.models.core import identifier as _identifier_model -from flytekit.models.core import identifier as id_models -from flytekit.models.core import workflow as _workflow_model -from flytekit.models.core import workflow as _workflow_models -from flytekit.models.core.identifier import Identifier -from flytekit.models.core.workflow import Node, WorkflowMetadata, WorkflowMetadataDefaults -from flytekit.models.interface import TypedInterface -from flytekit.models.literals import Binding -from flytekit.models.task import TaskSpec -import clients.interface as _interfaces -from clients.remote_callable import RemoteEntity - - -class FlyteTask(hash_mixin.HashOnReferenceMixin, RemoteEntity, TaskSpec): - """A class encapsulating a remote Flyte task.""" - - def __init__( - self, - id, - type, - metadata, - interface, - custom, - container=None, - task_type_version: int = 0, - config=None, - should_register: bool = False, - ): - super(FlyteTask, self).__init__( - template=_task_model.TaskTemplate( - id, - type, - metadata, - interface, - custom, - container=container, - task_type_version=task_type_version, - config=config, - ) - ) - self._should_register = should_register - - @property - def id(self): - """ - This is generated by the system and uniquely identifies the task. - - :rtype: flytekit.models.core.identifier.Identifier - """ - return self.template.id - - @property - def type(self): - """ - This is used to identify additional extensions for use by Propeller or SDK. - - :rtype: Text - """ - return self.template.type - - @property - def metadata(self): - """ - This contains information needed at runtime to determine behavior such as whether or not outputs are - discoverable, timeouts, and retries. - - :rtype: TaskMetadata - """ - return self.template.metadata - - @property - def interface(self): - """ - The interface definition for this task. - - :rtype: flytekit.models.interface.TypedInterface - """ - return self.template.interface - - @property - def custom(self): - """ - Arbitrary dictionary containing metadata for custom plugins. - - :rtype: dict[Text, T] - """ - return self.template.custom - - @property - def task_type_version(self): - return self.template.task_type_version - - @property - def container(self): - """ - If not None, the target of execution should be a container. - - :rtype: Container - """ - return self.template.container - - @property - def config(self): - """ - Arbitrary dictionary containing metadata for parsing and handling custom plugins. 
- - :rtype: dict[Text, T] - """ - return self.template.config - - @property - def security_context(self): - return self.template.security_context - - @property - def k8s_pod(self): - return self.template.k8s_pod - - @property - def sql(self): - return self.template.sql - - @property - def should_register(self) -> bool: - return self._should_register - - @property - def name(self) -> str: - return self.template.id.name - - @property - def resource_type(self) -> _identifier_model.ResourceType: - return _identifier_model.ResourceType.TASK - - @property - def entity_type_text(self) -> str: - return "Task" - - @classmethod - def promote_from_model(cls, base_model: _task_model.TaskTemplate) -> FlyteTask: - t = cls( - id=base_model.id, - type=base_model.type, - metadata=base_model.metadata, - interface=_interfaces.TypedInterface.promote_from_model(base_model.interface), - custom=base_model.custom, - container=base_model.container, - task_type_version=base_model.task_type_version, - ) - # Override the newly generated name if one exists in the base model - if not base_model.id.is_empty: - t._id = base_model.id - - return t - - -class FlyteTaskNode(_workflow_model.TaskNode): - """A class encapsulating a task that a Flyte node needs to execute.""" - - def __init__(self, flyte_task: FlyteTask): - super(FlyteTaskNode, self).__init__(None) - self._flyte_task = flyte_task - - @property - def reference_id(self) -> id_models.Identifier: - """A globally unique identifier for the task.""" - return self._flyte_task.id - - @property - def flyte_task(self) -> FlyteTask: - return self._flyte_task - - @classmethod - def promote_from_model(cls, task: FlyteTask) -> FlyteTaskNode: - """ - Takes the idl wrapper for a TaskNode, - and returns the hydrated Flytekit object for it by fetching it with the FlyteTask control plane. - """ - return cls(flyte_task=task) - - -class FlyteWorkflowNode(_workflow_model.WorkflowNode): - """A class encapsulating a workflow that a Flyte node needs to execute.""" - - def __init__( - self, - flyte_workflow: FlyteWorkflow = None, - flyte_launch_plan: FlyteLaunchPlan = None, - ): - if flyte_workflow and flyte_launch_plan: - raise _system_exceptions.FlyteSystemException( - "FlyteWorkflowNode cannot be called with both a workflow and a launchplan specified, please pick " - f"one. 
workflow: {flyte_workflow} launchPlan: {flyte_launch_plan}", - ) - - self._flyte_workflow = flyte_workflow - self._flyte_launch_plan = flyte_launch_plan - super(FlyteWorkflowNode, self).__init__( - launchplan_ref=self._flyte_launch_plan.id if self._flyte_launch_plan else None, - sub_workflow_ref=self._flyte_workflow.id if self._flyte_workflow else None, - ) - - def __repr__(self) -> str: - if self.flyte_workflow is not None: - return f"FlyteWorkflowNode with workflow: {self.flyte_workflow}" - return f"FlyteWorkflowNode with launch plan: {self.flyte_launch_plan}" - - @property - def launchplan_ref(self) -> id_models.Identifier: - """A globally unique identifier for the launch plan, which should map to Admin.""" - return self._flyte_launch_plan.id if self._flyte_launch_plan else None - - @property - def sub_workflow_ref(self): - return self._flyte_workflow.id if self._flyte_workflow else None - - @property - def flyte_launch_plan(self) -> FlyteLaunchPlan: - return self._flyte_launch_plan - - @property - def flyte_workflow(self) -> FlyteWorkflow: - return self._flyte_workflow - - @classmethod - def _promote_workflow( - cls, - wf: _workflow_models.WorkflowTemplate, - sub_workflows: Optional[Dict[Identifier, _workflow_models.WorkflowTemplate]] = None, - tasks: Optional[Dict[Identifier, FlyteTask]] = None, - node_launch_plans: Optional[Dict[Identifier, launch_plan_models.LaunchPlanSpec]] = None, - ) -> FlyteWorkflow: - return FlyteWorkflow.promote_from_model( - wf, - sub_workflows=sub_workflows, - node_launch_plans=node_launch_plans, - tasks=tasks, - ) - - @classmethod - def promote_from_model( - cls, - base_model: _workflow_model.WorkflowNode, - sub_workflows: Dict[id_models.Identifier, _workflow_model.WorkflowTemplate], - node_launch_plans: Dict[id_models.Identifier, _launch_plan_model.LaunchPlanSpec], - tasks: Dict[Identifier, FlyteTask], - converted_sub_workflows: Dict[id_models.Identifier, FlyteWorkflow], - ) -> Tuple[FlyteWorkflowNode, Dict[id_models.Identifier, FlyteWorkflow]]: - if base_model.launchplan_ref is not None: - return ( - cls( - flyte_launch_plan=FlyteLaunchPlan.promote_from_model( - base_model.launchplan_ref, node_launch_plans[base_model.launchplan_ref] - ) - ), - converted_sub_workflows, - ) - elif base_model.sub_workflow_ref is not None: - # the workflow templates for sub-workflows should have been included in the original response - if base_model.reference in sub_workflows: - wf = None - if base_model.reference not in converted_sub_workflows: - wf = cls._promote_workflow( - sub_workflows[base_model.reference], - sub_workflows=sub_workflows, - node_launch_plans=node_launch_plans, - tasks=tasks, - ) - converted_sub_workflows[base_model.reference] = wf - else: - wf = converted_sub_workflows[base_model.reference] - return cls(flyte_workflow=wf), converted_sub_workflows - raise _system_exceptions.FlyteSystemException(f"Subworkflow {base_model.reference} not found.") - - raise _system_exceptions.FlyteSystemException( - "Bad workflow node model, neither subworkflow nor launchplan specified." 
- ) - - -class FlyteBranchNode(_workflow_model.BranchNode): - def __init__(self, if_else: _workflow_model.IfElseBlock): - super().__init__(if_else) - - @classmethod - def promote_from_model( - cls, - base_model: _workflow_model.BranchNode, - sub_workflows: Dict[id_models.Identifier, _workflow_model.WorkflowTemplate], - node_launch_plans: Dict[id_models.Identifier, _launch_plan_model.LaunchPlanSpec], - tasks: Dict[id_models.Identifier, FlyteTask], - converted_sub_workflows: Dict[id_models.Identifier, FlyteWorkflow], - ) -> Tuple[FlyteBranchNode, Dict[id_models.Identifier, FlyteWorkflow]]: - block = base_model.if_else - block.case._then_node, converted_sub_workflows = FlyteNode.promote_from_model( - block.case.then_node, - sub_workflows, - node_launch_plans, - tasks, - converted_sub_workflows, - ) - - for o in block.other: - o._then_node, converted_sub_workflows = FlyteNode.promote_from_model( - o.then_node, sub_workflows, node_launch_plans, tasks, converted_sub_workflows - ) - - else_node = None - if block.else_node: - else_node, converted_sub_workflows = FlyteNode.promote_from_model( - block.else_node, sub_workflows, node_launch_plans, tasks, converted_sub_workflows - ) - - new_if_else_block = _workflow_model.IfElseBlock(block.case, block.other, else_node, block.error) - - return cls(new_if_else_block), converted_sub_workflows - - -class FlyteGateNode(_workflow_model.GateNode): - @classmethod - def promote_from_model(cls, model: _workflow_model.GateNode): - return cls(model.signal, model.sleep, model.approve) - - -class FlyteArrayNode(_workflow_model.ArrayNode): - @classmethod - def promote_from_model(cls, model: _workflow_model.ArrayNode): - return cls(model._parallelism, model._node, model._min_success_ratio, model._min_successes) - - -class FlyteNode(_hash_mixin.HashOnReferenceMixin, _workflow_model.Node): - """A class encapsulating a remote Flyte node.""" - - def __init__( - self, - id, - upstream_nodes, - bindings, - metadata, - task_node: Optional[FlyteTaskNode] = None, - workflow_node: Optional[FlyteWorkflowNode] = None, - branch_node: Optional[FlyteBranchNode] = None, - gate_node: Optional[FlyteGateNode] = None, - array_node: Optional[FlyteArrayNode] = None, - ): - if not task_node and not workflow_node and not branch_node and not gate_node and not array_node: - raise _user_exceptions.FlyteAssertion( - "An Flyte node must have one of task|workflow|branch|gate|array entity specified at once" - ) - # TODO: Revisit flyte_branch_node and flyte_gate_node, should they be another type like Condition instead - # of a node? 
- self._flyte_task_node = task_node - if task_node: - self._flyte_entity = task_node.flyte_task - elif workflow_node: - self._flyte_entity = workflow_node.flyte_workflow or workflow_node.flyte_launch_plan - else: - self._flyte_entity = branch_node or gate_node or array_node - - super(FlyteNode, self).__init__( - id=id, - metadata=metadata, - inputs=bindings, - upstream_node_ids=[n.id for n in upstream_nodes], - output_aliases=[], - task_node=task_node, - workflow_node=workflow_node, - branch_node=branch_node, - gate_node=gate_node, - array_node=array_node, - ) - self._upstream = upstream_nodes - - @property - def task_node(self) -> Optional[FlyteTaskNode]: - return self._flyte_task_node - - @property - def flyte_entity(self) -> Union[FlyteTask, FlyteWorkflow, FlyteLaunchPlan, FlyteBranchNode]: - return self._flyte_entity - - @classmethod - def _promote_task_node(cls, t: FlyteTask) -> FlyteTaskNode: - return FlyteTaskNode.promote_from_model(t) - - @classmethod - def _promote_workflow_node( - cls, - wn: _workflow_model.WorkflowNode, - sub_workflows: Dict[id_models.Identifier, _workflow_model.WorkflowTemplate], - node_launch_plans: Dict[id_models.Identifier, _launch_plan_model.LaunchPlanSpec], - tasks: Dict[Identifier, FlyteTask], - converted_sub_workflows: Dict[id_models.Identifier, FlyteWorkflow], - ) -> Tuple[FlyteWorkflowNode, Dict[id_models.Identifier, FlyteWorkflow]]: - return FlyteWorkflowNode.promote_from_model( - wn, - sub_workflows, - node_launch_plans, - tasks, - converted_sub_workflows, - ) - - @classmethod - def promote_from_model( - cls, - model: _workflow_model.Node, - sub_workflows: Optional[Dict[id_models.Identifier, _workflow_model.WorkflowTemplate]], - node_launch_plans: Optional[Dict[id_models.Identifier, _launch_plan_model.LaunchPlanSpec]], - tasks: Dict[id_models.Identifier, FlyteTask], - converted_sub_workflows: Dict[id_models.Identifier, FlyteWorkflow], - ) -> Tuple[Optional[FlyteNode], Dict[id_models.Identifier, FlyteWorkflow]]: - node_model_id = model.id - # TODO: Consider removing - if id in {_constants.START_NODE_ID, _constants.END_NODE_ID}: - logger.warning(f"Should not call promote from model on a start node or end node {model}") - return None, converted_sub_workflows - - flyte_task_node, flyte_workflow_node, flyte_branch_node, flyte_gate_node, flyte_array_node = ( - None, - None, - None, - None, - None, - ) - if model.task_node is not None: - if model.task_node.reference_id not in tasks: - raise RuntimeError( - f"Remote Workflow closure does not have task with id {model.task_node.reference_id}." 
- ) - flyte_task_node = cls._promote_task_node(tasks[model.task_node.reference_id]) - elif model.workflow_node is not None: - flyte_workflow_node, converted_sub_workflows = cls._promote_workflow_node( - model.workflow_node, - sub_workflows, - node_launch_plans, - tasks, - converted_sub_workflows, - ) - elif model.branch_node is not None: - flyte_branch_node, converted_sub_workflows = FlyteBranchNode.promote_from_model( - model.branch_node, - sub_workflows, - node_launch_plans, - tasks, - converted_sub_workflows, - ) - elif model.gate_node is not None: - flyte_gate_node = FlyteGateNode.promote_from_model(model.gate_node) - elif model.array_node is not None: - flyte_array_node = FlyteArrayNode.promote_from_model(model.array_node) - # TODO: validate task in tasks - else: - raise _system_exceptions.FlyteSystemException( - f"Bad Node model, neither task nor workflow detected, node: {model}" - ) - - # When WorkflowTemplate models (containing node models) are returned by Admin, they've been compiled with a - # start node. In order to make the promoted FlyteWorkflow look the same, we strip the start-node text back out. - # TODO: Consider removing - for model_input in model.inputs: - if ( - model_input.binding.promise is not None - and model_input.binding.promise.node_id == _constants.START_NODE_ID - ): - model_input.binding.promise._node_id = _constants.GLOBAL_INPUT_NODE_ID - - return ( - cls( - id=node_model_id, - upstream_nodes=[], # set downstream, model doesn't contain this information - bindings=model.inputs, - metadata=model.metadata, - task_node=flyte_task_node, - workflow_node=flyte_workflow_node, - branch_node=flyte_branch_node, - gate_node=flyte_gate_node, - array_node=flyte_array_node, - ), - converted_sub_workflows, - ) - - @property - def upstream_nodes(self) -> List[FlyteNode]: - return self._upstream - - @property - def upstream_node_ids(self) -> List[str]: - return list(sorted(n.id for n in self.upstream_nodes)) - - def __repr__(self) -> str: - return f"Node(ID: {self.id})" - - -class FlyteWorkflow(_hash_mixin.HashOnReferenceMixin, RemoteEntity, WorkflowSpec): - """A class encapsulating a remote Flyte workflow.""" - - def __init__( - self, - id: id_models.Identifier, - nodes: List[FlyteNode], - interface, - output_bindings, - metadata, - metadata_defaults, - subworkflows: Optional[List[FlyteWorkflow]] = None, - tasks: Optional[List[FlyteTask]] = None, - launch_plans: Optional[Dict[id_models.Identifier, launch_plan_models.LaunchPlanSpec]] = None, - compiled_closure: Optional[compiler_models.CompiledWorkflowClosure] = None, - should_register: bool = False, - ): - # TODO: Remove check - for node in nodes: - for upstream in node.upstream_nodes: - if upstream.id is None: - raise _user_exceptions.FlyteAssertion( - "Some nodes contained in the workflow were not found in the workflow description. Please " - "ensure all nodes are either assigned to attributes within the class or an element in a " - "list, dict, or tuple which is stored as an attribute in the class." 
- ) - - self._flyte_sub_workflows = subworkflows - template_subworkflows = [] - if subworkflows: - template_subworkflows = [swf.template for swf in subworkflows] - - super(FlyteWorkflow, self).__init__( - template=_workflow_models.WorkflowTemplate( - id=id, - metadata=metadata, - metadata_defaults=metadata_defaults, - interface=interface, - nodes=nodes, - outputs=output_bindings, - ), - sub_workflows=template_subworkflows, - ) - self._flyte_nodes = nodes - - # Optional things that we save for ease of access when promoting from a model or CompiledWorkflowClosure - self._tasks = tasks - self._launch_plans = launch_plans - self._compiled_closure = compiled_closure - self._node_map = None - self._name = id.name - self._should_register = should_register - - @property - def name(self) -> str: - return self._name - - @property - def flyte_tasks(self) -> Optional[List[FlyteTask]]: - return self._tasks - - @property - def should_register(self) -> bool: - return self._should_register - - @property - def flyte_sub_workflows(self) -> List[FlyteWorkflow]: - return self._flyte_sub_workflows - - @property - def entity_type_text(self) -> str: - return "Workflow" - - @property - def resource_type(self): - return id_models.ResourceType.WORKFLOW - - @property - def flyte_nodes(self) -> List[FlyteNode]: - return self._flyte_nodes - - @property - def id(self) -> Identifier: - """ - This is an autogenerated id by the system. The id is globally unique across Flyte. - """ - return self.template.id - - @property - def metadata(self) -> WorkflowMetadata: - """ - This contains information on how to run the workflow. - """ - return self.template.metadata - - @property - def metadata_defaults(self) -> WorkflowMetadataDefaults: - """ - This contains information on how to run the workflow. - :rtype: WorkflowMetadataDefaults - """ - return self.template.metadata_defaults - - @property - def interface(self) -> TypedInterface: - """ - Defines a strongly typed interface for the Workflow (inputs, outputs). This can include some optional - parameters. - """ - return self.template.interface - - @property - def nodes(self) -> List[Node]: - """ - A list of nodes. In addition, "globals" is a special reserved node id that can be used to consume - workflow inputs - """ - return self.template.nodes - - @property - def outputs(self) -> List[Binding]: - """ - A list of output bindings that specify how to construct workflow outputs. Bindings can - pull node outputs or specify literals. All workflow outputs specified in the interface field must be bound - in order for the workflow to be validated. A workflow has an implicit dependency on all of its nodes - to execute successfully in order to bind final outputs. - """ - return self.template.outputs - - @property - def failure_node(self) -> Node: - """ - Node failure_node: A catch-all node. This node is executed whenever the execution engine determines the - workflow has failed. The interface of this node must match the Workflow interface with an additional input - named "error" of type pb.lyft.flyte.core.Error. 
- """ - return self.template.failure_node - - @classmethod - def get_non_system_nodes(cls, nodes: List[_workflow_models.Node]) -> List[_workflow_models.Node]: - return [n for n in nodes if n.id not in {_constants.START_NODE_ID, _constants.END_NODE_ID}] - - @classmethod - def _promote_node( - cls, - model: _workflow_model.Node, - sub_workflows: Optional[Dict[id_models.Identifier, _workflow_model.WorkflowTemplate]], - node_launch_plans: Optional[Dict[id_models.Identifier, _launch_plan_model.LaunchPlanSpec]], - tasks: Dict[id_models.Identifier, FlyteTask], - converted_sub_workflows: Dict[id_models.Identifier, FlyteWorkflow], - ) -> Tuple[Optional[FlyteNode], Dict[id_models.Identifier, FlyteWorkflow]]: - return FlyteNode.promote_from_model(model, sub_workflows, node_launch_plans, tasks, converted_sub_workflows) - - @classmethod - def promote_from_model( - cls, - base_model: _workflow_models.WorkflowTemplate, - sub_workflows: Optional[Dict[Identifier, _workflow_models.WorkflowTemplate]] = None, - tasks: Optional[Dict[Identifier, FlyteTask]] = None, - node_launch_plans: Optional[Dict[Identifier, launch_plan_models.LaunchPlanSpec]] = None, - ) -> FlyteWorkflow: - base_model_non_system_nodes = cls.get_non_system_nodes(base_model.nodes) - - node_map = {} - converted_sub_workflows = {} - for node in base_model_non_system_nodes: - flyte_node, converted_sub_workflows = cls._promote_node( - node, sub_workflows, node_launch_plans, tasks, converted_sub_workflows - ) - node_map[node.id] = flyte_node - - # Set upstream nodes for each node - for n in base_model_non_system_nodes: - current = node_map[n.id] - for upstream_id in n.upstream_node_ids: - upstream_node = node_map[upstream_id] - current._upstream.append(upstream_node) - - subworkflow_list = [] - if converted_sub_workflows: - subworkflow_list = [v for _, v in converted_sub_workflows.items()] - - task_list = [] - if tasks: - task_list = [t for _, t in tasks.items()] - - # No inputs/outputs specified, see the constructor for more information on the overrides. - wf = cls( - id=base_model.id, - nodes=list(node_map.values()), - metadata=base_model.metadata, - metadata_defaults=base_model.metadata_defaults, - interface=_interfaces.TypedInterface.promote_from_model(base_model.interface), - output_bindings=base_model.outputs, - subworkflows=subworkflow_list, - tasks=task_list, - launch_plans=node_launch_plans, - ) - - wf._node_map = node_map - - return wf - - @classmethod - def _promote_task(cls, t: _task_models.TaskTemplate) -> FlyteTask: - return FlyteTask.promote_from_model(t) - - @classmethod - def promote_from_closure( - cls, - closure: compiler_models.CompiledWorkflowClosure, - node_launch_plans: Optional[Dict[id_models, launch_plan_models.LaunchPlanSpec]] = None, - ): - """ - Extracts out the relevant portions of a FlyteWorkflow from a closure from the control plane. - - :param closure: This is the closure returned by Admin - :param node_launch_plans: The reason this exists is because the compiled closure doesn't have launch plans. - It only has subworkflows and tasks. Why this is unclear. 
If supplied, this map of launch plans will be - """ - sub_workflows = {sw.template.id: sw.template for sw in closure.sub_workflows} - tasks = {} - if closure.tasks: - tasks = {t.template.id: cls._promote_task(t.template) for t in closure.tasks} - - flyte_wf = cls.promote_from_model( - base_model=closure.primary.template, - sub_workflows=sub_workflows, - node_launch_plans=node_launch_plans, - tasks=tasks, - ) - flyte_wf._compiled_closure = closure - return flyte_wf - - -class FlyteLaunchPlan(hash_mixin.HashOnReferenceMixin, RemoteEntity, _launch_plan_models.LaunchPlanSpec): - """A class encapsulating a remote Flyte launch plan.""" - - def __init__(self, id, *args, **kwargs): - super(FlyteLaunchPlan, self).__init__(*args, **kwargs) - # Set all the attributes we expect this class to have - self._id = id - self._name = id.name - - # The interface is not set explicitly unless fetched in an engine context - self._interface = None - # If fetched when creating this object, can store it here. - self._flyte_workflow = None - - @property - def name(self) -> str: - return self._name - - @property - def flyte_workflow(self) -> Optional[FlyteWorkflow]: - return self._flyte_workflow - - @classmethod - def promote_from_model(cls, id: id_models.Identifier, model: _launch_plan_models.LaunchPlanSpec) -> FlyteLaunchPlan: - lp = cls( - id=id, - workflow_id=model.workflow_id, - default_inputs=_interface_models.ParameterMap(model.default_inputs.parameters), - fixed_inputs=model.fixed_inputs, - entity_metadata=model.entity_metadata, - labels=model.labels, - annotations=model.annotations, - auth_role=model.auth_role, - raw_output_data_config=model.raw_output_data_config, - max_parallelism=model.max_parallelism, - security_context=model.security_context, - ) - return lp - - @property - def id(self) -> id_models.Identifier: - return self._id - - @property - def is_scheduled(self) -> bool: - if self.entity_metadata.schedule.cron_expression: - return True - elif self.entity_metadata.schedule.rate and self.entity_metadata.schedule.rate.value: - return True - elif self.entity_metadata.schedule.cron_schedule and self.entity_metadata.schedule.cron_schedule.schedule: - return True - else: - return False - - @property - def workflow_id(self) -> id_models.Identifier: - return self._workflow_id - - @property - def interface(self) -> Optional[_interface.TypedInterface]: - """ - The interface is not technically part of the admin.LaunchPlanSpec in the IDL, however the workflow ID is, and - from the workflow ID, fetch will fill in the interface. This is nice because then you can __call__ the= - object and get a node. 
- """ - return self._interface - - @property - def resource_type(self) -> id_models.ResourceType: - return id_models.ResourceType.LAUNCH_PLAN - - @property - def entity_type_text(self) -> str: - return "Launch Plan" - - def compile(self, ctx: FlyteContext, *args, **kwargs): - fixed_input_lits = self.fixed_inputs.literals or {} - default_input_params = self.default_inputs.parameters or {} - return create_and_link_node_from_remote( - ctx, - entity=self, - _inputs_not_allowed=set(fixed_input_lits.keys()), - _ignorable_inputs=set(default_input_params.keys()), - **kwargs, - ) # noqa - - def __repr__(self) -> str: - return f"FlyteLaunchPlan(ID: {self.id} Interface: {self.interface}) - Spec {super().__repr__()})" diff --git a/flyrs/clients/executions.py b/flyrs/clients/executions.py deleted file mode 100644 index 05724bb868..0000000000 --- a/flyrs/clients/executions.py +++ /dev/null @@ -1,212 +0,0 @@ -from __future__ import annotations - -from abc import abstractmethod -from typing import Dict, List, Optional, Union - -from flytekit.core.type_engine import LiteralsResolver -from flytekit.exceptions import user as user_exceptions -from flytekit.models import execution as execution_models -from flytekit.models import node_execution as node_execution_models -from flytekit.models.admin import task_execution as admin_task_execution_models -from flytekit.models.core import execution as core_execution_models -from clients.entities import FlyteTask, FlyteWorkflow - - -class RemoteExecutionBase(object): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._inputs: Optional[LiteralsResolver] = None - self._outputs: Optional[LiteralsResolver] = None - - @property - def inputs(self) -> Optional[LiteralsResolver]: - return self._inputs - - @property - @abstractmethod - def error(self) -> core_execution_models.ExecutionError: - ... - - @property - @abstractmethod - def is_done(self) -> bool: - ... - - @property - def outputs(self) -> Optional[LiteralsResolver]: - """ - :return: Returns the outputs LiteralsResolver to the execution - :raises: ``FlyteAssertion`` error if execution is in progress or execution ended in error. - """ - if not self.is_done: - raise user_exceptions.FlyteAssertion( - "Please wait until the execution has completed before requesting the outputs." - ) - if self.error: - raise user_exceptions.FlyteAssertion("Outputs could not be found because the execution ended in failure.") - - return self._outputs - - -class FlyteTaskExecution(RemoteExecutionBase, admin_task_execution_models.TaskExecution): - """A class encapsulating a task execution being run on a Flyte remote backend.""" - - def __init__(self, *args, **kwargs): - super(FlyteTaskExecution, self).__init__(*args, **kwargs) - self._flyte_task = None - - @property - def task(self) -> Optional[FlyteTask]: - return self._flyte_task - - @property - def is_done(self) -> bool: - """Whether or not the execution is complete.""" - return self.closure.phase in { - core_execution_models.TaskExecutionPhase.ABORTED, - core_execution_models.TaskExecutionPhase.FAILED, - core_execution_models.TaskExecutionPhase.SUCCEEDED, - } - - @property - def error(self) -> Optional[core_execution_models.ExecutionError]: - """ - If execution is in progress, raise an exception. Otherwise, return None if no error was present upon - reaching completion. - """ - if not self.is_done: - raise user_exceptions.FlyteAssertion( - "Please what until the task execution has completed before requesting error information." 
- ) - return self.closure.error - - @classmethod - def promote_from_model(cls, base_model: admin_task_execution_models.TaskExecution) -> "FlyteTaskExecution": - return cls( - closure=base_model.closure, - id=base_model.id, - input_uri=base_model.input_uri, - is_parent=base_model.is_parent, - ) - - -class FlyteWorkflowExecution(RemoteExecutionBase, execution_models.Execution): - """A class encapsulating a workflow execution being run on a Flyte remote backend.""" - - def __init__(self, *args, **kwargs): - super(FlyteWorkflowExecution, self).__init__(*args, **kwargs) - self._node_executions = None - self._flyte_workflow: Optional[FlyteWorkflow] = None - - @property - def flyte_workflow(self) -> Optional[FlyteWorkflow]: - return self._flyte_workflow - - @property - def node_executions(self) -> Dict[str, "FlyteNodeExecution"]: - """Get a dictionary of node executions that are a part of this workflow execution.""" - return self._node_executions or {} - - @property - def error(self) -> core_execution_models.ExecutionError: - """ - If execution is in progress, raise an exception. Otherwise, return None if no error was present upon - reaching completion. - """ - if not self.is_done: - raise user_exceptions.FlyteAssertion( - "Please wait until a workflow has completed before checking for an error." - ) - return self.closure.error - - @property - def is_done(self) -> bool: - """ - Whether or not the execution is complete. - """ - return self.closure.phase in { - core_execution_models.WorkflowExecutionPhase.ABORTED, - core_execution_models.WorkflowExecutionPhase.FAILED, - core_execution_models.WorkflowExecutionPhase.SUCCEEDED, - core_execution_models.WorkflowExecutionPhase.TIMED_OUT, - } - - @classmethod - def promote_from_model(cls, base_model: execution_models.Execution) -> "FlyteWorkflowExecution": - return cls( - closure=base_model.closure, - id=base_model.id, - spec=base_model.spec, - ) - - -class FlyteNodeExecution(RemoteExecutionBase, node_execution_models.NodeExecution): - """A class encapsulating a node execution being run on a Flyte remote backend.""" - - def __init__(self, *args, **kwargs): - super(FlyteNodeExecution, self).__init__(*args, **kwargs) - self._task_executions = None - self._workflow_executions = [] - self._underlying_node_executions = None - self._interface = None - self._flyte_node = None - - @property - def task_executions(self) -> List[FlyteTaskExecution]: - return self._task_executions or [] - - @property - def workflow_executions(self) -> List[FlyteWorkflowExecution]: - return self._workflow_executions - - @property - def subworkflow_node_executions(self) -> Dict[str, FlyteNodeExecution]: - """ - This returns underlying node executions in instances where the current node execution is - a parent node. This happens when it's either a static or dynamic subworkflow. - """ - return ( - {} - if self._underlying_node_executions is None - else {n.id.node_id: n for n in self._underlying_node_executions} - ) - - @property - def executions(self) -> List[Union[FlyteTaskExecution, FlyteWorkflowExecution]]: - return self.task_executions or self._underlying_node_executions or [] - - @property - def error(self) -> core_execution_models.ExecutionError: - """ - If execution is in progress, raise an exception. Otherwise, return None if no error was present upon - reaching completion. - """ - if not self.is_done: - raise user_exceptions.FlyteAssertion( - "Please wait until the node execution has completed before requesting error information." 
- ) - return self.closure.error - - @property - def is_done(self) -> bool: - """Whether or not the execution is complete.""" - return self.closure.phase in { - core_execution_models.NodeExecutionPhase.ABORTED, - core_execution_models.NodeExecutionPhase.FAILED, - core_execution_models.NodeExecutionPhase.SKIPPED, - core_execution_models.NodeExecutionPhase.SUCCEEDED, - core_execution_models.NodeExecutionPhase.TIMED_OUT, - } - - @classmethod - def promote_from_model(cls, base_model: node_execution_models.NodeExecution) -> "FlyteNodeExecution": - return cls( - closure=base_model.closure, id=base_model.id, input_uri=base_model.input_uri, metadata=base_model.metadata - ) - - @property - def interface(self) -> "flytekit.remote.interface.TypedInterface": - """ - Return the interface of the task or subworkflow associated with this node execution. - """ - return self._interface diff --git a/flyrs/clients/interface.py b/flyrs/clients/interface.py deleted file mode 100644 index df61c8e336..0000000000 --- a/flyrs/clients/interface.py +++ /dev/null @@ -1,11 +0,0 @@ -from flytekit.models import interface as _interface_models - - -class TypedInterface(_interface_models.TypedInterface): - @classmethod - def promote_from_model(cls, model): - """ - :param flytekit.models.interface.TypedInterface model: - :rtype: TypedInterface - """ - return cls(model.inputs, model.outputs) diff --git a/flyrs/clients/lazy_entity.py b/flyrs/clients/lazy_entity.py deleted file mode 100644 index c9cb803267..0000000000 --- a/flyrs/clients/lazy_entity.py +++ /dev/null @@ -1,67 +0,0 @@ -import typing -from threading import Lock - -from flytekit import FlyteContext -from clients.remote_callable import RemoteEntity - -T = typing.TypeVar("T", bound=RemoteEntity) - - -class LazyEntity(RemoteEntity, typing.Generic[T]): - """ - Fetches the entity when the entity is called or when the entity is retrieved. - The entity is derived from RemoteEntity so that it behaves exactly like the mimicked entity. - """ - - def __init__(self, name: str, getter: typing.Callable[[], T], *args, **kwargs): - super().__init__(*args, **kwargs) - self._entity = None - self._getter = getter - self._name = name - if not self._getter: - raise ValueError("getter method is required to create a Lazy loadable Remote Entity.") - self._mutex = Lock() - - @property - def name(self) -> str: - return self._name - - def entity_fetched(self) -> bool: - with self._mutex: - return self._entity is not None - - @property - def entity(self) -> T: - """ - If not already fetched / available, then the entity will be force fetched. - """ - with self._mutex: - if self._entity is None: - try: - self._entity = self._getter() - except AttributeError as e: - raise RuntimeError( - f"Error downloading the entity {self._name}, (check original exception...)" - ) from e - return self._entity - - def __getattr__(self, item: str) -> typing.Any: - """ - Forwards all other attributes to entity, causing the entity to be fetched! - """ - return getattr(self.entity, item) - - def compile(self, ctx: FlyteContext, *args, **kwargs): - return self.entity.compile(ctx, *args, **kwargs) - - def __call__(self, *args, **kwargs): - """ - Forwards the call to the underlying entity. 
The entity will be fetched if not already present - """ - return self.entity(*args, **kwargs) - - def __repr__(self) -> str: - return str(self) - - def __str__(self) -> str: - return f"Promise for entity [{self._name}]" diff --git a/flyrs/clients/remote_callable.py b/flyrs/clients/remote_callable.py deleted file mode 100644 index 5b177bf7c4..0000000000 --- a/flyrs/clients/remote_callable.py +++ /dev/null @@ -1,75 +0,0 @@ -from abc import ABC, abstractmethod -from typing import Any, Dict, Optional, Tuple, Type, Union - -from flytekit.core.context_manager import BranchEvalMode, ExecutionState, FlyteContext -from flytekit.core.promise import Promise, VoidPromise, create_and_link_node_from_remote, extract_obj_name -from flytekit.exceptions import user as user_exceptions -from flytekit.loggers import logger -from flytekit.models.core.workflow import NodeMetadata - - -class RemoteEntity(ABC): - def __init__(self, *args, **kwargs): - # In cases where we make a FlyteTask/Workflow/LaunchPlan from a locally created Python object (i.e. an @task - # or an @workflow decorated function), we actually have the Python interface, so - self._python_interface: Optional[Dict[str, Type]] = None - - super().__init__(*args, **kwargs) - - @property - @abstractmethod - def name(self) -> str: - ... - - def construct_node_metadata(self) -> NodeMetadata: - """ - Used when constructing the node that encapsulates this task as part of a broader workflow definition. - """ - return NodeMetadata( - name=extract_obj_name(self.name), - ) - - def compile(self, ctx: FlyteContext, *args, **kwargs): - return create_and_link_node_from_remote(ctx, entity=self, **kwargs) # noqa - - def __call__(self, *args, **kwargs): - # When a Task is () aka __called__, there are three things we may do: - # a. Plain execution Mode - just run the execute function. If not overridden, we should raise an exception - # b. Compilation Mode - this happens when the function is called as part of a workflow (potentially - # dynamic task). Produce promise objects and create a node. - # c. Workflow Execution Mode - when a workflow is being run locally. Even though workflows are functions - # and everything should be able to be passed through naturally, we'll want to wrap output values of the - # function into objects, so that potential .with_cpu or other ancillary functions can be attached to do - # nothing. Subsequent tasks will have to know how to unwrap these. If by chance a non-Flyte task uses a - # task output as an input, things probably will fail pretty obviously. - # Since this is a reference entity, it still needs to be mocked otherwise an exception will be raised. 
- if len(args) > 0: - raise user_exceptions.FlyteAssertion( - f"Cannot call remotely fetched entity with args - detected {len(args)} positional args {args}" - ) - - ctx = FlyteContext.current_context() - if ctx.compilation_state is not None and ctx.compilation_state.mode == 1: - return self.compile(ctx, *args, **kwargs) - elif ( - ctx.execution_state is not None and ctx.execution_state.mode == ExecutionState.Mode.LOCAL_WORKFLOW_EXECUTION - ): - if ctx.execution_state.branch_eval_mode == BranchEvalMode.BRANCH_SKIPPED: - return - return self.local_execute(ctx, **kwargs) - else: - logger.debug("Fetched entity, running raw execute.") - return self.execute(**kwargs) - - def local_execute(self, ctx: FlyteContext, **kwargs) -> Optional[Union[Tuple[Promise], Promise, VoidPromise]]: - return self.execute(**kwargs) - - def local_execution_mode(self) -> ExecutionState.Mode: - return ExecutionState.Mode.LOCAL_TASK_EXECUTION - - def execute(self, **kwargs) -> Any: - raise AssertionError(f"Remotely fetched entities cannot be run locally. Please mock the {self.name}.execute.") - - @property - def python_interface(self) -> Optional[Dict[str, Type]]: - return self._python_interface diff --git a/flyrs/clients/remote_fs.py b/flyrs/clients/remote_fs.py deleted file mode 100644 index 10131f63fa..0000000000 --- a/flyrs/clients/remote_fs.py +++ /dev/null @@ -1,266 +0,0 @@ -from __future__ import annotations - -import base64 -import hashlib -import os -import pathlib -import random -import threading -import typing -from uuid import UUID - -import fsspec -import requests -from fsspec.callbacks import NoOpCallback -from fsspec.implementations.http import HTTPFileSystem -from fsspec.utils import get_protocol - -from flytekit.loggers import logger -from flytekit.tools.script_mode import hash_file - -if typing.TYPE_CHECKING: - from flytekit.remote.remote import FlyteRemote - -_DEFAULT_CALLBACK = NoOpCallback() -_PREFIX_KEY = "upload_prefix" -_HASHES_KEY = "hashes" -# This file system is not really a filesystem, so users aren't really able to specify the remote path, -# at least not yet. -REMOTE_PLACEHOLDER = "flyte://data" - -HashStructure = typing.Dict[str, typing.Tuple[bytes, int]] - - -class FlytePathResolver: - protocol = "flyte://" - _flyte_path_to_remote_map: typing.Dict[str, str] = {} - _lock = threading.Lock() - - @classmethod - def resolve_remote_path(cls, flyte_uri: str) -> typing.Optional[str]: - """ - Given a flyte uri, return the remote path if it exists or was created in current session, otherwise return None - """ - with cls._lock: - if flyte_uri in cls._flyte_path_to_remote_map: - return cls._flyte_path_to_remote_map[flyte_uri] - return None - - @classmethod - def add_mapping(cls, flyte_uri: str, remote_path: str): - """ - Thread safe method to dd a mapping from a flyte uri to a remote path - """ - with cls._lock: - cls._flyte_path_to_remote_map[flyte_uri] = remote_path - - -class HttpFileWriter(fsspec.spec.AbstractBufferedFile): - def __init__(self, remote: FlyteRemote, filename: str, **kwargs): - super().__init__(**kwargs) - self._remote = remote - self._filename = filename - - def _upload_chunk(self, final=False): - """Only uploads the file at once from the buffer. - Not suitable for large files as the buffer will blow the memory for very large files. - Suitable for default values or local dataframes being uploaded all at once. 
- """ - if final is False: - return False - self.buffer.seek(0) - data = self.buffer.read() - - try: - res = self._remote.client.get_upload_signed_url( - self._remote.default_project, - self._remote.default_domain, - None, - None, - filename_root=self._filename, - ) - FlytePathResolver.add_mapping(self.path, res.native_url) - resp = requests.put(res.signed_url, data=data) - if not resp.ok: - raise AssertionError(f"Failed to upload file {self._filename} to {res.signed_url} reason {resp.reason}") - except Exception as e: - raise AssertionError(f"Failed to upload file {self._filename} reason {e}") - - -def get_flyte_fs(remote: FlyteRemote) -> typing.Type[FlyteFS]: - class _FlyteFS(FlyteFS): - def __init__(self, **storage_options): - super().__init__(remote=remote, **storage_options) - - return _FlyteFS - - -class FlyteFS(HTTPFileSystem): - """ - Want this to behave mostly just like the HTTP file system. - """ - - sep = "/" - protocol = "flyte" - - def __init__( - self, - remote: FlyteRemote, - asynchronous: bool = False, - **storage_options, - ): - super().__init__(asynchronous=asynchronous, **storage_options) - self._remote = remote - - @property - def fsid(self) -> str: - return "flyte" - - async def _get_file(self, rpath, lpath, **kwargs): - """ - Don't do anything special. If it's a flyte url, the create a download link and write to lpath, - otherwise default to parent. - """ - raise NotImplementedError("FlyteFS currently doesn't support downloading files.") - - async def _put_file( - self, - lpath, - rpath, - chunk_size=5 * 2**20, - callback=_DEFAULT_CALLBACK, - method="put", - **kwargs, - ): - """ - fsspec will call this method to upload a file. If recursive, rpath will already be individual files. - Make the request and upload, but then how do we get the s3 paths back to the user? - """ - prefix = kwargs.pop(_PREFIX_KEY) - _, native_url = self._remote.upload_file( - pathlib.Path(lpath), self._remote.default_project, self._remote.default_domain, prefix - ) - return native_url - - @staticmethod - def extract_common(native_urls: typing.List[str]) -> str: - """ - This function that will take a list of strings and return the longest prefix that they all have in common. - That is, if you have - ['s3://my-s3-bucket/flytesnacks/development/ABCYZWMPACZAJ2MABGMOZ6CCPY======/source/empty.md', - 's3://my-s3-bucket/flytesnacks/development/ABCXKL5ZZWXY3PDLM3OONUHHME======/source/nested/more.txt', - 's3://my-s3-bucket/flytesnacks/development/ABCXBAPBKONMADXVW5Q3J6YBWM======/source/original.txt'] - this will return back 's3://my-s3-bucket/flytesnacks/development/' - Note that trailing characters after a separator that just happen to be the same will also be stripped. - """ - if len(native_urls) == 0: - return "" - if len(native_urls) == 1: - return native_urls[0] - - common_prefix = "" - shortest = min([len(x) for x in native_urls]) - x = [[native_urls[j][i] for j in range(len(native_urls))] for i in range(shortest)] - for i in x: - if len(set(i)) == 1: - common_prefix += i[0] - else: - break - - fs = fsspec.filesystem(get_protocol(native_urls[0])) - sep = fs.sep - # split the common prefix on the last separator so we don't get any trailing characters. 
- common_prefix = common_prefix.rsplit(sep, 1)[0] - logger.debug(f"Returning {common_prefix} from {native_urls}") - return common_prefix - - def get_hashes_and_lengths(self, p: pathlib.Path) -> HashStructure: - """ - Returns a flat list of absolute file paths to their hashes and content lengths - this output is used both for the file upload request, and to create consistently a filename root for - uploaded folders. We'll also use it for single files just for consistency. - If a directory then all the files in the directory will be hashed. - If a single file then just that file will be hashed. - Skip symlinks - """ - if p.is_symlink(): - return {} - if p.is_dir(): - hashes = {} - for f in p.iterdir(): - hashes.update(self.get_hashes_and_lengths(f)) - return hashes - else: - md5_bytes, _, content_length = hash_file(p.resolve()) - return {str(p.absolute()): (md5_bytes, content_length)} - - @staticmethod - def get_filename_root(file_info: HashStructure) -> str: - """ - Given a dictionary of file paths to hashes and content lengths, return a consistent filename root. - This is done by hashing the sorted list of file paths and then base32 encoding the result. - If the input is empty, then generate a random string - """ - if len(file_info) == 0: - return UUID(int=random.getrandbits(128)).hex - sorted_paths = sorted(file_info.keys()) - h = hashlib.md5() - for p in sorted_paths: - h.update(file_info[p][0]) - return base64.b32encode(h.digest()).decode("utf-8") - - async def _put( - self, - lpath, - rpath, - recursive=False, - callback=_DEFAULT_CALLBACK, - batch_size=None, - **kwargs, - ): - """ - cp file.txt flyte://data/... - rpath gets ignored, so it doesn't matter what it is. - """ - # Hash everything at the top level - file_info = self.get_hashes_and_lengths(pathlib.Path(lpath)) - prefix = self.get_filename_root(file_info) - - kwargs[_PREFIX_KEY] = prefix - kwargs[_HASHES_KEY] = file_info - res = await super()._put(lpath, REMOTE_PLACEHOLDER, recursive, callback, batch_size, **kwargs) - if isinstance(res, list): - res = self.extract_common(res) - FlytePathResolver.add_mapping(rpath.strip(os.path.sep), res) - return res - - async def _isdir(self, path): - return True - - def exists(self, path, **kwargs): - raise NotImplementedError("flyte file system currently can't check if a file exists.") - - def _open( - self, - path, - mode="wb", - block_size=None, - autocommit=None, # XXX: This differs from the base class. - cache_type=None, - cache_options=None, - size=None, - **kwargs, - ): - if mode != "wb": - raise ValueError("Only wb mode is supported") - - # Dataframes are written as multiple files, default is the first file with 00000 suffix, we should drop - # that suffix and use the parent directory as the remote path. 
- - return HttpFileWriter( - self._remote, os.path.basename(path), fs=self, path=os.path.dirname(path), mode=mode, **kwargs - ) - - def __str__(self): - p = super().__str__() - return f"FlyteFS({self._remote}): {p}" diff --git a/flyrs/remote/remote.py b/flyrs/remote/remote.py index 2b7c390d53..285f201f0a 100644 --- a/flyrs/remote/remote.py +++ b/flyrs/remote/remote.py @@ -73,14 +73,14 @@ ) from flytekit.models.launch_plan import LaunchPlanState from flytekit.models.literals import Literal, LiteralMap -from clients.backfill import create_backfill_workflow -from clients.data import download_literal -from clients.entities import FlyteLaunchPlan, FlyteNode, FlyteTask, FlyteTaskNode, FlyteWorkflow -from clients.executions import FlyteNodeExecution, FlyteTaskExecution, FlyteWorkflowExecution -from clients.interface import TypedInterface -from clients.lazy_entity import LazyEntity -from clients.remote_callable import RemoteEntity -from clients.remote_fs import get_flyte_fs +from flytekit.remote.backfill import create_backfill_workflow +from flytekit.remote.data import download_literal +from flytekit.remote.entities import FlyteLaunchPlan, FlyteNode, FlyteTask, FlyteTaskNode, FlyteWorkflow +from flytekit.remote.executions import FlyteNodeExecution, FlyteTaskExecution, FlyteWorkflowExecution +from flytekit.remote.interface import TypedInterface +from flytekit.remote.lazy_entity import LazyEntity +from flytekit.remote.remote_callable import RemoteEntity +from flytekit.remote.remote_fs import get_flyte_fs from flytekit.tools.fast_registration import fast_package from flytekit.tools.interactive import ipython_check from flytekit.tools.script_mode import _find_project_root, compress_scripts, hash_file From c467a8f512ed574d015436029872f231edb9a9d3 Mon Sep 17 00:00:00 2001 From: Austin Liu Date: Sun, 21 Apr 2024 15:43:46 +0800 Subject: [PATCH 16/16] cleanup Signed-off-by: Austin Liu --- flyrs/.gitignore | 10 ---------- flyrs/pyproject.toml | 12 ++++++++++++ 2 files changed, 12 insertions(+), 10 deletions(-) delete mode 100644 flyrs/.gitignore create mode 100644 flyrs/pyproject.toml diff --git a/flyrs/.gitignore b/flyrs/.gitignore deleted file mode 100644 index 15640f3707..0000000000 --- a/flyrs/.gitignore +++ /dev/null @@ -1,10 +0,0 @@ -/target - -pyproject.toml - - -# Added by cargo -# -# already existing elements were commented out - -#/target diff --git a/flyrs/pyproject.toml b/flyrs/pyproject.toml new file mode 100644 index 0000000000..7713ba4317 --- /dev/null +++ b/flyrs/pyproject.toml @@ -0,0 +1,12 @@ +[build-system] +requires = ["maturin>=1,<2"] +build-backend = "maturin" + +[project] +name = "flyrs" +requires-python = ">=3.7" +classifiers = [ + "Programming Language :: Rust", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", +] \ No newline at end of file
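
A note on using the result of this series (not part of the patches themselves): with the
maturin build backend from PATCH 16 in place, the PyO3 extension can be built into the
active virtualenv with `maturin develop` and then driven from Python. The sketch below is
illustrative only. It assumes the crate exposes the client type whose methods appear in
PATCH 13; the class name `FlyteClient`, its constructor arguments, and the endpoint value
are assumptions, not taken from the patches. The byte-level round trip, however, mirrors
the Rust code above: Python serializes the request protobuf to bytes, the Rust side
decodes it with prost, calls flyteadmin over tonic, re-encodes the response, and Python
parses the returned bytes back into a message.

    # Hypothetical smoke test for the bytes-in/bytes-out client API.
    from flyteidl.admin import common_pb2, launch_plan_pb2

    import flyrs

    client = flyrs.FlyteClient(endpoint="localhost:30080")  # assumed constructor

    # Build the request message and serialize it to protobuf bytes.
    req = common_pb2.ResourceListRequest(
        id=common_pb2.NamedEntityIdentifier(
            project="flytesnacks", domain="development", name="my_wf"
        ),
        limit=10,
    )

    # The Rust side decodes these bytes, calls ListLaunchPlans on flyteadmin,
    # and returns the encoded response.
    raw = client.list_launch_plans_paginated(req.SerializeToString())

    # Parse the returned bytes back into a LaunchPlanList message.
    res = launch_plan_pb2.LaunchPlanList()
    res.ParseFromString(raw)
    for lp in res.launch_plans:
        print(lp.id.name, lp.id.version)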