[DATALAD RUNCMD] fix ambiguous typos manually
=== Do not change lines below ===
{
 "chain": [],
 "cmd": "codespell -i3 -C4 -w",
 "exit": 0,
 "extra_inputs": [],
 "inputs": [],
 "outputs": [],
 "pwd": "."
}
^^^ Do not change lines above ^^^

Signed-off-by: Yaroslav Halchenko <[email protected]>
yarikoptic committed Sep 20, 2023
1 parent 23eb5a5 commit ff32044
Showing 13 changed files with 16 additions and 16 deletions.
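
For reference, the recorded `codespell -i3 -C4 -w` invocation rewrites files in place: `-w` writes the fixes back to the files, `-i3` selects the most interactive mode (confirm each change and choose among candidate corrections), and `-C4` prints four lines of context around each match. Flag meanings are as documented in codespell's `--help` and are worth verifying against the installed version.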
4 changes: 2 additions & 2 deletions CHANGELOG/CHANGELOG-v0.18.2.md
@@ -43,7 +43,7 @@ See the flytekit [0.25.0 release notes](https://github.com/flyteorg/flytekit/rel
...
minio:
...
-countour:
+contour:
...
```
@@ -56,7 +56,7 @@ See the flytekit [0.25.0 release notes](https://github.com/flyteorg/flytekit/rel
...
minio:
...
-countour:
+contour:
...
```
* Alternatively, if you do not have any dependency on external flyte dependencies, you can keep your ``myvalues.yaml`` and switch to using ``flyte-core`` helm chart directly with no changes.
2 changes: 1 addition & 1 deletion boilerplate/flyte/end2end/run-tests.py
@@ -127,7 +127,7 @@ def schedule_workflow_groups(
terminate_workflow_on_failure: bool,
) -> Dict[str, bool]:
"""
-Schedule workflows executions for all workflow gropus and return True if all executions succeed, otherwise
+Schedule workflows executions for all workflow groups and return True if all executions succeed, otherwise
return False.
"""
executions_by_wfgroup = {}
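
The corrected docstring (the same fix recurs in the vendored copies of `run-tests.py` below) describes a scheduler that launches executions per workflow group and reports success per group. A minimal sketch of that contract, with executions stubbed as callables — a hypothetical shape, not the script's actual body:

```python
from typing import Callable, Dict, List

def schedule_workflow_groups(
    workflow_groups: Dict[str, List[Callable[[], bool]]],
    terminate_workflow_on_failure: bool,  # real script: terminate failed runs
) -> Dict[str, bool]:
    """Schedule workflow executions for all workflow groups and report,
    per group, whether every execution succeeded (hypothetical sketch)."""
    results: Dict[str, bool] = {}
    for group, executions in workflow_groups.items():
        # Each "execution" is stubbed as a callable returning success/failure;
        # the vendored script launches real Flyte executions instead.
        results[group] = all(run() for run in executions)
    return results

# Stubbed usage:
assert schedule_workflow_groups(
    {"core": [lambda: True], "flaky": [lambda: False]},
    terminate_workflow_on_failure=False,
) == {"core": True, "flaky": False}
```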
2 changes: 1 addition & 1 deletion datacatalog/boilerplate/flyte/end2end/run-tests.py
@@ -126,7 +126,7 @@ def schedule_workflow_groups(
terminate_workflow_on_failure: bool,
) -> Dict[str, bool]:
"""
-Schedule workflows executions for all workflow gropus and return True if all executions succeed, otherwise
+Schedule workflows executions for all workflow groups and return True if all executions succeed, otherwise
return False.
"""
executions_by_wfgroup = {}
2 changes: 1 addition & 1 deletion flyteadmin/boilerplate/flyte/end2end/run-tests.py
@@ -126,7 +126,7 @@ def schedule_workflow_groups(
terminate_workflow_on_failure: bool,
) -> Dict[str, bool]:
"""
-Schedule workflows executions for all workflow gropus and return True if all executions succeed, otherwise
+Schedule workflows executions for all workflow groups and return True if all executions succeed, otherwise
return False.
"""
executions_by_wfgroup = {}
6 changes: 3 additions & 3 deletions flyteadmin/pkg/manager/impl/metrics_manager.go
@@ -209,7 +209,7 @@ func (m *MetricsManager) parseBranchNodeExecution(ctx context.Context,

*spans = append(*spans, nodeExecutionSpan)

-// backened overhead
+// backend overhead
if !nodeExecution.Closure.UpdatedAt.AsTime().Before(branchNodeExecution.Closure.UpdatedAt.AsTime()) {
*spans = append(*spans, createOperationSpan(branchNodeExecution.Closure.UpdatedAt,
nodeExecution.Closure.UpdatedAt, nodeTeardown))
@@ -271,7 +271,7 @@ func (m *MetricsManager) parseDynamicNodeExecution(ctx context.Context, nodeExec
return err
}

-// backened overhead
+// backend overhead
latestUpstreamNode := m.getLatestUpstreamNodeExecution(v1alpha1.EndNodeID,
nodeExecutionData.DynamicWorkflow.CompiledWorkflow.Primary.Connections.Upstream, nodeExecutions)
if latestUpstreamNode != nil && !nodeExecution.Closure.UpdatedAt.AsTime().Before(latestUpstreamNode.Closure.UpdatedAt.AsTime()) {
@@ -540,7 +540,7 @@ func (m *MetricsManager) parseSubworkflowNodeExecution(ctx context.Context,
return err
}

-// backened overhead
+// backend overhead
latestUpstreamNode := m.getLatestUpstreamNodeExecution(v1alpha1.EndNodeID,
workflow.Closure.CompiledWorkflow.Primary.Connections.Upstream, nodeExecutions)
if latestUpstreamNode != nil && !nodeExecution.Closure.UpdatedAt.AsTime().Before(latestUpstreamNode.Closure.UpdatedAt.AsTime()) {
2 changes: 1 addition & 1 deletion flyteadmin/pkg/repositories/transformers/execution.go
@@ -385,7 +385,7 @@ func FromExecutionModel(ctx context.Context, executionModel models.Execution, op
}, nil
}

-// PopulateDefaultStateChangeDetails used to populate execution state change details for older executions which donot
+// PopulateDefaultStateChangeDetails used to populate execution state change details for older executions which do not
// have these details captured. Hence we construct a default state change details from existing data model.
func PopulateDefaultStateChangeDetails(executionModel models.Execution) (*admin.ExecutionStateChangeDetails, error) {
var err error
2 changes: 1 addition & 1 deletion flyteadmin/pkg/repositories/transformers/task_execution.go
@@ -280,7 +280,7 @@ func mergeCustom(existing, latest *_struct.Struct) (*_struct.Struct, error) {
return &response, nil
}

-// mergeExternalResource combines the lastest ExternalResourceInfo proto with an existing instance
+// mergeExternalResource combines the latest ExternalResourceInfo proto with an existing instance
// by updating fields and merging logs.
func mergeExternalResource(existing, latest *event.ExternalResourceInfo) *event.ExternalResourceInfo {
if existing == nil {
2 changes: 1 addition & 1 deletion flyteadmin/scheduler/doc.go
@@ -4,7 +4,7 @@
// 1] Schedule management
// This component is part of the pkg/async/schedule/flytescheduler package
// Role of this component is to create / activate / deactivate schedules
-// The above actions are exposed through launchplan activation/deactivation api's and donot have separate controls.
+// The above actions are exposed through launchplan activation/deactivation api's and do not have separate controls.
// Whenever a launchplan with a schedule is activated, a new schedule entry is created in the datastore
// On deactivation the created scheduled and launchplan is deactivated through a flag
// Atmost one launchplan is active at any moment across its various versions and same semantics apply for the
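
As a rough illustration of the activation semantics this doc comment describes — activating a launchplan version implicitly deactivates any other active version, so at most one is active at any moment — a conceptual sketch (hypothetical names, not the scheduler's actual data model):

```python
from typing import Dict, Optional

active: Dict[str, str] = {}  # launchplan name -> currently active version

def activate(name: str, version: str) -> Optional[str]:
    """Activate one version; any previously active version is deactivated,
    keeping at most one version active per launchplan."""
    previous = active.get(name)
    active[name] = version
    return previous  # the version that was implicitly deactivated, if any

def deactivate(name: str) -> None:
    active.pop(name, None)  # flag-style deactivation, mirroring the datastore

activate("daily-report", "v1")
assert activate("daily-report", "v2") == "v1"  # v1 is no longer active
```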
2 changes: 1 addition & 1 deletion flytecopilot/boilerplate/flyte/end2end/run-tests.py
@@ -126,7 +126,7 @@ def schedule_workflow_groups(
terminate_workflow_on_failure: bool,
) -> Dict[str, bool]:
"""
-Schedule workflows executions for all workflow gropus and return True if all executions succeed, otherwise
+Schedule workflows executions for all workflow groups and return True if all executions succeed, otherwise
return False.
"""
executions_by_wfgroup = {}
2 changes: 1 addition & 1 deletion flytepropeller/boilerplate/flyte/end2end/run-tests.py
@@ -126,7 +126,7 @@ def schedule_workflow_groups(
terminate_workflow_on_failure: bool,
) -> Dict[str, bool]:
"""
-Schedule workflows executions for all workflow gropus and return True if all executions succeed, otherwise
+Schedule workflows executions for all workflow groups and return True if all executions succeed, otherwise
return False.
"""
executions_by_wfgroup = {}
2 changes: 1 addition & 1 deletion flytepropeller/pkg/webhook/aws_secret_manager.go
@@ -31,7 +31,7 @@ const (
)

var (
-// AWSSecretMountPathPrefix defins the default mount path for secrets
+// AWSSecretMountPathPrefix defines the default mount path for secrets
AWSSecretMountPathPrefix = []string{string(os.PathSeparator), "etc", "flyte", "secrets"}
)

2 changes: 1 addition & 1 deletion rfc/core language/1461-cache-serialize-api.md
@@ -51,7 +51,7 @@ Reservation requests will include a requested heartbeat-interval-seconds configu
### Datacatalog Managed Reservations
The `datacatalog` service will be responsible for managing cache reservations. This will entail the addition of a new ReservationManager and ReservationRepo (with gorm implementation) per the project standards. Additionally it requires a new table in the db where reservations are uniquely defined based on DatasetID and an artifact tag.

-All database operations are performed with write consistency, where records are only inserted or updated on restrictive conditions. This eliminates the possibility for race conditions. Where two executions attempt to acquire a cache reservation simultaneously, only one can succeeed.
+All database operations are performed with write consistency, where records are only inserted or updated on restrictive conditions. This eliminates the possibility for race conditions. Where two executions attempt to acquire a cache reservation simultaneously, only one can succeed.

Additionally, the `datacatalog` configuration file defines max-heartbeat-interval-seconds and heartbeat-grace-period-multiplier to define the maximum heartbeat interval of reservation extensions and set the reservation expiration (computed as heartbeat-interval-seconds * heartbeat-grace-period-multiplier).

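
The acquisition rule this hunk describes — insert or update only under restrictive conditions, with expiry computed as heartbeat-interval-seconds * heartbeat-grace-period-multiplier — can be pictured as a compare-and-swap. A hypothetical in-memory illustration; `datacatalog` enforces the same rule with write-consistent database operations:

```python
import time
from typing import Dict

def try_acquire(
    reservations: Dict[str, dict],
    key: str,
    owner: str,
    heartbeat_interval_s: float,
    grace_period_multiplier: float,
) -> bool:
    """Acquire a reservation only if the slot is free, already ours, or the
    holder's reservation expired (interval * grace multiplier)."""
    now = time.monotonic()
    expiry = heartbeat_interval_s * grace_period_multiplier
    current = reservations.get(key)
    if current is None or current["owner"] == owner or now - current["heartbeat"] > expiry:
        reservations[key] = {"owner": owner, "heartbeat": now}
        return True
    return False  # a live reservation is held by another execution

store: Dict[str, dict] = {}
assert try_acquire(store, "dataset:artifact-tag", "exec-a", 10, 3)
assert not try_acquire(store, "dataset:artifact-tag", "exec-b", 10, 3)  # only one succeeds
```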
2 changes: 1 addition & 1 deletion rsts/concepts/architecture.rst
@@ -91,7 +91,7 @@ Complex task types require workloads to be distributed across hundreds of pods.

The type-specific task logic is separated into isolated code modules known as **plugins**.
Each task type has an associated plugin that is responsible for handling tasks of its type.
-For each task in a workflow, FlytePropeller activates the appropriate plugin based on the task type in order to fullfill the task.
+For each task in a workflow, FlytePropeller activates the appropriate plugin based on the task type in order to fulfill the task.

The Flyte team has pre-built plugins for Hive, Spark, AWS Batch, and :ref:`more <integrations>`.
To support new use-cases, developers can create their own plugins and bundle them in their FlytePropeller deployment.
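
The plugin dispatch this hunk describes can be pictured as a registry keyed by task type. A conceptual sketch in Python (FlytePropeller's real plugins are Go code modules; these names are hypothetical):

```python
from typing import Callable, Dict

# Registry mapping task type -> handler, standing in for per-type plugins.
PLUGINS: Dict[str, Callable[[dict], str]] = {
    "hive": lambda task: f"submitted Hive query for {task['name']}",
    "spark": lambda task: f"launched Spark job for {task['name']}",
}

def fulfill(task: dict) -> str:
    """Activate the plugin registered for this task's type."""
    task_type = task["type"]
    plugin = PLUGINS.get(task_type)
    if plugin is None:
        raise ValueError(f"no plugin registered for task type {task_type!r}")
    return plugin(task)

print(fulfill({"type": "spark", "name": "etl"}))  # launched Spark job for etl
```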
