From d28bfbdc366627c9ac9f57fcaa512ff04de19d6f Mon Sep 17 00:00:00 2001
From: Ilia Sergachev
Date: Mon, 6 Jan 2025 20:24:53 -0800
Subject: [PATCH] PR #20996: [GPU][NFC] Fix a mistype.

Imported from GitHub PR https://github.com/openxla/xla/pull/20996

Copybara import of the project:

--
f6f4a3f81f0cd893e6fcc9c99ab03732a32c1af7 by Ilia Sergachev:

[GPU][NFC] Fix a mistype.

Merging this change closes #20996

COPYBARA_INTEGRATE_REVIEW=https://github.com/openxla/xla/pull/20996 from openxla:fix_mistype f6f4a3f81f0cd893e6fcc9c99ab03732a32c1af7
PiperOrigin-RevId: 712745893
---
 xla/service/gpu/runtime/command_buffer_cmd.h  |  2 +-
 xla/stream_executor/command_buffer.h          | 26 +++++++++++++-------------
 .../cuda/cuda_command_buffer.cc               |  2 +-
 xla/stream_executor/gpu/gpu_command_buffer.cc | 12 ++++++------
 xla/stream_executor/gpu/gpu_command_buffer.h  |  4 ++--
 5 files changed, 23 insertions(+), 23 deletions(-)

diff --git a/xla/service/gpu/runtime/command_buffer_cmd.h b/xla/service/gpu/runtime/command_buffer_cmd.h
index e02c6b470ca51..eb08838644a6e 100644
--- a/xla/service/gpu/runtime/command_buffer_cmd.h
+++ b/xla/service/gpu/runtime/command_buffer_cmd.h
@@ -210,7 +210,7 @@ class CommandBufferCmd {
     // This argument allows conditional commands to record a command sequence
     // into non-default execution scope.
     se::CommandBuffer::ExecutionScopeId execution_scope_id =
-        se::CommandBuffer::kDefaulExecutionScope;
+        se::CommandBuffer::kDefaultExecutionScope;
   };
 
   // See Thunk documentation for XLA execution stages (prepare, initialize,
diff --git a/xla/stream_executor/command_buffer.h b/xla/stream_executor/command_buffer.h
index fd4c8cc9404f7..bb56f0f0c3ca4 100644
--- a/xla/stream_executor/command_buffer.h
+++ b/xla/stream_executor/command_buffer.h
@@ -52,7 +52,7 @@ class CommandBuffer {
   // Execution scope enables fine-grained synchronization scopes inside
   // commands buffers. Implementation is very backend-specific and for CUDA/ROCM
   // backends it's implemented as DAG edges. By default all commands launched in
-  // the `kDefaulExecutionScope` execution scope.
+  // the `kDefaultExecutionScope` execution scope.
   //
   // Example #1: independent execution scopes and independent barriers
   //
@@ -114,7 +114,7 @@ class CommandBuffer {
   // semantics as stream wait operation.
   //
   TSL_LIB_GTL_DEFINE_INT_TYPE(ExecutionScopeId, uint64_t);
-  static constexpr auto kDefaulExecutionScope = ExecutionScopeId(0);
+  static constexpr auto kDefaultExecutionScope = ExecutionScopeId(0);
 
   // Builder constructs nested command buffers owned by a parent command buffer.
   //
@@ -188,7 +188,7 @@ class CommandBuffer {
                                ExecutionScopeId to_execution_scope_id) = 0;
 
   // Adds an execution barrier to the default execution scope.
-  absl::Status Barrier() { return Barrier(kDefaulExecutionScope); }
+  absl::Status Barrier() { return Barrier(kDefaultExecutionScope); }
 
   // Adds a kernel launch command.
   virtual absl::Status Launch(ExecutionScopeId execution_scope_id,
@@ -198,7 +198,7 @@ class CommandBuffer {
   // Adds a kernel launch command to the default execution scope.
   absl::Status Launch(const ThreadDim& threads, const BlockDim& blocks,
                       const Kernel& kernel, const KernelArgs& args) {
-    return Launch(kDefaulExecutionScope, threads, blocks, kernel, args);
+    return Launch(kDefaultExecutionScope, threads, blocks, kernel, args);
   }
 
   // Type-safe wrapper for launching typed kernels. Notice that the order of
@@ -214,7 +214,7 @@ class CommandBuffer {
   absl::Status Launch(const TypedKernel<Params...>& kernel,
                       const ThreadDim& threads, const BlockDim& blocks,
                       Args... args) {
-    return Launch(kernel, kDefaulExecutionScope, threads, blocks, args...);
+    return Launch(kernel, kDefaultExecutionScope, threads, blocks, args...);
   }
 
   // Adds a nested command buffer.
@@ -223,7 +223,7 @@ class CommandBuffer {
 
   // Adds a nested command buffer to the default execution scope.
   absl::Status AddNestedCommandBuffer(const CommandBuffer& nested) {
-    return AddNestedCommandBuffer(kDefaulExecutionScope, nested);
+    return AddNestedCommandBuffer(kDefaultExecutionScope, nested);
   }
 
   // Adds a device-to-device memory copy.
@@ -236,7 +236,7 @@ class CommandBuffer {
   absl::Status MemcpyDeviceToDevice(DeviceMemoryBase* dst,
                                     const DeviceMemoryBase& src,
                                     uint64_t size) {
-    return MemcpyDeviceToDevice(kDefaulExecutionScope, dst, src, size);
+    return MemcpyDeviceToDevice(kDefaultExecutionScope, dst, src, size);
   }
 
   // Adds a memset command.
@@ -247,7 +247,7 @@ class CommandBuffer {
   // Adds a memset command to the default execution scope.
   absl::Status Memset(DeviceMemoryBase* dst, BitPattern bit_pattern,
                       size_t num_elements) {
-    return Memset(kDefaulExecutionScope, dst, bit_pattern, num_elements);
+    return Memset(kDefaultExecutionScope, dst, bit_pattern, num_elements);
   }
 
   //--------------------------------------------------------------------------//
@@ -261,7 +261,7 @@ class CommandBuffer {
 
   // Adds a conditional If operation to default execution scope.
   absl::Status If(DeviceMemory<bool> pred, Builder then_builder) {
-    return If(kDefaulExecutionScope, pred, then_builder);
+    return If(kDefaultExecutionScope, pred, then_builder);
   }
 
   // Adds a conditional operation that will execute a command buffer constructed
@@ -274,7 +274,7 @@ class CommandBuffer {
   // Adds a conditional IfElse operation to default execution scope.
   absl::Status IfElse(DeviceMemory<bool> pred, Builder then_builder,
                       Builder else_builder) {
-    return IfElse(kDefaulExecutionScope, pred, then_builder, else_builder);
+    return IfElse(kDefaultExecutionScope, pred, then_builder, else_builder);
   }
 
   // Adds a conditional operation that will execute a command buffer constructed
@@ -289,7 +289,7 @@ class CommandBuffer {
   // Adds a conditional Case operation to default execution scope.
   absl::Status Case(DeviceMemory<int32_t> index,
                     std::vector<Builder> branches) {
-    return Case(kDefaulExecutionScope, index, branches);
+    return Case(kDefaultExecutionScope, index, branches);
   }
 
   // Adds a conditional operation that will execute a command buffer constructed
@@ -304,7 +304,7 @@ class CommandBuffer {
   // Adds a conditional For operation to default execution scope.
   absl::Status For(int32_t num_iteration, DeviceMemory<int32_t> loop_counter,
                    Builder body_builder) {
-    return For(kDefaulExecutionScope, num_iteration, loop_counter,
+    return For(kDefaultExecutionScope, num_iteration, loop_counter,
                body_builder);
   }
 
@@ -332,7 +332,7 @@ class CommandBuffer {
   // Adds a conditional While operation to default execution scope.
   absl::Status While(DeviceMemory<bool> pred,
                      ExecutionScopeBuilder cond_builder, Builder body_builder) {
-    return While(kDefaulExecutionScope, pred, cond_builder, body_builder);
+    return While(kDefaultExecutionScope, pred, cond_builder, body_builder);
   }
 
   // Submits the command buffer for execution.
diff --git a/xla/stream_executor/cuda/cuda_command_buffer.cc b/xla/stream_executor/cuda/cuda_command_buffer.cc
index 4ddb5348dc75b..ca7b9b345dd6a 100644
--- a/xla/stream_executor/cuda/cuda_command_buffer.cc
+++ b/xla/stream_executor/cuda/cuda_command_buffer.cc
@@ -612,7 +612,7 @@ absl::Status CudaCommandBuffer::PrepareFinalization() {
   }
 
   TF_ASSIGN_OR_RETURN(NoOpKernel * noop, GetNoOpKernel());
-  TF_RETURN_IF_ERROR(CommandBuffer::Launch(*noop, kDefaulExecutionScope,
+  TF_RETURN_IF_ERROR(CommandBuffer::Launch(*noop, kDefaultExecutionScope,
                                            ThreadDim(), BlockDim()));
 
   return absl::OkStatus();
diff --git a/xla/stream_executor/gpu/gpu_command_buffer.cc b/xla/stream_executor/gpu/gpu_command_buffer.cc
index 440346c3f6e2a..70dabe4c9cb69 100644
--- a/xla/stream_executor/gpu/gpu_command_buffer.cc
+++ b/xla/stream_executor/gpu/gpu_command_buffer.cc
@@ -105,7 +105,7 @@ static std::atomic<int64_t> alive_execs(0);
 
 GpuCommandBuffer::GpuCommandBuffer(Mode mode, StreamExecutor* parent)
     : mode_(mode), parent_(parent) {
-  execution_scopes_.try_emplace(kDefaulExecutionScope);
+  execution_scopes_.try_emplace(kDefaultExecutionScope);
 }
 
 GpuCommandBuffer::Dependencies GpuCommandBuffer::GetBarrier(
@@ -118,7 +118,7 @@ GpuCommandBuffer::Dependencies GpuCommandBuffer::GetBarrier(
 
 absl::Status GpuCommandBuffer::DisableBarriersExecution(
     GpuCommandBuffer& root_command_buffer) {
-  ExecutionScope& execution_scope = execution_scopes_[kDefaulExecutionScope];
+  ExecutionScope& execution_scope = execution_scopes_[kDefaultExecutionScope];
 
   for (GpuGraphBarrierInfo& barrier : execution_scope.barriers) {
     if (barrier.is_barrier_node) {
@@ -669,8 +669,8 @@ absl::Status GpuCommandBuffer::For(ExecutionScopeId execution_scope_id,
     TF_RETURN_IF_ERROR(body->Barrier());
 
     // Decide if we want to continue loop iteration.
-    return body->LaunchSetForConditionKernel(kDefaulExecutionScope, conditional,
-                                             loop_counter, num_iteration);
+    return body->LaunchSetForConditionKernel(
+        kDefaultExecutionScope, conditional, loop_counter, num_iteration);
   };
 
   std::array<ConditionBuilder, 1> builders = {std::move(body)};
@@ -694,9 +694,9 @@ absl::Status GpuCommandBuffer::While(ExecutionScopeId execution_scope_id,
   auto body = [&](GpuCommandBuffer* body, GraphConditionalHandle conditional) {
     TF_RETURN_IF_ERROR(body_builder(body));
     TF_RETURN_IF_ERROR(body->Barrier());
-    TF_RETURN_IF_ERROR(cond_builder(kDefaulExecutionScope, body));
+    TF_RETURN_IF_ERROR(cond_builder(kDefaultExecutionScope, body));
     TF_RETURN_IF_ERROR(body->Barrier());
-    return body->LaunchSetWhileConditionKernel(kDefaulExecutionScope,
+    return body->LaunchSetWhileConditionKernel(kDefaultExecutionScope,
                                                conditional, pred);
   };
 
diff --git a/xla/stream_executor/gpu/gpu_command_buffer.h b/xla/stream_executor/gpu/gpu_command_buffer.h
index 9c580a1986f6c..886713d2277bd 100644
--- a/xla/stream_executor/gpu/gpu_command_buffer.h
+++ b/xla/stream_executor/gpu/gpu_command_buffer.h
@@ -155,11 +155,11 @@ class GpuCommandBuffer : public CommandBuffer {
   absl::Span<const GpuGraphBarrierInfo> barriers(ExecutionScopeId id) const;
 
   absl::Span<const GpuGraphNodeInfo> nodes() const {
-    return nodes(kDefaulExecutionScope);
+    return nodes(kDefaultExecutionScope);
   }
 
   absl::Span<const GpuGraphBarrierInfo> barriers() const {
-    return barriers(kDefaulExecutionScope);
+    return barriers(kDefaultExecutionScope);
  }
 
   // Returns the list of dependencies for a given node. `node` must be a node
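--
Note on the pattern behind the rename: `ExecutionScopeId` is a TSL strong-int
type, `kDefaultExecutionScope` is scope 0, and every default-scope convenience
overload in the patch simply delegates to its explicit-scope counterpart. The
standalone sketch below is illustrative only, not XLA code: `MiniCommandBuffer`
and the plain struct standing in for TSL_LIB_GTL_DEFINE_INT_TYPE are
assumptions made for the example.

// Minimal, self-contained sketch (not the XLA API): a strongly typed
// execution scope id with a default constant, plus a convenience overload
// delegating to the explicit-scope overload, as in the patched header.
#include <cstdint>
#include <iostream>

class MiniCommandBuffer {  // hypothetical stand-in for se::CommandBuffer
 public:
  // Stand-in for TSL_LIB_GTL_DEFINE_INT_TYPE(ExecutionScopeId, uint64_t):
  // a strong typedef keeps scope ids from mixing with plain integers.
  struct ExecutionScopeId {
    uint64_t value;
  };
  static constexpr ExecutionScopeId kDefaultExecutionScope{0};

  // Explicit-scope overload: records a barrier into the given scope.
  void Barrier(ExecutionScopeId scope) {
    std::cout << "barrier in execution scope " << scope.value << "\n";
  }

  // Default-scope convenience overload, delegating exactly like
  // `absl::Status Barrier() { return Barrier(kDefaultExecutionScope); }`.
  void Barrier() { Barrier(kDefaultExecutionScope); }
};

int main() {
  MiniCommandBuffer cb;
  cb.Barrier();                                        // default scope 0
  cb.Barrier(MiniCommandBuffer::ExecutionScopeId{1});  // explicit scope 1
  return 0;
}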