PR #20996: [GPU][NFC] Fix a mistype. #21027

Merged 1 commit on Jan 7, 2025
xla/service/gpu/runtime/command_buffer_cmd.h (1 addition, 1 deletion)

@@ -210,7 +210,7 @@ class CommandBufferCmd {
   // This argument allows conditional commands to record a command sequence
   // into non-default execution scope.
   se::CommandBuffer::ExecutionScopeId execution_scope_id =
-      se::CommandBuffer::kDefaulExecutionScope;
+      se::CommandBuffer::kDefaultExecutionScope;
 };

 // See Thunk documentation for XLA execution stages (prepare, initialize,
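For orientation, a minimal sketch of what the fixed field controls. `RecordParams` is a stand-in name for the struct that carries `execution_scope_id` (the struct itself is not shown in this hunk), so treat every identifier except `execution_scope_id` and `ExecutionScopeId` as an assumption:

```cpp
// Sketch only: record a command sequence into a non-default execution scope.
RecordParams params;  // assumed struct; scope defaults to kDefaultExecutionScope
params.execution_scope_id = se::CommandBuffer::ExecutionScopeId(1);
// Commands recorded with `params` now synchronize only within scope 1,
// independently of commands in the default scope 0.
```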
xla/stream_executor/command_buffer.h (13 additions, 13 deletions)

@@ -52,7 +52,7 @@ class CommandBuffer {
   // Execution scope enables fine-grained synchronization scopes inside
   // commands buffers. Implementation is very backend-specific and for CUDA/ROCM
   // backends it's implemented as DAG edges. By default all commands launched in
-  // the `kDefaulExecutionScope` execution scope.
+  // the `kDefaultExecutionScope` execution scope.
   //
   // Example #1: independent execution scopes and independent barriers
   //
@@ -114,7 +114,7 @@
   // semantics as stream wait operation.
   //
   TSL_LIB_GTL_DEFINE_INT_TYPE(ExecutionScopeId, uint64_t);
-  static constexpr auto kDefaulExecutionScope = ExecutionScopeId(0);
+  static constexpr auto kDefaultExecutionScope = ExecutionScopeId(0);

   // Builder constructs nested command buffers owned by a parent command buffer.
   //
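`TSL_LIB_GTL_DEFINE_INT_TYPE` makes `ExecutionScopeId` a strongly typed integer rather than a bare `uint64_t`, which is also why the misspelled `kDefaulExecutionScope` was a consistently compilable name rather than a silent wrong value. A small sketch of the semantics, assuming the usual tsl strong-int accessors:

```cpp
using ExecutionScopeId = se::CommandBuffer::ExecutionScopeId;

ExecutionScopeId s0 = se::CommandBuffer::kDefaultExecutionScope;  // scope 0
ExecutionScopeId s1(1);        // explicit construction is required
// ExecutionScopeId s2 = 2;    // would not compile: no implicit int conversion
uint64_t raw = s1.value();     // explicit unwrap back to the raw integer
```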
@@ -188,7 +188,7 @@
                               ExecutionScopeId to_execution_scope_id) = 0;

   // Adds an execution barrier to the default execution scope.
-  absl::Status Barrier() { return Barrier(kDefaulExecutionScope); }
+  absl::Status Barrier() { return Barrier(kDefaultExecutionScope); }

   // Adds a kernel launch command.
   virtual absl::Status Launch(ExecutionScopeId execution_scope_id,
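Usage sketch for the default-scope overload: commands recorded after `Barrier()` depend on everything recorded before it in that scope. `cmd_buffer`, `a`, `b`, `c`, and `n` are assumed to be set up elsewhere:

```cpp
// Two copies ordered by a barrier in the default execution scope: the second
// copy cannot start until the first completes.
TF_RETURN_IF_ERROR(cmd_buffer->MemcpyDeviceToDevice(&b, a, n));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());  // same as Barrier(kDefaultExecutionScope)
TF_RETURN_IF_ERROR(cmd_buffer->MemcpyDeviceToDevice(&c, b, n));
```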
@@ -198,7 +198,7 @@
   // Adds a kernel launch command to the default execution scope.
   absl::Status Launch(const ThreadDim& threads, const BlockDim& blocks,
                       const Kernel& kernel, const KernelArgs& args) {
-    return Launch(kDefaulExecutionScope, threads, blocks, kernel, args);
+    return Launch(kDefaultExecutionScope, threads, blocks, kernel, args);
   }

   // Type-safe wrapper for launching typed kernels. Notice that the order of
@@ -214,7 +214,7 @@
   absl::Status Launch(const TypedKernel<Params...>& kernel,
                       const ThreadDim& threads, const BlockDim& blocks,
                       Args... args) {
-    return Launch(kernel, kDefaulExecutionScope, threads, blocks, args...);
+    return Launch(kernel, kDefaultExecutionScope, threads, blocks, args...);
   }

   // Adds a nested command buffer.
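The typed overload above splices `kDefaultExecutionScope` in right after the kernel. A sketch, assuming a `TypedKernel<se::DeviceMemory<float>, int32_t>` named `scale_kernel` has been loaded elsewhere:

```cpp
// Type-safe launch: the argument pack is checked against the kernel's
// parameter types at compile time.
TF_RETURN_IF_ERROR(cmd_buffer->Launch(scale_kernel, se::ThreadDim(128),
                                      se::BlockDim(16), data, /*n=*/1024));
```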
@@ -223,7 +223,7 @@

   // Adds a nested command buffer to the default execution scope.
   absl::Status AddNestedCommandBuffer(const CommandBuffer& nested) {
-    return AddNestedCommandBuffer(kDefaulExecutionScope, nested);
+    return AddNestedCommandBuffer(kDefaultExecutionScope, nested);
   }

   // Adds a device-to-device memory copy.
@@ -236,7 +236,7 @@
   absl::Status MemcpyDeviceToDevice(DeviceMemoryBase* dst,
                                     const DeviceMemoryBase& src,
                                     uint64_t size) {
-    return MemcpyDeviceToDevice(kDefaulExecutionScope, dst, src, size);
+    return MemcpyDeviceToDevice(kDefaultExecutionScope, dst, src, size);
   }

   // Adds a memset command.
@@ -247,7 +247,7 @@
   // Adds a memset command to the default execution scope.
   absl::Status Memset(DeviceMemoryBase* dst, BitPattern bit_pattern,
                       size_t num_elements) {
-    return Memset(kDefaulExecutionScope, dst, bit_pattern, num_elements);
+    return Memset(kDefaultExecutionScope, dst, bit_pattern, num_elements);
   }

   //--------------------------------------------------------------------------//
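A combined sketch of the two default-scope data commands above. `BitPattern` is assumed to accept a 32-bit pattern; `src`, `dst`, and `n` are assumed device buffers and an element count:

```cpp
// Zero-fill a float buffer, then copy it, all in the default execution scope.
TF_RETURN_IF_ERROR(cmd_buffer->Memset(&src, uint32_t{0}, /*num_elements=*/n));
TF_RETURN_IF_ERROR(cmd_buffer->Barrier());  // order the copy after the memset
TF_RETURN_IF_ERROR(
    cmd_buffer->MemcpyDeviceToDevice(&dst, src, /*size=*/n * sizeof(float)));
```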
@@ -261,7 +261,7 @@

   // Adds a conditional If operation to default execution scope.
   absl::Status If(DeviceMemory<bool> pred, Builder then_builder) {
-    return If(kDefaulExecutionScope, pred, then_builder);
+    return If(kDefaultExecutionScope, pred, then_builder);
   }

   // Adds a conditional operation that will execute a command buffer constructed
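`Builder` (defined outside this hunk) is the callback that records the conditional body; assuming it has the usual `std::function<absl::Status(CommandBuffer*)>` shape:

```cpp
// If `pred` is true on device at execution time, run the then-branch commands.
TF_RETURN_IF_ERROR(cmd_buffer->If(
    pred, /*then_builder=*/[&](se::CommandBuffer* then_cmds) {
      return then_cmds->Memset(&flag, uint8_t{1}, /*num_elements=*/1);
    }));
```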
@@ -274,7 +274,7 @@
   // Adds a conditional IfElse operation to default execution scope.
   absl::Status IfElse(DeviceMemory<bool> pred, Builder then_builder,
                       Builder else_builder) {
-    return IfElse(kDefaulExecutionScope, pred, then_builder, else_builder);
+    return IfElse(kDefaultExecutionScope, pred, then_builder, else_builder);
   }

   // Adds a conditional operation that will execute a command buffer constructed
@@ -289,7 +289,7 @@
   // Adds a conditional Case operation to default execution scope.
   absl::Status Case(DeviceMemory<int32_t> index,
                     std::vector<Builder> branches) {
-    return Case(kDefaulExecutionScope, index, branches);
+    return Case(kDefaultExecutionScope, index, branches);
   }

   // Adds a conditional operation that will execute a command buffer constructed
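Sketch of the default-scope `Case`: the device-side `index` selects which branch's recorded commands execute. Builders are assumed callable as in the `If` sketch above:

```cpp
std::vector<se::CommandBuffer::Builder> branches;
branches.push_back([&](se::CommandBuffer* cb) {  // executed when index == 0
  return cb->Memset(&out, uint8_t{0}, /*num_elements=*/1);
});
branches.push_back([&](se::CommandBuffer* cb) {  // executed when index == 1
  return cb->Memset(&out, uint8_t{1}, /*num_elements=*/1);
});
TF_RETURN_IF_ERROR(cmd_buffer->Case(index, branches));
```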
@@ -304,7 +304,7 @@
   // Adds a conditional For operation to default execution scope.
   absl::Status For(int32_t num_iteration, DeviceMemory<int32_t> loop_counter,
                    Builder body_builder) {
-    return For(kDefaulExecutionScope, num_iteration, loop_counter,
+    return For(kDefaultExecutionScope, num_iteration, loop_counter,
                body_builder);
   }
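Sketch of the default-scope `For`. As the `gpu_command_buffer.cc` hunk below shows, `loop_counter` is device memory the implementation uses as the induction variable:

```cpp
// Record a body that runs num_iteration times.
TF_RETURN_IF_ERROR(cmd_buffer->For(
    /*num_iteration=*/4, loop_counter,
    /*body_builder=*/[&](se::CommandBuffer* body) {
      return body->MemcpyDeviceToDevice(&dst, src, /*size=*/size);
    }));
```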

@@ -332,7 +332,7 @@
   // Adds a conditional While operation to default execution scope.
   absl::Status While(DeviceMemory<bool> pred,
                      ExecutionScopeBuilder cond_builder, Builder body_builder) {
-    return While(kDefaulExecutionScope, pred, cond_builder, body_builder);
+    return While(kDefaultExecutionScope, pred, cond_builder, body_builder);
   }

   // Submits the command buffer for execution.
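Sketch of the default-scope `While`. Unlike the other conditionals, the condition is recorded by an `ExecutionScopeBuilder`, which receives the scope id (the `gpu_command_buffer.cc` hunk below calls it with `kDefaultExecutionScope`) so it can re-evaluate `pred` between iterations. `update_pred_kernel` is an assumed `TypedKernel` that rewrites the predicate:

```cpp
TF_RETURN_IF_ERROR(cmd_buffer->While(
    pred,
    /*cond_builder=*/[&](se::CommandBuffer::ExecutionScopeId id,
                         se::CommandBuffer* cb) {
      // Recompute `pred` on device inside the given execution scope.
      return cb->Launch(update_pred_kernel, id, se::ThreadDim(),
                        se::BlockDim(), pred);
    },
    /*body_builder=*/[&](se::CommandBuffer* body) {
      return body->Barrier();  // stand-in for real loop-body commands
    }));
```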
xla/stream_executor/cuda/cuda_command_buffer.cc (1 addition, 1 deletion)

@@ -612,7 +612,7 @@ absl::Status CudaCommandBuffer::PrepareFinalization() {
   }

   TF_ASSIGN_OR_RETURN(NoOpKernel * noop, GetNoOpKernel());
-  TF_RETURN_IF_ERROR(CommandBuffer::Launch(*noop, kDefaulExecutionScope,
+  TF_RETURN_IF_ERROR(CommandBuffer::Launch(*noop, kDefaultExecutionScope,
                                            ThreadDim(), BlockDim()));

   return absl::OkStatus();
xla/stream_executor/gpu/gpu_command_buffer.cc (6 additions, 6 deletions)

@@ -105,7 +105,7 @@ static std::atomic<int64_t> alive_execs(0);

 GpuCommandBuffer::GpuCommandBuffer(Mode mode, StreamExecutor* parent)
     : mode_(mode), parent_(parent) {
-  execution_scopes_.try_emplace(kDefaulExecutionScope);
+  execution_scopes_.try_emplace(kDefaultExecutionScope);
 }

 GpuCommandBuffer::Dependencies GpuCommandBuffer::GetBarrier(
@@ -118,7 +118,7 @@ GpuCommandBuffer::Dependencies GpuCommandBuffer::GetBarrier(

 absl::Status GpuCommandBuffer::DisableBarriersExecution(
     GpuCommandBuffer& root_command_buffer) {
-  ExecutionScope& execution_scope = execution_scopes_[kDefaulExecutionScope];
+  ExecutionScope& execution_scope = execution_scopes_[kDefaultExecutionScope];

   for (GpuGraphBarrierInfo& barrier : execution_scope.barriers) {
     if (barrier.is_barrier_node) {
@@ -669,8 +669,8 @@ absl::Status GpuCommandBuffer::For(ExecutionScopeId execution_scope_id,
     TF_RETURN_IF_ERROR(body->Barrier());

     // Decide if we want to continue loop iteration.
-    return body->LaunchSetForConditionKernel(kDefaulExecutionScope, conditional,
-                                             loop_counter, num_iteration);
+    return body->LaunchSetForConditionKernel(
+        kDefaultExecutionScope, conditional, loop_counter, num_iteration);
   };

   std::array<ConditionBuilder, 1> builders = {std::move(body)};
@@ -694,9 +694,9 @@ absl::Status GpuCommandBuffer::While(ExecutionScopeId execution_scope_id,
   auto body = [&](GpuCommandBuffer* body, GraphConditionalHandle conditional) {
     TF_RETURN_IF_ERROR(body_builder(body));
     TF_RETURN_IF_ERROR(body->Barrier());
-    TF_RETURN_IF_ERROR(cond_builder(kDefaulExecutionScope, body));
+    TF_RETURN_IF_ERROR(cond_builder(kDefaultExecutionScope, body));
     TF_RETURN_IF_ERROR(body->Barrier());
-    return body->LaunchSetWhileConditionKernel(kDefaulExecutionScope,
+    return body->LaunchSetWhileConditionKernel(kDefaultExecutionScope,
                                                conditional, pred);
   };

xla/stream_executor/gpu/gpu_command_buffer.h (2 additions, 2 deletions)

@@ -155,11 +155,11 @@ class GpuCommandBuffer : public CommandBuffer {
   absl::Span<const GpuGraphBarrierInfo> barriers(ExecutionScopeId id) const;

   absl::Span<const GpuGraphNodeInfo> nodes() const {
-    return nodes(kDefaulExecutionScope);
+    return nodes(kDefaultExecutionScope);
   }

   absl::Span<const GpuGraphBarrierInfo> barriers() const {
-    return barriers(kDefaulExecutionScope);
+    return barriers(kDefaultExecutionScope);
   }

   // Returns the list of dependencies for a given node. `node` must be a node