Skip to content

Commit

Permalink
Revert "Replace is_same with is_same_v for concise syntax (pytorch#145450)"
Browse files Browse the repository at this point in the history

This reverts commit 5205158.

Reverted pytorch#145450 on behalf of https://github.com/jeanschmidt due to testing to see if reverting would fix timeout in inductor jobs ([comment](pytorch#145450 (comment)))
  • Loading branch information
pytorchmergebot committed Feb 12, 2025
1 parent b004228 commit ce80865
Show file tree
Hide file tree
Showing 6 changed files with 10 additions and 10 deletions.
2 changes: 1 addition & 1 deletion aten/src/ATen/core/List_inl.h
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,7 @@ List<T>::List(TypePtr elementType)
: List(make_intrusive<c10::detail::ListImpl>(
typename c10::detail::ListImpl::list_type(),
std::move(elementType))) {
static_assert(std::is_same_v<T, IValue> || std::is_same_v<T, c10::intrusive_ptr<ivalue::Future>>,
static_assert(std::is_same_v<T, IValue> || std::is_same<T, c10::intrusive_ptr<ivalue::Future>>::value,
"This constructor is only valid for c10::impl::GenericList or List<Future>.");
}

Expand Down
4 changes: 2 additions & 2 deletions aten/src/ATen/cpu/vec/vec256/zarch/vec256_zarch.h
Original file line number Diff line number Diff line change
Expand Up @@ -38,8 +38,8 @@ constexpr bool is_zarch_implemented_quant() {

template <typename T>
constexpr bool is_zarch_implemented_complex() {
return std::is_same_v<T, c10::complex<float>> ||
std::is_same_v<T, c10::complex<double>>;
return std::is_same<T, c10::complex<float>>::value ||
std::is_same<T, c10::complex<double>>::value;
}

constexpr int offset0 = 0;
Expand Down
2 changes: 1 addition & 1 deletion aten/src/ATen/cuda/cub.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -478,7 +478,7 @@ constexpr int block_threads(){

template<typename scalar_t, typename ScanOpT>
inline void inclusive_deterministic_scan(const scalar_t * input, scalar_t * output, ScanOpT scan_op, int64_t num_items) {
static_assert(std::is_same_v<ScanOpT, std::plus<scalar_t>>, "");
static_assert(std::is_same<ScanOpT, std::plus<scalar_t>>::value, "");
constexpr int BLOCK_THREADS = block_threads<sizeof(scalar_t)>();
constexpr int ITEMS_PER_THREAD = 16;
auto grid_size = (num_items + BLOCK_THREADS * ITEMS_PER_THREAD - 1) / (BLOCK_THREADS * ITEMS_PER_THREAD);
Expand Down
2 changes: 1 addition & 1 deletion aten/src/ATen/native/cuda/ScanUtils.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -451,7 +451,7 @@ void scan_dim(const TensorBase& self, const TensorBase& result,
TORCH_INTERNAL_ASSERT(result.is_contiguous());
if (self.numel() == self.size(dim)) {
if constexpr (std::is_same_v<BinaryFunction, std::plus<scalar_t>>) {
if constexpr (std::is_same<BinaryFunction, std::plus<scalar_t>>::value) {
if (C10_UNLIKELY(at::globalContext().deterministicAlgorithms()) && (self.is_floating_point() || self.is_complex())) {
# if (defined(CUDA_VERSION) && CUDA_VERSION > 11040) || defined(USE_ROCM)
cuda::cub::inclusive_deterministic_scan(self_->const_data_ptr<scalar_t>(), result.mutable_data_ptr<scalar_t>(), binary_op, self.numel());
Expand Down
8 changes: 4 additions & 4 deletions aten/src/ATen/native/sparse/cuda/SparseMatMul.cu
Original file line number Diff line number Diff line change
Expand Up @@ -211,8 +211,8 @@ struct CusparseMatrixMultiplyOp {
std::is_same_v<c10::BFloat16, scalar_t> ||
std::is_same_v<float, scalar_t> ||
std::is_same_v<double, scalar_t> ||
std::is_same_v<c10::complex<float>, scalar_t> ||
std::is_same_v<c10::complex<double>, scalar_t>,
std::is_same<c10::complex<float>, scalar_t>::value ||
std::is_same<c10::complex<double>, scalar_t>::value,
"cusparseSpGEMM only supports data type of half, bfloat16, float, double and complex float, double.");
// SpGEMM Computation
TORCH_CUDASPARSE_CHECK(cusparseSpGEMM_createDescr(&spgemmDesc));
Expand Down Expand Up @@ -673,8 +673,8 @@ void sparse_sparse_matmul_cuda_kernel(
std::is_same_v<c10::BFloat16, scalar_t> ||
std::is_same_v<float, scalar_t> ||
std::is_same_v<double, scalar_t> ||
std::is_same_v<c10::complex<float>, scalar_t> ||
std::is_same_v<c10::complex<double>, scalar_t>,
std::is_same<c10::complex<float>, scalar_t>::value ||
std::is_same<c10::complex<double>, scalar_t>::value,
"sparse_sparse_matmul_cuda_kernel only supports data type of half, bfloat16, float, double and complex float, double.");

// older versions of cusparse on Windows segfault for complex128 dtype
Expand Down
2 changes: 1 addition & 1 deletion aten/src/ATen/templates/TensorBody.h
Original file line number Diff line number Diff line change
Expand Up @@ -582,7 +582,7 @@ class TORCH_API Tensor: public TensorBase {
template <typename T>
using hook_return_void_t = std::enable_if_t<std::is_void<typename std::invoke_result_t<T&, Tensor>>::value, unsigned>;
template <typename T>
using hook_return_var_t = std::enable_if_t<std::is_same_v<typename std::invoke_result_t<T&, Tensor>, Tensor>, unsigned>;
using hook_return_var_t = std::enable_if_t<std::is_same<typename std::invoke_result_t<T&, Tensor>, Tensor>::value, unsigned>;

/// Registers a backward hook.
///
Expand Down

0 comments on commit ce80865

Please sign in to comment.