Use c10::to_string in more places (pytorch#28605)
Summary:
Pull Request resolved: pytorch#28605

c10::to_string was added because std::to_string isn't available in libstdc++
on Android. Use it in more places to get the PyTorch Android
build working with libstdc++.

Test Plan: Internal android build.

Reviewed By: jerryzh168

Differential Revision: D18099520

fbshipit-source-id: 17a2b617c2d21deadd0fdac1db849823637981fc
dreiss authored and facebook-github-bot committed Oct 24, 2019
1 parent df81cb2 commit da6b8a9
Showing 20 changed files with 37 additions and 37 deletions.
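
Nearly every hunk below is the same mechanical substitution: std::to_string
becomes c10::to_string (and std::stoi/std::stoll/std::stod become their c10
counterparts). For context, a minimal sketch of how such a portable
to_string helper could be written, assuming an std::ostringstream fallback;
the actual c10 implementation may differ.

    #include <sstream>
    #include <string>

    namespace c10 {

    // Sketch of a portable std::to_string replacement for toolchains
    // (e.g. libstdc++ on older Android NDKs) that lack std::to_string.
    // Illustrative only; not the actual c10 helper.
    template <typename T>
    inline std::string to_string(const T& value) {
      std::ostringstream os;
      os << value;  // works for any type with an operator<< overload
      return os.str();
    }

    } // namespace c10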
10 changes: 5 additions & 5 deletions aten/src/ATen/DLConvertor.cpp
@@ -88,7 +88,7 @@ static Device getATenDevice(const DLContext& ctx) {
       return at::Device(DeviceType::HIP, ctx.device_id);
     default:
       throw std::logic_error(
-          "Unsupported device_type: " + std::to_string(ctx.device_type));
+          "Unsupported device_type: " + c10::to_string(ctx.device_type));
   }
 }

@@ -104,7 +104,7 @@ ScalarType toScalarType(const DLDataType& dtype) {
         break;
       default:
         throw std::logic_error(
-            "Unsupported kUInt bits " + std::to_string(dtype.bits));
+            "Unsupported kUInt bits " + c10::to_string(dtype.bits));
     }
     break;
   case DLDataTypeCode::kDLInt:
@@ -123,7 +123,7 @@ ScalarType toScalarType(const DLDataType& dtype) {
         break;
       default:
         throw std::logic_error(
-            "Unsupported kInt bits " + std::to_string(dtype.bits));
+            "Unsupported kInt bits " + c10::to_string(dtype.bits));
     }
     break;
   case DLDataTypeCode::kDLFloat:
@@ -139,11 +139,11 @@ ScalarType toScalarType(const DLDataType& dtype) {
         break;
       default:
         throw std::logic_error(
-            "Unsupported kFloat bits " + std::to_string(dtype.bits));
+            "Unsupported kFloat bits " + c10::to_string(dtype.bits));
     }
     break;
   default:
-    throw std::logic_error("Unsupported code " + std::to_string(dtype.code));
+    throw std::logic_error("Unsupported code " + c10::to_string(dtype.code));
   }
   return stype;
 }
2 changes: 1 addition & 1 deletion aten/src/ATen/ParallelCommon.cpp
@@ -28,7 +28,7 @@ const char* get_env_var(
 size_t get_env_num_threads(const char* var_name, size_t def_value = 0) {
   try {
     if (auto* value = std::getenv(var_name)) {
-      int nthreads = std::stoi(value);
+      int nthreads = c10::stoi(value);
       TORCH_CHECK(nthreads > 0);
       return nthreads;
     }
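
The hunk above swaps std::stoi for c10::stoi. A sketch of such a
string-to-int helper under the same assumption (a stringstream-based
fallback, not the actual c10 code):

    #include <sstream>
    #include <stdexcept>
    #include <string>

    namespace c10 {

    // Hypothetical stand-in for std::stoi on toolchains missing it.
    // Simplified: the real std::stoi distinguishes std::invalid_argument
    // from std::out_of_range; this collapses both into one error.
    inline int stoi(const std::string& str) {
      std::stringstream ss(str);
      int n = 0;
      ss >> n;
      if (ss.fail()) {
        throw std::invalid_argument("stoi failed on input: " + str);
      }
      return n;
    }

    } // namespace c10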
2 changes: 1 addition & 1 deletion aten/src/ATen/native/quantized/cpu/qconv.cpp
@@ -168,7 +168,7 @@ class QConv2dInt8 final : public c10::OperatorKernel {
     TORCH_CHECK(bias.dim() == 1, "bias should be a vector (1D Tensor)");
     TORCH_CHECK(
         bias.size(0) == K,
-        "bias should have K elements: " + std::to_string(K));
+        "bias should have K elements: " + c10::to_string(K));
     bias_ptr = bias.data_ptr<float>();
   }
2 changes: 1 addition & 1 deletion torch/csrc/autograd/cpp_hook.cpp
@@ -32,7 +32,7 @@ variable_list CppFunctionPreHook::operator()(const variable_list& values) {
       // Don't change gradient
       continue;
     }
-    check_single_result(value, res, std::to_string(i));
+    check_single_result(value, res, c10::to_string(i));
     value = res;
   }
   variable_list results(values);
6 changes: 3 additions & 3 deletions torch/csrc/autograd/custom_function.h
@@ -262,8 +262,8 @@ variable_list CppNode<T>::apply(variable_list&& inputs) {
   if (num_outputs != num_forward_inputs) {
     std::string msg("function ");
     msg += name() + " returned an incorrect number of gradients (expected ";
-    msg += std::to_string(num_forward_inputs) + ", got " ;
-    msg += std::to_string(num_outputs) + ")";
+    msg += c10::to_string(num_forward_inputs) + ", got " ;
+    msg += c10::to_string(num_outputs) + ")";
     throw std::runtime_error(msg);
   }

@@ -274,7 +274,7 @@ variable_list CppNode<T>::apply(variable_list&& inputs) {
     if (outputs[i].defined()) {
       std::string msg("function ");
       msg += name() + " returned a gradient different that is defined at position ";
-      msg += std::to_string(i + 1) + ", but the corresponding forward input was not a Variable";
+      msg += c10::to_string(i + 1) + ", but the corresponding forward input was not a Variable";
       throw std::runtime_error(msg);
     }
     continue;
2 changes: 1 addition & 1 deletion torch/csrc/jit/code_template.h
@@ -29,7 +29,7 @@ struct TemplateEnv {
   // Add a number 'v' to the map at key 'k'
   template <typename T>
   void d(const std::string& k, const T& v) {
-    strings_[k] = std::to_string(v);
+    strings_[k] = c10::to_string(v);
     lists_.erase(k);
   }
2 changes: 1 addition & 1 deletion torch/csrc/jit/function.cpp
@@ -14,7 +14,7 @@ FunctionSchema defaultSchemaFor(const Function& function) {
   for (size_t i = 0; i < num_inputs; ++i) {
     const Value* v = g.inputs().at(i);
     std::string name = v->hasDebugName() ? v->debugNameBase()
-                                         : ("argument_" + std::to_string(i));
+                                         : ("argument_" + c10::to_string(i));
     args.emplace_back(std::move(name), unshapedType(g.inputs()[i]->type()));
   }
   for (size_t i = 0; i < g.outputs().size(); ++i) {
14 changes: 7 additions & 7 deletions torch/csrc/jit/fuser/codegen.cpp
@@ -30,15 +30,15 @@ size_t ${tensor}_dimIndex${d} = ${tensor}_linearIndex ${mod_sizes};
 )");

 static std::string valueName(const Value* n) {
-  return "n" + std::to_string(n->unique());
+  return "n" + c10::to_string(n->unique());
 }

 static std::string scalarValue(const int64_t v) {
-  return std::to_string(v);
+  return c10::to_string(v);
 }

 static std::string scalarValue(const bool v) {
-  return std::to_string(v);
+  return c10::to_string(v);
 }

 // Note: The NAN, NEG_INFINITY and POS_INFINITY strings map to device-specific
@@ -267,10 +267,10 @@ static std::string encodeRHS(const Node* n) {
     // PyTorch converts (scalar) argument types to result before applying the
     // operator e.g. 1.4-torch.tensor(3) = -2
     env.s(
-        std::to_string(i),
+        c10::to_string(i),
         typeCastedValueName(in->type(), *outtype, valueName(in)));
     // Uncasted operands only used for comparison operators
-    env.s(std::to_string(i) + "_nocast", valueName(in));
+    env.s(c10::to_string(i) + "_nocast", valueName(in));
     i++;
   }

@@ -336,7 +336,7 @@ std::string generateKernel(
         1); // + 1 because the first argument is the linearIndex
     std::string tensor =
         "t" +
-        std::to_string(
+        c10::to_string(
             formals.size()); // can't be unique() because Param may be an output
     const auto nDim = desc.nDim();
     emitIndexingFor(tensorOffsets, tensor, nDim, desc.lastIsContiguous());
@@ -357,7 +357,7 @@ std::string generateKernel(
         1); // + 1 because the first argument is the linearIndex
     std::string scalar =
         "s" +
-        std::to_string(
+        c10::to_string(
             formals.size()); // can't be unique() because Param may be an output
     env.d(
         "formal_index",
2 changes: 1 addition & 1 deletion torch/csrc/jit/fuser/compiler.cpp
@@ -280,7 +280,7 @@ std::shared_ptr<FusedKernel> compileKernel(
   }

   const bool use_cuda = device.is_cuda();
-  const std::string name = "kernel_" + std::to_string(next_kernel_id++);
+  const std::string name = "kernel_" + c10::to_string(next_kernel_id++);
   std::string code =
       generateKernel(name, *graph, flat_inputs, flat_outputs, use_cuda);
   const FusedKernelConstructor& kernel_ctor =
2 changes: 1 addition & 1 deletion torch/csrc/jit/import_source.cpp
@@ -78,7 +78,7 @@ struct ConstantTableValue : public SugaredValue {
       const std::string& field) override {
     const char* field_s = field.c_str();
     char* end;
-    int64_t offset = std::strtoll(field_s + 1, &end, 10);
+    int64_t offset = strtoll(field_s + 1, &end, 10);
     if (field.size() < 2 || *end != 0)
       throw ErrorReport(loc) << "invalid constant specifier: " << field;
     if (offset < 0 || size_t(offset) >= constants_->size()) {

(Unlike the other hunks, this one drops the std:: qualifier rather than
switching to a c10 helper, presumably because the Android libstdc++
toolchain declares strtoll only in the global namespace.)
2 changes: 1 addition & 1 deletion torch/csrc/jit/ir.cpp
@@ -765,7 +765,7 @@ Value* Value::setDebugName(const std::string& name) {
   if (last_dot_pos != std::string::npos && last_dot_pos + 1 != name.size()) {
     if (name.find_first_not_of("0123456789", last_dot_pos + 1) ==
         std::string::npos) {
-      suffix = std::stoll(name.substr(last_dot_pos + 1));
+      suffix = c10::stoll(name.substr(last_dot_pos + 1));
       name_base = name.substr(0, last_dot_pos);
     }
   }
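
As an aside, the hunk above is the name-suffix parser: a debug name like
"hidden.12" splits into base "hidden" and numeric suffix 12 (now parsed with
c10::stoll). A standalone sketch of just that parsing, runnable outside
PyTorch, with std::stoll standing in for the c10 helper:

    #include <cstdint>
    #include <iostream>
    #include <string>

    int main() {
      std::string name = "hidden.12";
      int64_t suffix = -1;
      std::string name_base = name;
      auto last_dot_pos = name.find_last_of('.');
      // Only treat the tail as a suffix if everything after the last
      // dot is a digit (mirrors the check in Value::setDebugName).
      if (last_dot_pos != std::string::npos && last_dot_pos + 1 != name.size()) {
        if (name.find_first_not_of("0123456789", last_dot_pos + 1) ==
            std::string::npos) {
          suffix = std::stoll(name.substr(last_dot_pos + 1));  // c10::stoll in the patch
          name_base = name.substr(0, last_dot_pos);
        }
      }
      std::cout << name_base << " " << suffix << "\n";  // prints: hidden 12
    }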
2 changes: 1 addition & 1 deletion torch/csrc/jit/ir.h
@@ -188,7 +188,7 @@ struct Value {
     if (hasDebugName()) {
       return unique_name_;
     }
-    return std::to_string(unique());
+    return c10::to_string(unique());
   }
   TORCH_API std::string debugNameBase() const;
   Node* node() {
4 changes: 2 additions & 2 deletions torch/csrc/jit/irparser.cpp
@@ -154,10 +154,10 @@ ParsedLiteral IRParser::parseScalarLiteral(Node* n) {
   if (str.find('.') != std::string::npos ||
       str.find('e') != std::string::npos) {
     r.k = AttributeKind::f;
-    r.f = std::stod(str);
+    r.f = c10::stod(str);
   } else {
     r.k = AttributeKind::i;
-    r.i = std::stoll(str);
+    r.i = c10::stoll(str);
   }
   L.next();
   return r;
2 changes: 1 addition & 1 deletion torch/csrc/jit/passes/python_print.cpp
@@ -333,7 +333,7 @@ struct PythonPrintImpl {
     std::unordered_set<std::string>& used) {
   std::string name = candidate;
   while (used.count(name) || reserved_names.count(name)) {
-    name = candidate + std::to_string(next_id[name]++);
+    name = candidate + c10::to_string(next_id[name]++);
   }
   used.insert(name);
   return name;
2 changes: 1 addition & 1 deletion torch/csrc/jit/pickle.cpp
@@ -74,7 +74,7 @@ std::vector<char> pickle_save(const at::IValue& ivalue) {
   std::vector<at::TypePtr> types(writeable_tensors.size(), at::StringType::get());

   for (size_t i = 0; i < writeable_tensors.size(); i++) {
-    keys.emplace_back(std::to_string(i));
+    keys.emplace_back(c10::to_string(i));
   }

   auto keys_tuple = at::ivalue::Tuple::create(keys);
2 changes: 1 addition & 1 deletion torch/csrc/jit/pickler.cpp
@@ -237,7 +237,7 @@ void Pickler::pushStorageOfTensor(const at::Tensor& tensor) {
       std::string(toString(tensor.scalar_type())).append("Storage");
   pushGlobal("torch", data_type);
   // root_key
-  pushString(std::to_string(tensor_data_.size()));
+  pushString(c10::to_string(tensor_data_.size()));
   // location
   std::ostringstream ss;
   ss << tensor.device();
2 changes: 1 addition & 1 deletion torch/csrc/jit/register_prim_ops.cpp
@@ -131,7 +131,7 @@ void checkDoubleInRange(double a) {
       a > double(std::numeric_limits<int64_t>::max()) ||
       a < double(std::numeric_limits<int64_t>::min())) {
     throw c10::Error(
-        "Cannot convert float " + std::to_string(a) + " to integer", "");
+        "Cannot convert float " + c10::to_string(a) + " to integer", "");
     return;
   }
 }
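
The hunk above also shows the bounds test that guards float-to-integer
conversion. A standalone sketch of the same predicate in isolation
(illustrative, not the PyTorch function):

    #include <cstdint>
    #include <iostream>
    #include <limits>

    // Reimplements just the bounds test from checkDoubleInRange.
    // Note: double(INT64_MAX) rounds to exactly 2^63, since INT64_MAX
    // itself is not representable as a double, so the boundary is inexact.
    static bool fitsInInt64(double a) {
      return a <= double(std::numeric_limits<int64_t>::max()) &&
             a >= double(std::numeric_limits<int64_t>::min());
    }

    int main() {
      std::cout << fitsInInt64(1e18) << "\n";  // 1: within int64 range
      std::cout << fitsInInt64(1e19) << "\n";  // 0: exceeds int64 range
    }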
10 changes: 5 additions & 5 deletions torch/csrc/jit/script/compiler.cpp
@@ -615,7 +615,7 @@ struct to_ir {
   std::vector<DefContext> def_stack_;
   size_t temp_name_count_ = 0;
   std::string createTempName(const std::string& prefix) {
-    return prefix + std::to_string(temp_name_count_++);
+    return prefix + c10::to_string(temp_name_count_++);
   }

   void pushFrame(Block* b, bool starts_def = false) {
@@ -2236,7 +2236,7 @@ struct to_ir {
     case TK_IN:
       return aten::__contains__;
     default:
-      throw std::runtime_error("unknown kind " + std::to_string(kind));
+      throw std::runtime_error("unknown kind " + c10::to_string(kind));
   }
 }

@@ -2279,7 +2279,7 @@ struct to_ir {
     case TK_IN:
       return "__contains__";
     default:
-      throw std::runtime_error("unknown kind " + std::to_string(kind));
+      throw std::runtime_error("unknown kind " + c10::to_string(kind));
   }
 }

@@ -3253,15 +3253,15 @@ c10::QualifiedName CompilationUnit::mangle(
     // Append the part of the name up to the end of the prefix
     newAtom.append(atom, 0, pos);
     newAtom.append(manglePrefix);
-    newAtom.append(std::to_string(mangleIndex_++));
+    newAtom.append(c10::to_string(mangleIndex_++));
     atom = newAtom;
     return QualifiedName(atoms);
   }
 }

 // Otherwise add a mangle namespace right before the basename
 TORCH_INTERNAL_ASSERT(!atoms.empty());
-atoms.insert(atoms.end() - 1, manglePrefix + std::to_string(mangleIndex_++));
+atoms.insert(atoms.end() - 1, manglePrefix + c10::to_string(mangleIndex_++));
 return QualifiedName(atoms);
}
2 changes: 1 addition & 1 deletion torch/csrc/jit/script/tree_views.h
@@ -906,7 +906,7 @@ struct SliceExpr : public Expr {

 private:
  Expr createInt(int value) const {
-   return Expr(Const::create(range(), std::to_string(value)));
+   return Expr(Const::create(range(), c10::to_string(value)));
  }
};
2 changes: 1 addition & 1 deletion torch/csrc/jit/testing/file_check.cpp
@@ -218,7 +218,7 @@ struct FileCheckImpl {
   }
   size_t end =
       assertFind(SourceRange(source, end_check_string, end_line), ":");
-  count = std::stoll(
+  count = c10::stoll(
       source->text().substr(end_check_string, end - end_check_string));
   end_check_string = end + 2; // add ':' and the space
 }
