Commit 34e5438

Run clang-tidy on all the C++ code

Luthaf committed Sep 13, 2023
1 parent f92a2fa commit 34e5438

Showing 4 changed files with 68 additions and 70 deletions.
73 changes: 36 additions & 37 deletions rascaline-c-api/include/rascaline.hpp
@@ -31,8 +31,8 @@ namespace rascaline {
 class RascalineError : public std::runtime_error {
 public:
     /// Create a new error with the given message
-    RascalineError(std::string message): std::runtime_error(message) {}
-    ~RascalineError() = default;
+    RascalineError(const std::string& message): std::runtime_error(message) {}
+    ~RascalineError() override = default;

     /// RascalineError is copy-constructible
     RascalineError(const RascalineError&) = default;
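Note: the two changes above look like fixes for clang-tidy's `performance-unnecessary-value-param` and `modernize-use-override` (an assumption; the commit message does not list the checks that ran). Taking `std::string` by value copies the message at every call site, and since `std::runtime_error` has a virtual destructor, the derived destructor can carry `override`. A minimal self-contained sketch of the resulting pattern:

    #include <stdexcept>
    #include <string>

    class MyError : public std::runtime_error {
    public:
        // const reference: no copy of the message at the call site
        MyError(const std::string& message): std::runtime_error(message) {}
        // std::runtime_error's destructor is virtual, so `override` applies
        ~MyError() override = default;
    };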
@@ -48,7 +48,7 @@ namespace details {
 /// Class able to store exceptions and retrieve them later
 class ExceptionsStore {
 public:
-    ExceptionsStore(): map_(), next_id_(-1) {}
+    ExceptionsStore() = default;

     ExceptionsStore(const ExceptionsStore&) = delete;
     ExceptionsStore(ExceptionsStore&&) = delete;
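Note: the hand-written constructor becomes `= default` because its only non-trivial initialization, `next_id_(-1)`, moves to an in-class initializer (`int32_t next_id_ = -1;` at the end of the next hunk). This matches clang-tidy's `modernize-use-default-member-init`, again an assumption about which check fired. Sketch:

    #include <cstdint>
    #include <unordered_map>

    class Store {
    public:
        Store() = default;  // members initialize themselves below

    private:
        std::unordered_map<int32_t, int> map_;  // default-constructed
        int32_t next_id_ = -1;                  // in-class initializer
    };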
@@ -58,36 +58,36 @@ namespace details {
     /// Save an exception pointer inside the exceptions store and return the
     /// corresponding id as a **negative** integer.
     int32_t save_exception(std::exception_ptr exception) {
-        auto id = next_id_;
+        auto exception_id = next_id_;

         // this should not underflow, but better safe than sorry
         if (next_id_ == INT32_MIN) {
             throw RascalineError("too many exceptions, what are you doing???");
         }
         next_id_ -= 1;

-        map_.emplace(id, std::move(exception));
-        return id;
+        map_.emplace(exception_id, std::move(exception));
+        return exception_id;
     }

     /// Get the exception pointer corresponding to the given exception id.
     /// The id **MUST** have been generated by a previous call to
     /// `save_exception`.
-    std::exception_ptr extract_exception(int32_t id) {
-        auto it = map_.find(id);
-        if (it == map_.end()) {
+    std::exception_ptr extract_exception(int32_t exception_id) {
+        auto iterator = map_.find(exception_id);
+        if (iterator == map_.end()) {
             throw RascalineError("internal error: tried to access a non-existing exception");
         }

-        auto exception = it->second;
-        map_.erase(it);
+        auto exception = iterator->second;
+        map_.erase(iterator);

         return exception;
     }

 private:
     std::unordered_map<int32_t, std::exception_ptr> map_;
-    int32_t next_id_;
+    int32_t next_id_ = -1;
 };

 /// Singleton version of `ExceptionsStore`, protected by a mutex to be safe
@@ -99,16 +99,16 @@ namespace details {
     static int32_t save_exception(std::exception_ptr exception) {
         const std::lock_guard<std::mutex> lock(GlobalExceptionsStore::mutex());
         auto& store = GlobalExceptionsStore::instance();
-        return store.save_exception(std::move(exception));
+        return store.save_exception(exception);
     }

     /// Get the exception pointer corresponding to the given exception id.
     /// The id **MUST** have been generated by a previous call to
     /// `save_exception`.
-    static std::exception_ptr extract_exception(int32_t id) {
+    static std::exception_ptr extract_exception(int32_t exception_id) {
         const std::lock_guard<std::mutex> lock(GlobalExceptionsStore::mutex());
         auto& store = GlobalExceptionsStore::instance();
-        return store.extract_exception(id);
+        return store.extract_exception(exception_id);
     }

 private:
@@ -144,8 +144,10 @@ namespace details {
             __code__                                                        \
             return RASCAL_SUCCESS;                                          \
         } catch (...) {                                                     \
-            auto e = std::current_exception();                              \
-            return details::GlobalExceptionsStore::save_exception(std::move(e));\
+            auto exception = std::current_exception();                      \
+            return details::GlobalExceptionsStore::save_exception(          \
+                std::move(exception)                                        \
+            );                                                              \
         }                                                                   \
     } while (false)
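Note: this macro is the bridge that lets C++ exceptions cross the C ABI: the wrapped code runs inside a catch-all, and any exception is parked in `GlobalExceptionsStore` in exchange for a negative status code that the caller can later trade back for the original `std::exception_ptr`. The `do { ... } while (false)` wrapper is the standard idiom for making a multi-statement macro behave like a single statement. A reduced sketch of the idiom (the helper and return codes here are illustrative, not the library's API; `__code__` mirrors the parameter name used in this header):

    #include <exception>

    // assumed helper: stores the exception, returns a negative id for it
    int store_exception(std::exception_ptr exception);

    // expands to a single statement: returns 0 on success, or a
    // negative id identifying the stored exception on failure
    #define CATCH_EXCEPTIONS(__code__)                                \
        do {                                                          \
            try {                                                     \
                __code__                                              \
                return 0;                                             \
            } catch (...) {                                           \
                return store_exception(std::current_exception());     \
            }                                                         \
        } while (false)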

@@ -248,7 +250,7 @@ class System {
         [](const void* self, double* cell) {
             RASCAL_SYSTEM_CATCH_EXCEPTIONS(
                 auto cpp_cell = reinterpret_cast<const System*>(self)->cell();
-                std::memcpy(cell, &cpp_cell[0][0], 9 * sizeof(double));
+                std::memcpy(cell, cpp_cell.data(), 9 * sizeof(double));
             );
         },
         // compute_neighbors
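Note: `&cpp_cell[0][0]` and `cpp_cell.data()` address the same storage for a contiguous container, but `.data()` states the intent directly and is also valid when a container is empty, where `&buffer[0]` would be undefined behavior. This is what clang-tidy's `readability-container-data-pointer` suggests, and the same `&buffer[0]` to `buffer.data()` substitution recurs in the string-buffer loops further down. A sketch, assuming `cell()` returns a nested `std::array` as it appears to here:

    #include <array>
    #include <cstring>

    void copy_cell(double* out) {
        std::array<std::array<double, 3>, 3> cell{};  // 3x3 cell matrix
        // .data() says "pointer to the underlying storage" explicitly
        std::memcpy(out, cell.data(), 9 * sizeof(double));
    }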
@@ -293,12 +295,12 @@ class BasicSystems {
     /// chemfiles](https://chemfiles.org/chemfiles/latest/formats.html).
     ///
     /// @throws RascalineError if chemfiles can not read the file
-    BasicSystems(std::string path): systems_(nullptr), count_(0) {
+    BasicSystems(const std::string& path) {
         details::check_status(rascal_basic_systems_read(path.c_str(), &systems_, &count_));
     }

-    ~BasicSystems() {
-        details::check_status(rascal_basic_systems_free(systems_, count_));
+    ~BasicSystems() noexcept {
+        rascal_basic_systems_free(systems_, count_);
     }

     /// BasicSystems is **NOT** copy-constructible
@@ -307,12 +309,12 @@
     BasicSystems& operator=(const BasicSystems&) = delete;

     /// BasicSystems is move-constructible
-    BasicSystems(BasicSystems&& other) {
+    BasicSystems(BasicSystems&& other) noexcept {
         *this = std::move(other);
     }

     /// BasicSystems can be move-assigned
-    BasicSystems& operator=(BasicSystems&& other) {
+    BasicSystems& operator=(BasicSystems&& other) noexcept {
         this->~BasicSystems();
         this->systems_ = nullptr;
         this->count_ = 0;
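Note: two related fixes in this and the previous hunk. The destructor stops routing the free through `check_status`: a destructor that throws during stack unwinding terminates the program, so it now frees unconditionally and is marked `noexcept` to make that contract explicit. The move constructor and move assignment also gain `noexcept`, which is what clang-tidy's `performance-noexcept-move-constructor` suggests, because standard containers only move (rather than copy) elements whose move operations are `noexcept`. The same annotations are applied to `LabelsSelection` and `Calculator` below. A minimal sketch of the ownership pattern, against a hypothetical C API:

    #include <utility>

    extern "C" void c_handle_free(void* handle);  // hypothetical; assumed to accept null

    class Handle {
    public:
        ~Handle() noexcept {
            c_handle_free(ptr_);  // never throw from a destructor
        }

        Handle(Handle&& other) noexcept { *this = std::move(other); }

        Handle& operator=(Handle&& other) noexcept {
            c_handle_free(ptr_);                        // release current resource
            ptr_ = std::exchange(other.ptr_, nullptr);  // steal, leave other empty
            return *this;
        }

        Handle(const Handle&) = delete;
        Handle& operator=(const Handle&) = delete;

    private:
        void* ptr_ = nullptr;
    };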
@@ -398,12 +400,12 @@ class LabelsSelection {
     }

     /// LabelsSelection can be move-constructed
-    LabelsSelection(LabelsSelection&& other): LabelsSelection(std::nullopt, std::nullopt) {
+    LabelsSelection(LabelsSelection&& other) noexcept : LabelsSelection(std::nullopt, std::nullopt) {
         *this = std::move(other);
     }

     /// LabelsSelection can be move-assigned
-    LabelsSelection& operator=(LabelsSelection&& other) {
+    LabelsSelection& operator=(LabelsSelection&& other) noexcept {
         this->subset_ = std::move(other.subset_);
         this->predefined_ = std::move(other.predefined_);
         if (this->subset_) {
@@ -569,7 +571,7 @@ class Calculator {
     }

     ~Calculator() {
-        details::check_status(rascal_calculator_free(this->calculator_));
+        rascal_calculator_free(this->calculator_);
     }

     /// Calculator is **NOT** copy-constructible
@@ -578,12 +580,12 @@
     Calculator& operator=(const Calculator&) = delete;

     /// Calculator is move-constructible
-    Calculator(Calculator&& other) {
+    Calculator(Calculator&& other) noexcept {
         *this = std::move(other);
     }

     /// Calculator can be move-assigned
-    Calculator& operator=(Calculator&& other) {
+    Calculator& operator=(Calculator&& other) noexcept {
         this->~Calculator();
         this->calculator_ = nullptr;

@@ -597,7 +599,7 @@
         auto buffer = std::vector<char>(32, '\0');
         while (true) {
             auto status = rascal_calculator_name(
-                calculator_, &buffer[0], buffer.size()
+                calculator_, buffer.data(), buffer.size()
             );

             if (status != RASCAL_BUFFER_SIZE_ERROR) {
@@ -615,7 +617,7 @@
         auto buffer = std::vector<char>(256, '\0');
         while (true) {
             auto status = rascal_calculator_parameters(
-                calculator_, &buffer[0], buffer.size()
+                calculator_, buffer.data(), buffer.size()
             );

             if (status != RASCAL_BUFFER_SIZE_ERROR) {
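Note: `name()`, `parameters()` and `Profiler::get()` below all share the same grow-and-retry pattern, with the success path collapsed in this view. A sketch of the full loop, assuming the hidden branch checks the status and builds the string, consistent with the rest of this header (it relies on the header's own declarations and includes):

    // sketch, in the context of rascaline.hpp
    std::string get_name(rascal_calculator_t* calculator) {
        auto buffer = std::vector<char>(32, '\0');
        while (true) {
            auto status = rascal_calculator_name(
                calculator, buffer.data(), buffer.size()
            );

            if (status != RASCAL_BUFFER_SIZE_ERROR) {
                details::check_status(status);      // throws on real errors
                return std::string(buffer.data());  // stops at the NUL terminator
            }
            buffer.resize(buffer.size() * 2, '\0'); // too small: double and retry
        }
    }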
@@ -741,6 +743,8 @@ class Calculator {
 /// other function).
 class Profiler {
 public:
+    Profiler() = delete;
+
     /// Enable or disable profiling data collection. By default, data collection
     /// is disabled.
     ///
@@ -766,11 +770,11 @@ class Profiler {
     /// @param format in which format should the data be provided. `"table"`,
     /// `"short_table"` and `"json"` are currently supported
     /// @returns the current profiling data, in the requested format
-    static std::string get(std::string format) {
+    static std::string get(const std::string& format) {
         auto buffer = std::vector<char>(1024, '\0');
         while (true) {
             auto status = rascal_profiling_get(
-                format.c_str(), &buffer[0], buffer.size()
+                format.c_str(), buffer.data(), buffer.size()
             );

             if (status != RASCAL_BUFFER_SIZE_ERROR) {
@@ -782,11 +786,6 @@
             buffer.resize(buffer.size() * 2, '\0');
         }
     }
-
-private:
-    // make the constructor private and undefined since this class only offers
-    // static functions.
-    Profiler();
 };
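Note: this last hunk is the counterpart of the `Profiler() = delete;` added above: the pre-C++11 way to forbid construction was a private, never-defined constructor, which fails only at link time (or with an opaque access error), while `= delete` fails at compile time with a clear diagnostic; clang-tidy's `modernize-use-equals-delete` automates this rewrite. Sketch:

    class StaticOnly {
    public:
        StaticOnly() = delete;  // any construction attempt is a compile error

        static int do_something() { return 42; }
    };

    // StaticOnly s;                        // error: call to deleted constructor
    // int x = StaticOnly::do_something(); // fine: no instance needed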

}
6 changes: 3 additions & 3 deletions rascaline-torch/include/rascaline/torch.hpp
@@ -1,10 +1,10 @@
 #ifndef RASCALINE_TORCH_HPP
 #define RASCALINE_TORCH_HPP

-#include "rascaline/torch/exports.h"
+#include "rascaline/torch/exports.h" // IWYU pragma: export

-#include "rascaline/torch/system.hpp"
-#include "rascaline/torch/calculator.hpp"
+#include "rascaline/torch/system.hpp" // IWYU pragma: export
+#include "rascaline/torch/calculator.hpp" // IWYU pragma: export


 #endif
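Note: `// IWYU pragma: export` tells include-what-you-use that this umbrella header intentionally re-exports those includes, so users who write `#include "rascaline/torch.hpp"` are not told to include the fine-grained headers instead. A consumer would look like this (the namespace and type name are illustrative assumptions, not taken from this diff):

    #include "rascaline/torch.hpp"

    // without the pragmas, IWYU would suggest including
    // "rascaline/torch/calculator.hpp" here directly
    void run(rascaline_torch::CalculatorHolder& calculator);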
44 changes: 22 additions & 22 deletions rascaline-torch/src/autograd.cpp
@@ -215,7 +215,7 @@ std::vector<torch::Tensor> PositionsGrad::forward(

     auto samples = dX_dr->samples();
     auto samples_values = samples->values();
-    auto samples_values_ptr = samples_values.data_ptr<int32_t>();
+    auto* samples_values_ptr = samples_values.data_ptr<int32_t>();
     always_assert(samples->names().size() == 3);
     always_assert(samples->names()[0] == "sample");
     always_assert(samples->names()[1] == "structure");
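Note: from here on this file's diff is one mechanical change repeated: `auto ptr = tensor.data_ptr<T>();` becomes `auto* ptr = ...`. The deduced type is identical either way; writing the `*` makes it visible at the declaration that the variable is a raw pointer, which is clang-tidy's `readability-qualified-auto`. Sketch:

    #include <torch/torch.h>

    void example(torch::Tensor values) {
        // assumes a CPU tensor of doubles; same deduced type either way,
        // but `auto*` documents pointer-ness at the declaration
        auto* ptr = values.data_ptr<double>();
        ptr[0] = 1.0;
    }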
@@ -224,15 +224,15 @@
     // ========================= extract pointers =========================== //
     auto dA_dr = torch::zeros_like(all_positions);
     always_assert(dA_dr.is_contiguous() && dA_dr.is_cpu());
-    auto dA_dr_ptr = dA_dr.data_ptr<double>();
+    auto* dA_dr_ptr = dA_dr.data_ptr<double>();

     auto dX_dr_values = dX_dr->values();
     always_assert(dX_dr_values.is_contiguous() && dX_dr_values.is_cpu());
-    auto dX_dr_ptr = dX_dr_values.data_ptr<double>();
+    auto* dX_dr_ptr = dX_dr_values.data_ptr<double>();

     const auto& dA_dX_sizes = dA_dX.sizes();
     always_assert(dA_dX.is_contiguous() && dA_dX.is_cpu());
-    auto dA_dX_ptr = dA_dX.data_ptr<double>();
+    auto* dA_dX_ptr = dA_dX.data_ptr<double>();

     // total size of component + property dimension
     int64_t n_features = 1;
@@ -284,7 +284,7 @@ std::vector<torch::Tensor> PositionsGrad::backward(

     auto samples = dX_dr->samples();
     auto samples_values = samples->values();
-    auto samples_values_ptr = samples_values.data_ptr<int32_t>();
+    auto* samples_values_ptr = samples_values.data_ptr<int32_t>();
     always_assert(samples->names().size() == 3);
     always_assert(samples->names()[0] == "sample");
     always_assert(samples->names()[1] == "structure");
@@ -293,14 +293,14 @@
     // ========================= extract pointers =========================== //
     auto dX_dr_values = dX_dr->values();
     always_assert(dX_dr_values.is_contiguous() && dX_dr_values.is_cpu());
-    auto dX_dr_ptr = dX_dr_values.data_ptr<double>();
+    auto* dX_dr_ptr = dX_dr_values.data_ptr<double>();

     always_assert(dB_d_dA_dr.is_contiguous() && dB_d_dA_dr.is_cpu());
-    auto dB_d_dA_dr_ptr = dB_d_dA_dr.data_ptr<double>();
+    auto* dB_d_dA_dr_ptr = dB_d_dA_dr.data_ptr<double>();

     const auto& dA_dX_sizes = dA_dX.sizes();
     always_assert(dA_dX.is_contiguous() && dA_dX.is_cpu());
-    auto dA_dX_ptr = dA_dX.data_ptr<double>();
+    auto* dA_dX_ptr = dA_dX.data_ptr<double>();

     // total size of component + property dimension
     int64_t n_features = 1;
@@ -323,7 +323,7 @@ std::vector<torch::Tensor> PositionsGrad::backward(
     if (dA_dX.requires_grad()) {
         dB_d_dA_dX = torch::zeros_like(dA_dX);
         always_assert(dB_d_dA_dX.is_contiguous() && dB_d_dA_dX.is_cpu());
-        auto dB_d_dA_dX_ptr = dB_d_dA_dX.data_ptr<double>();
+        auto* dB_d_dA_dX_ptr = dB_d_dA_dX.data_ptr<double>();

         // dX_dr.shape == [positions gradient samples, 3, features...]
         // dB_d_dA_dr.shape == [n_atoms, 3]
@@ -372,22 +372,22 @@ std::vector<torch::Tensor> CellGrad::forward(
     always_assert(all_cells.requires_grad());
     auto cell_grad = torch::zeros_like(all_cells);
     always_assert(cell_grad.is_contiguous() && cell_grad.is_cpu());
-    auto cell_grad_ptr = cell_grad.data_ptr<double>();
+    auto* cell_grad_ptr = cell_grad.data_ptr<double>();

     auto samples = dX_dH->samples();
     auto samples_values = samples->values();
-    auto samples_values_ptr = samples_values.data_ptr<int32_t>();
+    auto* samples_values_ptr = samples_values.data_ptr<int32_t>();
     always_assert(samples->names().size() == 1);
     always_assert(samples->names()[0] == "sample");

     // ========================= extract pointers =========================== //
     auto dX_dH_values = dX_dH->values();
     always_assert(dX_dH_values.is_contiguous() && dX_dH_values.is_cpu());
-    auto dX_dH_ptr = dX_dH_values.data_ptr<double>();
+    auto* dX_dH_ptr = dX_dH_values.data_ptr<double>();

     const auto& dA_dX_sizes = dA_dX.sizes();
     always_assert(dA_dX.is_contiguous() && dA_dX.is_cpu());
-    auto dA_dX_ptr = dA_dX.data_ptr<double>();
+    auto* dA_dX_ptr = dA_dX.data_ptr<double>();

     // total size of component + property dimension
     int64_t n_features = 1;
@@ -399,16 +399,16 @@
     for (int64_t grad_sample_i=0; grad_sample_i<samples->count(); grad_sample_i++) {
         auto sample_i = samples_values_ptr[grad_sample_i];
         // we get the structure from the samples of the values
-        auto structure_i = structures[sample_i].item<int32_t>();
+        auto structure_i = static_cast<int64_t>(structures[sample_i].item<int32_t>());

         for (int64_t direction_1=0; direction_1<3; direction_1++) {
             for (int64_t direction_2=0; direction_2<3; direction_2++) {
                 auto dot = 0.0;
                 for (int64_t i=0; i<n_features; i++) {
-                    auto id = (grad_sample_i * 3 + direction_2) * 3 + direction_1;
+                    auto sample_component_row = (grad_sample_i * 3 + direction_2) * 3 + direction_1;
                     dot += (
                         dA_dX_ptr[sample_i * n_features + i]
-                        * dX_dH_ptr[id * n_features + i]
+                        * dX_dH_ptr[sample_component_row * n_features + i]
                     );
                 }
                 cell_grad_ptr[(structure_i * 3 + direction_1) * 3 + direction_2] += dot;
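Note: two fixes in this hunk. The uninformative name `id` becomes `sample_component_row`, describing the row of the flattened `[gradient sample][3][3]` layout it indexes (very short names like this trip clang-tidy's `readability-identifier-length`). And the structure index is widened with `static_cast<int64_t>` before it enters the index arithmetic, so `(structure_i * 3 + direction_1) * 3 + direction_2` is computed in 64 bits from the start instead of being multiplied in 32 bits and widened afterwards, the pattern that checks like `bugprone-implicit-widening-of-multiplication-result` warn about. The indexing itself is plain row-major flattening:

    #include <cstdint>

    // flat offset into a row-major [n_structures][3][3] buffer
    int64_t cell_grad_index(int64_t structure_i, int64_t direction_1, int64_t direction_2) {
        return (structure_i * 3 + direction_1) * 3 + direction_2;  // all 64-bit math
    }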
@@ -440,21 +440,21 @@ std::vector<torch::Tensor> CellGrad::backward(

     auto samples = dX_dH->samples();
     auto samples_values = samples->values();
-    auto samples_values_ptr = samples_values.data_ptr<int32_t>();
+    auto* samples_values_ptr = samples_values.data_ptr<int32_t>();
     always_assert(samples->names().size() == 1);
     always_assert(samples->names()[0] == "sample");

     // ========================= extract pointers =========================== //
     auto dX_dH_values = dX_dH->values();
     always_assert(dX_dH_values.is_contiguous() && dX_dH_values.is_cpu());
-    auto dX_dH_ptr = dX_dH_values.data_ptr<double>();
+    auto* dX_dH_ptr = dX_dH_values.data_ptr<double>();

     always_assert(dB_d_dA_dH.is_contiguous() && dB_d_dA_dH.is_cpu());
-    auto dB_d_dA_dH_ptr = dB_d_dA_dH.data_ptr<double>();
+    auto* dB_d_dA_dH_ptr = dB_d_dA_dH.data_ptr<double>();

     const auto& dA_dX_sizes = dA_dX.sizes();
     always_assert(dA_dX.is_contiguous() && dA_dX.is_cpu());
-    auto dA_dX_ptr = dA_dX.data_ptr<double>();
+    auto* dA_dX_ptr = dA_dX.data_ptr<double>();

     // total size of component + property dimension
     int64_t n_features = 1;
@@ -477,14 +477,14 @@
     if (dA_dX.requires_grad()) {
         dB_d_dA_dX = torch::zeros_like(dA_dX);
         always_assert(dB_d_dA_dX.is_contiguous() && dB_d_dA_dX.is_cpu());
-        auto dB_d_dA_dX_ptr = dB_d_dA_dX.data_ptr<double>();
+        auto* dB_d_dA_dX_ptr = dB_d_dA_dX.data_ptr<double>();

         // dX_dH.shape == [cell gradient samples, 3, 3, features...]
         // dB_d_dA_dH.shape == [structures, 3, 3]
         // dB_d_dA_dX.shape == [samples, features...]
         for (int64_t grad_sample_i=0; grad_sample_i<samples->count(); grad_sample_i++) {
             auto sample_i = samples_values_ptr[grad_sample_i];
-            auto structure_i = structures[sample_i].item<int32_t>();
+            auto structure_i = static_cast<int64_t>(structures[sample_i].item<int32_t>());

             for (int64_t i=0; i<n_features; i++) {
                 auto dot = 0.0;
(The diff for the fourth changed file is not shown in this view.)