
Commit

Implemented method to calculate parallelized NN query
guillermogilg99 committed Aug 27, 2024
1 parent 70c3170 commit d8d01d6
Showing 5 changed files with 290 additions and 53 deletions.
2 changes: 1 addition & 1 deletion include/utils/SaveDataVariantToFile.hpp
@@ -117,7 +117,7 @@ namespace Planners
if( ! fs::exists(_file_path) ){ //If file does not exist, write a header with field names
std::cout << "File does not exists. Creating header at first line" << std::endl;
out_file_data_.open(_file_path, std::ofstream::app);
out_file_data_ << "path" << std::endl;
//out_file_data_ << "path" << std::endl;
}else{
out_file_data_.open(_file_path, std::ofstream::app);
}
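As context for the change above, the touched block in SaveDataVariantToFile.hpp follows an append-only logging pattern: the file is created on first use (optionally writing a header line) and appended to on every later call, and this commit disables the header write. A minimal standalone sketch of that pattern; the helper name and record format are illustrative and not part of the repository:

#include <filesystem>
#include <fstream>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

// Append one record to a log file, creating the file on first use.
// The header write stays commented out, mirroring this commit.
void appendRecord(const fs::path &file_path, const std::string &record) {
    std::ofstream out;
    if (!fs::exists(file_path)) {
        std::cout << "File does not exist. Creating it without a header line." << std::endl;
        out.open(file_path, std::ofstream::app);
        // out << "path" << std::endl;  // header disabled, as in the diff
    } else {
        out.open(file_path, std::ofstream::app);
    }
    out << record << std::endl;
}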
12 changes: 6 additions & 6 deletions src/Planners/AStarSIREN.cpp
@@ -18,11 +18,11 @@ inline unsigned int AStarSIREN::computeG(const Node* _current, Node* _suc, unsig
float real_z = _suc->coordinates.z * map_res;

// Query the net
torch::Tensor input_tensor = torch::tensor({{real_x, real_y, real_z}}, torch::kFloat32).to(torch::kCPU);
torch::Tensor input_tensor = torch::tensor({{real_x, real_y, real_z}}, torch::kFloat32).requires_grad_(true).to(torch::kCPU);
std::vector<torch::jit::IValue> inputs;
inputs.push_back(input_tensor);
// Set the model to evaluation mode
//sdf_net.eval();
loaded_sdf.eval();
// Query the model
auto start = std::chrono::high_resolution_clock::now();
//torch::Tensor output_tensor = sdf_net.forward(input_tensor);
@@ -31,11 +31,11 @@ inline unsigned int AStarSIREN::computeG(const Node* _current, Node* _suc, unsig
std::chrono::duration<double, std::milli> duration = end - start;
// Convert the output tensor to a scalar value (assuming the model outputs a single value per input)
float model_output = output_tensor.item<float>();
// auto cost_term = static_cast<unsigned int>(cost_weight_ * _suc->cost * dist_scale_factor_reduced_);

//auto cost_term = static_cast<unsigned int>(cost_weight_ * model_output * dist_scale_factor_reduced_);

// std::cout << "Time taken to query model: " << duration.count() << " ms" << std::endl;
//output_tensor.backward();
//torch::Tensor gradient = input_tensor.grad();

//std::cout << "Model output: " << model_output << "| Gradient: " << gradient << std::endl;
auto cost_term = static_cast<unsigned int>(cost_weight_ * (1/(model_output * dist_scale_factor_reduced_)));

// auto cost_term = static_cast<unsigned int>(cost_weight_ * (1/(((static_cast<double>(_current->cost) + static_cast<double>(_suc->cost))/2) * dist_scale_factor_reduced_)));
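The AStarSIREN::computeG diff above still queries the SIREN SDF one successor at a time: each (x, y, z) coordinate is packed into a 1x3 tensor, forwarded through the TorchScript module, and the scalar output feeds the cost term, with requires_grad_(true) newly enabled so gradients can be taken later. The commit title points at a parallelized NN query; one common way to achieve that is to batch many candidate points into a single N x 3 tensor and run one forward pass. The sketch below only illustrates that idea: the helper name and batching logic are assumptions, not code from this commit, and it presumes loaded_sdf is the already-loaded torch::jit::script::Module used above.

#include <torch/script.h>

#include <array>
#include <cstdint>
#include <vector>

// Batched SDF query: pack all candidate (x, y, z) points into one N x 3
// tensor and run a single forward pass instead of N separate ones.
std::vector<float> querySdfBatch(torch::jit::script::Module &loaded_sdf,
                                 const std::vector<std::array<float, 3>> &points) {
    std::vector<float> flat;
    flat.reserve(points.size() * 3);
    for (const auto &p : points)
        flat.insert(flat.end(), p.begin(), p.end());

    const auto n = static_cast<int64_t>(points.size());
    torch::Tensor input = torch::from_blob(flat.data(), {n, 3}, torch::kFloat32)
                              .clone()
                              .requires_grad_(true);  // mirrors the requires_grad_ change in the diff

    loaded_sdf.eval();
    std::vector<torch::jit::IValue> inputs{input};
    torch::Tensor output = loaded_sdf.forward(inputs).toTensor().reshape({-1});

    // Optional: gradients of the SDF w.r.t. the query points, as hinted at by
    // the commented-out backward()/grad() lines in the diff.
    output.sum().backward();
    torch::Tensor grads = input.grad();  // shape {n, 3}
    (void)grads;  // unused in this sketch

    std::vector<float> distances(static_cast<size_t>(n));
    for (int64_t i = 0; i < n; ++i)
        distances[i] = output[i].item<float>();
    return distances;
}

A single batched forward pass lets the backend parallelize over all query points internally, which is usually much cheaper than issuing one tiny forward call per successor inside the planner loop.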
6 changes: 3 additions & 3 deletions src/Planners/LazyThetaStarM1.cpp
@@ -21,7 +21,7 @@ namespace Planners

if ( successor2->isInClosedList )
{
auto cost_term = static_cast<unsigned int>(cost_weight_ * successor2->cost * dist_scale_factor_reduced_);
auto cost_term = static_cast<unsigned int>(cost_weight_ / (successor2->cost * dist_scale_factor_reduced_));
G_new = successor2->G + geometry::distanceBetween2Nodes(successor2, _s_aux) + cost_term;
if (G_new < G_max)
{
@@ -43,7 +43,7 @@
if ( distanceParent2_nodes == 0 )
distanceParent2_nodes = 1;

auto cost_term = static_cast<unsigned int>(cost_weight_ * _s2_aux->cost * dist_scale_factor_reduced_) * distanceParent2_nodes;
auto cost_term = static_cast<unsigned int>(cost_weight_ / (_s2_aux->cost * dist_scale_factor_reduced_)) * distanceParent2_nodes;
if ( ( _s_aux->parent->G + distanceParent2 + cost_term ) < _s2_aux->G )
{
_s2_aux->parent = _s_aux->parent;
@@ -63,7 +63,7 @@
cost += (_n_i < 6 ? dist_scale_factor_ : (_n_i < 18 ? dd_2D_ : dd_3D_)); //This is more efficient
}

auto cost_term = static_cast<unsigned int>(cost_weight_ * _suc->cost * dist_scale_factor_reduced_);
auto cost_term = static_cast<unsigned int>(cost_weight_ / (_suc->cost * dist_scale_factor_reduced_));
cost += cost_term;
_suc->C = cost_term;

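All three changes in LazyThetaStarM1.cpp (and the matching ones in ThetaStarM1.cpp below) flip the cost term from being proportional to the node's stored cost to being inversely proportional to it, suggesting the stored value is now treated as a clearance-like quantity, in line with the SDF output used in AStarSIREN.cpp: the farther a node is from obstacles, the smaller its penalty. A minimal sketch of the inverted term; the member names mirror the diff, but computing in double and guarding the denominator with an epsilon are illustrative choices, not part of the commit:

#include <algorithm>

// Inverted edge-cost penalty: larger obstacle clearance -> smaller penalty.
// cost_weight_ and dist_scale_factor_reduced_ mirror the members in the diff;
// the epsilon guard avoids division by zero and is illustrative only.
unsigned int invertedCostTerm(double node_cost,
                              double cost_weight_,
                              double dist_scale_factor_reduced_) {
    const double denom = std::max(node_cost * dist_scale_factor_reduced_, 1e-6);
    return static_cast<unsigned int>(cost_weight_ / denom);
}

Doing the division in double before the final cast also avoids the truncation to zero that unsigned integer division would produce whenever the denominator exceeds cost_weight_.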
6 changes: 3 additions & 3 deletions src/Planners/ThetaStarM1.cpp
@@ -16,7 +16,7 @@ namespace Planners
if (n_checked_nodes==0)
n_checked_nodes = 1;

auto cost_term = static_cast<unsigned int>(cost_weight_ * _s2_aux->cost * dist_scale_factor_reduced_)* n_checked_nodes;
auto cost_term = static_cast<unsigned int>(cost_weight_ / (_s2_aux->cost * dist_scale_factor_reduced_))* n_checked_nodes;
if ( (_s_aux->parent->G + distanceParent2 + cost_term) < _s2_aux->G ) // Conmensurable
{
_s2_aux->parent = _s_aux->parent;
@@ -28,7 +28,7 @@
} else {
auto distance2 = geometry::distanceBetween2Nodes(_s_aux, _s2_aux);

auto cost_term = static_cast<unsigned int>(cost_weight_ * _s2_aux->cost * dist_scale_factor_reduced_);
auto cost_term = static_cast<unsigned int>(cost_weight_ / (_s2_aux->cost * dist_scale_factor_reduced_));
unsigned int G_new = _s_aux->G + distance2 + cost_term;

if ( G_new < _s2_aux->G){
@@ -50,7 +50,7 @@
cost += (_n_i < 6 ? dist_scale_factor_ : (_n_i < 18 ? dd_2D_ : dd_3D_)); //This is more efficient
}

auto cost_term = static_cast<unsigned int>(cost_weight_ * _suc->cost * dist_scale_factor_reduced_);
auto cost_term = static_cast<unsigned int>(cost_weight_ / (_suc->cost * dist_scale_factor_reduced_));
cost += cost_term;
_suc->C = cost_term;


0 comments on commit d8d01d6
