Commit 9ded99d: styles applied
mbencer committed Oct 5, 2024 (1 parent: 2cbe01d)
Showing 8 changed files with 30 additions and 18 deletions.
@@ -184,7 +184,8 @@ template <typename T_BackendContext> ITensorRegistry *genTensors(T_BackendContex
 
   // process source tensors for shared memory at first
   std::vector<ir::OperandIndex> registered_source_ind;
-  for(const auto& [_, source_ind] : tensor_builder->getOperandsWithSharedMemory()) {
+  for (const auto &[_, source_ind] : tensor_builder->getOperandsWithSharedMemory())
+  {
     if (ctx.external_operands().contains(source_ind))
       continue;
     tensor_builder->registerTensorInfo(source_ind, graph.operands().at(source_ind).info());
@@ -194,7 +195,8 @@ template <typename T_BackendContext> ITensorRegistry *genTensors(T_BackendContex
   graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) {
     if (ctx.external_operands().contains(ind))
       return;
-    if(std::find(std::begin(registered_source_ind), std::end(registered_source_ind), ind) != std::end(registered_source_ind)) // skip tensors already registered
+    if (std::find(std::begin(registered_source_ind), std::end(registered_source_ind), ind) !=
+        std::end(registered_source_ind)) // skip tensors already registered
       return;
     tensor_builder->registerTensorInfo(ind, obj.info());
   });
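The two hunks above only re-wrap existing lines, but they format the shared-memory registration order in genTensors: operands that act as memory-sharing sources are registered first, and the general operand iteration then skips anything already registered. Below is a minimal, self-contained sketch of that two-pass pattern; the plain int indices, the std::unordered_map stand-in for ir::OperandIndexMap, and the push_back into registered_source_ind (which happens in the part of the hunk elided above) are assumptions for illustration, not the runtime's actual code.

#include <algorithm>
#include <iostream>
#include <unordered_map>
#include <vector>

int main()
{
  // Hypothetical stand-ins: operand index -> index of the source operand whose memory it shares.
  std::unordered_map<int, int> operands_with_shared_memory{{5, 1}, {7, 2}};
  std::vector<int> all_operands{1, 2, 3, 4, 5, 6, 7};

  // Pass 1: register the shared-memory source operands first.
  std::vector<int> registered_source_ind;
  for (const auto &[_, source_ind] : operands_with_shared_memory)
  {
    std::cout << "register (shared-memory source) " << source_ind << "\n";
    registered_source_ind.push_back(source_ind);
  }

  // Pass 2: register the remaining operands, skipping sources already handled in pass 1.
  for (const auto &ind : all_operands)
  {
    if (std::find(std::begin(registered_source_ind), std::end(registered_source_ind), ind) !=
        std::end(registered_source_ind)) // skip tensors already registered
      continue;
    std::cout << "register " << ind << "\n";
  }
  return 0;
}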
2 changes: 1 addition & 1 deletion runtime/onert/core/include/backend/basic/TensorBuilder.h
@@ -56,7 +56,7 @@ class TensorBuilder
 
   void allocate(void);
 
-  const ir::OperandIndexMap<ir::OperandIndex>& getOperandsWithSharedMemory() const;
+  const ir::OperandIndexMap<ir::OperandIndex> &getOperandsWithSharedMemory() const;
 
   DynamicTensorManager *dynamicTensorManager(void) { return _dynamic_tensor_mgr.get(); }

5 changes: 3 additions & 2 deletions runtime/onert/core/src/backend/basic/TensorBuilder.cc
@@ -33,7 +33,7 @@ TensorBuilder::TensorBuilder(
   : _tensor_reg{tensor_reg}, _dynamic_tensor_mgr{new DynamicTensorManager(_tensor_reg)},
     _static_tensor_mgr{
       new StaticTensorManager(_tensor_reg, _dynamic_tensor_mgr.get(), operands_with_shared_memory)},
-    _operands_with_shared_memory{operands_with_shared_memory}
+    _operands_with_shared_memory{operands_with_shared_memory}
 {
   /* empty */
 }
@@ -89,7 +89,8 @@ bool TensorBuilder::isRegistered(const ir::OperandIndex &ind) const
 
 void TensorBuilder::allocate(void) { _static_tensor_mgr->allocateNonconsts(); }
 
-const ir::OperandIndexMap<ir::OperandIndex>& TensorBuilder::getOperandsWithSharedMemory() const {
+const ir::OperandIndexMap<ir::OperandIndex> &TensorBuilder::getOperandsWithSharedMemory() const
+{
   return _operands_with_shared_memory;
 }

3 changes: 2 additions & 1 deletion runtime/onert/core/src/backend/builtin/TensorBuilder.cc
@@ -102,7 +102,8 @@ basic::Tensor *TensorBuilder::nativeOwnTensorAt(const ir::OperandIndex &ind)
   return _tensor_reg->getNativeOwnTensor(ind);
 }
 
-const ir::OperandIndexMap<ir::OperandIndex>& TensorBuilder::getOperandsWithSharedMemory() const {
+const ir::OperandIndexMap<ir::OperandIndex> &TensorBuilder::getOperandsWithSharedMemory() const
+{
   return _operands_with_shared_memory;
 }

2 changes: 1 addition & 1 deletion runtime/onert/core/src/backend/builtin/TensorBuilder.h
@@ -54,7 +54,7 @@ class TensorBuilder
 
   void allocate(void);
 
-  const ir::OperandIndexMap<ir::OperandIndex>& getOperandsWithSharedMemory() const;
+  const ir::OperandIndexMap<ir::OperandIndex> &getOperandsWithSharedMemory() const;
 
   DynamicTensorManager *dynamicTensorManager(void);

3 changes: 2 additions & 1 deletion tests/nnfw_api/lib/CircleGen.cc
@@ -582,7 +582,8 @@ uint32_t CircleGen::addOperatorSquare(const OperatorParams &params)
                       circle::BuiltinOptions_SquareOptions, options);
 }
 
-uint32_t CircleGen::addOperatorSqueeze(const OperatorParams &params, const std::vector<int32_t>& squeeze_dims)
+uint32_t CircleGen::addOperatorSqueeze(const OperatorParams &params,
+                                       const std::vector<int32_t> &squeeze_dims)
 {
   auto squeeze_dims_vec = _fbb.CreateVector(squeeze_dims.data(), squeeze_dims.size());
   auto options = circle::CreateSqueezeOptions(_fbb, squeeze_dims_vec).Union();
3 changes: 2 additions & 1 deletion tests/nnfw_api/lib/CircleGen.h
@@ -221,7 +221,8 @@ class CircleGen
   uint32_t addOperatorSplit(const OperatorParams &params, int32_t num_split);
   uint32_t addOperatorSqrt(const OperatorParams &params);
   uint32_t addOperatorSquare(const OperatorParams &params);
-  uint32_t addOperatorSqueeze(const OperatorParams &params, const std::vector<int32_t>& squeeze_dims);
+  uint32_t addOperatorSqueeze(const OperatorParams &params,
+                              const std::vector<int32_t> &squeeze_dims);
   uint32_t addOperatorStridedSlice(const OperatorParams &params, int32_t begin_mask = 0,
                                    int32_t end_mask = 0, int32_t ellipsis_mask = 0,
                                    int32_t new_axis_mask = 0, int32_t shrink_axis_mask = 0);
24 changes: 15 additions & 9 deletions tests/nnfw_api/src/GenModelTests/MemorySharingModels.test.cc
@@ -38,7 +38,8 @@ TEST_F(GenModelTest, OptimizedReshapeInferenceSuccessfully)
   cgen.setInputsAndOutputs({input}, {cos2_out});
 
   _context = std::make_unique<GenModelTestContext>(cgen.finish());
-  _context->addTestCase(uniformTCD<float>({{1, 2, 3, 4}}, {{0.85755322, 0.91465333, 0.54869613, 0.79387345}}));
+  _context->addTestCase(
+    uniformTCD<float>({{1, 2, 3, 4}}, {{0.85755322, 0.91465333, 0.54869613, 0.79387345}}));
   _context->setBackends({"cpu"});
 
   SUCCEED();
@@ -61,7 +62,8 @@ TEST_F(GenModelTest, OptimizedExpandDimsInferenceSuccessfully)
   cgen.setInputsAndOutputs({input}, {cos2_out});
 
   _context = std::make_unique<GenModelTestContext>(cgen.finish());
-  _context->addTestCase(uniformTCD<float>({{1, 2, 3, 4}}, {{0.85755322, 0.91465333, 0.54869613, 0.79387345}}));
+  _context->addTestCase(
+    uniformTCD<float>({{1, 2, 3, 4}}, {{0.85755322, 0.91465333, 0.54869613, 0.79387345}}));
   _context->setBackends({"cpu"});
 
   SUCCEED();
@@ -84,7 +86,8 @@ TEST_F(GenModelTest, OptimizedReshapeConstInput)
   cgen.setInputsAndOutputs({}, {cos_out});
 
   _context = std::make_unique<GenModelTestContext>(cgen.finish());
-  _context->addTestCase(uniformTCD<float>({}, {{0.54030231, -0.41614684, -0.9899925, -0.65364362}}));
+  _context->addTestCase(
+    uniformTCD<float>({}, {{0.54030231, -0.41614684, -0.9899925, -0.65364362}}));
   _context->setBackends({"cpu"});
 
   SUCCEED();
@@ -100,10 +103,10 @@ TEST_F(GenModelTest, OptimizedReshapeDynOutput)
   int cast2_out = cgen.addTensor({{2, 2}, circle::TensorType::TensorType_INT32});
 
   cgen.addOperatorCast({{cast_in}, {cast_out}}, circle::TensorType::TensorType_INT32,
-                       circle::TensorType::TensorType_FLOAT32);
+                       circle::TensorType::TensorType_FLOAT32);
   cgen.addOperatorReshape({{cast_out, new_shape}, {reshape_out}});
   cgen.addOperatorCast({{reshape_out}, {cast2_out}}, circle::TensorType::TensorType_FLOAT32,
-                       circle::TensorType::TensorType_INT32);
+                       circle::TensorType::TensorType_INT32);
   cgen.setInputsAndOutputs({cast_in, new_shape}, {cast2_out});
 
   _context = std::make_unique<GenModelTestContext>(cgen.finish());
@@ -128,7 +131,8 @@ TEST_F(GenModelTest, OptimizedSqueezeInferenceSuccessfully)
   cgen.setInputsAndOutputs({input}, {cos2_out});
 
   _context = std::make_unique<GenModelTestContext>(cgen.finish());
-  _context->addTestCase(uniformTCD<float>({{1, 2, 3, 4}}, {{0.85755322, 0.91465333, 0.54869613, 0.79387345}}));
+  _context->addTestCase(
+    uniformTCD<float>({{1, 2, 3, 4}}, {{0.85755322, 0.91465333, 0.54869613, 0.79387345}}));
   _context->setBackends({"cpu"});
 
   SUCCEED();
@@ -171,7 +175,9 @@ TEST_F(GenModelTest, ReshapeInputModelOutput)
   cgen.setInputsAndOutputs({input}, {cos1_out, cos2_out});
 
   _context = std::make_unique<GenModelTestContext>(cgen.finish());
-  _context->addTestCase(uniformTCD<float>({{1, 2, 3, 4}}, {{0.54030231, -0.41614684, -0.9899925, -0.65364362}, {0.85755322, 0.91465333, 0.54869613, 0.79387345}}));
+  _context->addTestCase(
+    uniformTCD<float>({{1, 2, 3, 4}}, {{0.54030231, -0.41614684, -0.9899925, -0.65364362},
+                                       {0.85755322, 0.91465333, 0.54869613, 0.79387345}}));
   _context->setBackends({"cpu"});
 
   SUCCEED();
@@ -192,9 +198,9 @@ TEST_F(GenModelTest, ReshapeOutputModelOutput)
   cgen.setInputsAndOutputs({input}, {reshape_out});
 
   _context = std::make_unique<GenModelTestContext>(cgen.finish());
-  _context->addTestCase(uniformTCD<float>({{1, 2, 3, 4}}, {{0.54030231, -0.41614684, -0.9899925, -0.65364362}}));
+  _context->addTestCase(
+    uniformTCD<float>({{1, 2, 3, 4}}, {{0.54030231, -0.41614684, -0.9899925, -0.65364362}}));
   _context->setBackends({"cpu"});
 
   SUCCEED();
 }
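For context on the re-wrapped addOperatorSqueeze signature above, here is a hypothetical GenModelTest sketch (not part of this commit) showing how the two-parameter overload would be called; the tensor shapes, the test name, and the expected pass-through values are assumptions for illustration.

TEST_F(GenModelTest, OptimizedSqueezeUsageSketch)
{
  CircleGen cgen;
  // Assumed shapes: squeezing axis 0 of a {1, 4} tensor yields a {4} tensor.
  int input = cgen.addTensor({{1, 4}, circle::TensorType::TensorType_FLOAT32});
  int squeeze_out = cgen.addTensor({{4}, circle::TensorType::TensorType_FLOAT32});
  cgen.addOperatorSqueeze({{input}, {squeeze_out}}, /*squeeze_dims=*/{0});
  cgen.setInputsAndOutputs({input}, {squeeze_out});

  _context = std::make_unique<GenModelTestContext>(cgen.finish());
  // Only the unit dimension is dropped, so the values pass through unchanged.
  _context->addTestCase(uniformTCD<float>({{1, 2, 3, 4}}, {{1, 2, 3, 4}}));
  _context->setBackends({"cpu"});

  SUCCEED();
}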
