Set up benchmark methods to work with either float or double data types (#123)

* Set up benchmark methods to work with either float or double data types

* Fix typo
jatinchowdhury18 authored Dec 1, 2023
1 parent 33bee31 commit c65b549
Showing 1 changed file with 34 additions and 33 deletions.
67 changes: 34 additions & 33 deletions bench/layer_creator.hpp
@@ -3,15 +3,15 @@
 #include <RTNeural.h>
 #include <random>
 
-template <typename DenseType>
+template <typename Float = double, typename DenseType>
 void randomise_dense(DenseType &dense) {
   std::default_random_engine generator;
-  std::uniform_real_distribution<double> distribution(-1.0, 1.0);
+  std::uniform_real_distribution<Float> distribution((Float) -1, (Float) 1);
 
   // random weights
-  std::vector<std::vector<double>> denseWeights(dense.out_size);
+  std::vector<std::vector<Float>> denseWeights(dense.out_size);
   for (auto &w : denseWeights)
-    w.resize(dense.in_size, 0.0);
+    w.resize(dense.in_size, Float{});
 
   for (size_t i = 0; i < dense.out_size; ++i)
     for (size_t j = 0; j < dense.in_size; ++j)
@@ -20,25 +20,25 @@ void randomise_dense(DenseType &dense) {
   dense.setWeights(denseWeights);
 
   // random biases
-  std::vector<double> denseBias(dense.out_size);
+  std::vector<Float> denseBias(dense.out_size);
   for (size_t i = 0; i < dense.out_size; ++i)
     denseBias[i] = distribution(generator);
 
   dense.setBias(denseBias.data());
 }
 
-template <typename ConvType>
+template <typename Float = double, typename ConvType>
 void randomise_conv1d(ConvType &conv, size_t kernel_size) {
   std::default_random_engine generator;
-  std::uniform_real_distribution<double> distribution(-1.0, 1.0);
+  std::uniform_real_distribution<Float> distribution((Float) -1, (Float) 1);
 
   // random weights
-  std::vector<std::vector<std::vector<double>>> convWeights(conv.out_size);
+  std::vector<std::vector<std::vector<Float>>> convWeights(conv.out_size);
   for(auto& wIn : convWeights)
   {
     wIn.resize(conv.in_size);
     for(auto& w : wIn)
-      w.resize(kernel_size, 0.0);
+      w.resize(kernel_size, Float{});
   }
 
   for (size_t i = 0; i < conv.out_size; ++i)
@@ -49,22 +49,22 @@ void randomise_conv1d(ConvType &conv, size_t kernel_size) {
   conv.setWeights(convWeights);
 
   // random biases
-  std::vector<double> convBias(conv.out_size);
+  std::vector<Float> convBias(conv.out_size);
   for (size_t i = 0; i < conv.out_size; ++i)
     convBias[i] = distribution(generator);
 
   conv.setBias(convBias);
 }
 
-template <typename GruType>
+template <typename Float = double, typename GruType>
 void randomise_gru(GruType &gru) {
   std::default_random_engine generator;
-  std::uniform_real_distribution<double> distribution(-1.0, 1.0);
+  std::uniform_real_distribution<Float> distribution((Float) -1, (Float) 1);
 
   // kernel weights
-  std::vector<std::vector<double>> kernelWeights(gru.in_size);
+  std::vector<std::vector<Float>> kernelWeights(gru.in_size);
   for (auto &w : kernelWeights)
-    w.resize(3 * gru.out_size, 0.0);
+    w.resize(3 * gru.out_size, Float{});
 
   for (size_t i = 0; i < gru.in_size; ++i)
     for (size_t j = 0; j < 3 * gru.out_size; ++j)
@@ -73,9 +73,9 @@ void randomise_gru(GruType &gru) {
   gru.setWVals(kernelWeights);
 
   // recurrent weights
-  std::vector<std::vector<double>> recurrentWeights(gru.out_size);
+  std::vector<std::vector<Float>> recurrentWeights(gru.out_size);
   for (auto &w : recurrentWeights)
-    w.resize(3 * gru.out_size, 0.0);
+    w.resize(3 * gru.out_size, Float{});
 
   for (size_t i = 0; i < gru.out_size; ++i)
     for (size_t j = 0; j < 3 * gru.out_size; ++j)
@@ -84,9 +84,9 @@ void randomise_gru(GruType &gru) {
   gru.setUVals(recurrentWeights);
 
   // biases
-  std::vector<std::vector<double>> gru_bias(2);
+  std::vector<std::vector<Float>> gru_bias(2);
   for (auto &w : gru_bias)
-    w.resize(3 * gru.out_size, 0.0);
+    w.resize(3 * gru.out_size, Float{});
 
   for (size_t i = 0; i < 2; ++i)
     for (size_t j = 0; j < 3 * gru.out_size; ++j)
@@ -95,15 +95,15 @@ void randomise_gru(GruType &gru) {
   gru.setBVals(gru_bias);
 }
 
-template <typename LstmType>
+template <typename Float = double, typename LstmType>
 void randomise_lstm(LstmType &lstm) {
   std::default_random_engine generator;
-  std::uniform_real_distribution<double> distribution(-1.0, 1.0);
+  std::uniform_real_distribution<Float> distribution((Float) -1, (Float) 1);
 
   // kernel weights
-  std::vector<std::vector<double>> kernelWeights(lstm.in_size);
+  std::vector<std::vector<Float>> kernelWeights(lstm.in_size);
   for (auto &w : kernelWeights)
-    w.resize(4 * lstm.out_size, 0.0);
+    w.resize(4 * lstm.out_size, Float{});
 
   for (size_t i = 0; i < lstm.in_size; ++i)
     for (size_t j = 0; j < 4 * lstm.out_size; ++j)
@@ -112,9 +112,9 @@ void randomise_lstm(LstmType &lstm) {
   lstm.setWVals(kernelWeights);
 
   // recurrent weights
-  std::vector<std::vector<double>> recurrentWeights(lstm.out_size);
+  std::vector<std::vector<Float>> recurrentWeights(lstm.out_size);
   for (auto &w : recurrentWeights)
-    w.resize(4 * lstm.out_size, 0.0);
+    w.resize(4 * lstm.out_size, Float{});
 
   for (size_t i = 0; i < lstm.out_size; ++i)
     for (size_t j = 0; j < 4 * lstm.out_size; ++j)
@@ -123,44 +123,45 @@ void randomise_lstm(LstmType &lstm) {
   lstm.setUVals(recurrentWeights);
 
   // biases
-  std::vector<double> lstm_bias(4 * lstm.out_size);
+  std::vector<Float> lstm_bias(4 * lstm.out_size);
  for (size_t i = 0; i < 4 * lstm.out_size; ++i)
     lstm_bias[i] = distribution(generator);
 
   lstm.setBVals(lstm_bias);
 }
 
-std::unique_ptr<RTNeural::Layer<double>>
+template <typename Float = double>
+std::unique_ptr<RTNeural::Layer<Float>>
 create_layer(const std::string &layer_type, size_t in_size, size_t out_size) {
   if (layer_type == "dense") {
-    auto layer = std::make_unique<RTNeural::Dense<double>>(in_size, out_size);
+    auto layer = std::make_unique<RTNeural::Dense<Float>>(in_size, out_size);
     randomise_dense(*layer);
     return std::move(layer);
   }
 
   if (layer_type == "conv1d") {
     const auto kernel_size = in_size - 1;
-    auto layer = std::make_unique<RTNeural::Conv1D<double>>(in_size, out_size, kernel_size, 1);
+    auto layer = std::make_unique<RTNeural::Conv1D<Float>>(in_size, out_size, kernel_size, 1);
     randomise_conv1d(*layer, kernel_size);
     return std::move(layer);
   }
 
   if (layer_type == "gru") {
     auto layer =
-        std::make_unique<RTNeural::GRULayer<double>>(in_size, out_size);
+        std::make_unique<RTNeural::GRULayer<Float>>(in_size, out_size);
     randomise_gru(*layer);
     return std::move(layer);
   }
 
   if (layer_type == "lstm") {
     auto layer =
-        std::make_unique<RTNeural::LSTMLayer<double>>(in_size, out_size);
+        std::make_unique<RTNeural::LSTMLayer<Float>>(in_size, out_size);
     randomise_lstm(*layer);
     return std::move(layer);
   }
 
   if (layer_type == "tanh") {
-    auto layer = std::make_unique<RTNeural::TanhActivation<double>>(in_size);
+    auto layer = std::make_unique<RTNeural::TanhActivation<Float>>(in_size);
     return std::move(layer);
   }
 
@@ -170,12 +171,12 @@ create_layer(const std::string &layer_type, size_t in_size, size_t out_size) {
   }
 
   if (layer_type == "sigmoid") {
-    auto layer = std::make_unique<RTNeural::SigmoidActivation<double>>(in_size);
+    auto layer = std::make_unique<RTNeural::SigmoidActivation<Float>>(in_size);
     return std::move(layer);
   }
 
   if (layer_type == "softmax") {
-    auto layer = std::make_unique<RTNeural::SoftmaxActivation<double>>(in_size);
+    auto layer = std::make_unique<RTNeural::SoftmaxActivation<Float>>(in_size);
     return std::move(layer);
   }
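A brief aside on the pattern this diff leans on: C++ permits a defaulted template parameter to precede a non-defaulted one on a function template, because the trailing parameter is still deduced from the call argument. A minimal standalone sketch (names here are illustrative, not from RTNeural):

// Sketch: why `template <typename Float = double, typename DenseType>` compiles.
#include <iostream>

template <typename Float = double, typename T>
void print_float_size(const T & /*unused*/) {
  // Float comes from the default or an explicit argument; T is deduced.
  std::cout << "sizeof(Float) = " << sizeof(Float) << '\n';
}

int main() {
  print_float_size(42);        // Float = double (the default)
  print_float_size<float>(42); // Float = float (explicit)
  return 0;
}

Callers of the randomise_* helpers above get the same choice: leave Float defaulted for double-precision benchmarks, or name it explicitly for single precision.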

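And a hypothetical call site (not part of this diff; the include path and layer sizes are assumptions) showing how a benchmark can now pick the precision, while existing double-precision callers compile unchanged thanks to the default template argument:

// Hypothetical benchmark setup, assuming this header is available as "layer_creator.hpp".
#include "layer_creator.hpp"
#include <memory>

void make_benchmark_layers() {
  // Existing behaviour: Float defaults to double.
  std::unique_ptr<RTNeural::Layer<double>> layer_d =
      create_layer("dense", 8, 8);

  // New in this change: request a single-precision layer explicitly.
  std::unique_ptr<RTNeural::Layer<float>> layer_f =
      create_layer<float>("dense", 8, 8);
}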