Merge pull request #20 from ucb-bar/rename
REFACTOR: rename to match python convention
T-K-233 authored Oct 15, 2024
2 parents f79620a + de7215f commit 8969565
Showing 55 changed files with 1,053 additions and 1,051 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -143,7 +143,7 @@ The low-level implementations of kernels are under `nn/impl/<device>`.

For the low-level functions, the following naming convention is used:

-`void NN_operator_datatype(size_t n, <datatype *output_ptr, size_t increment>, <datatype *input_ptr, size_t increment>);`
+`void nn_operator_datatype(size_t n, <datatype *output_ptr, size_t increment>, <datatype *input_ptr, size_t increment>);`

`operator`: the name of the operator, such as `add`, `max`.
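
As a concrete illustration of this convention, a strided element-wise add kernel for f32 data might look like the sketch below (the `nn_add_f32` name and its two-input parameter list are assumed for illustration, not taken from the repository):

```c
#include <stddef.h>

// Hypothetical strided f32 add kernel following the naming convention above;
// the real implementations live under nn/impl/<device>.
void nn_add_f32(size_t n,
                float *y, size_t inc_y,
                const float *x1, size_t inc_x1,
                const float *x2, size_t inc_x2) {
  // Step each pointer by its own increment so the same kernel can walk
  // contiguous buffers as well as strided views (e.g. matrix columns).
  for (size_t i = 0; i < n; i += 1) {
    y[i * inc_y] = x1[i * inc_x1] + x2[i * inc_x2];
  }
}
```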

4 changes: 2 additions & 2 deletions docs/Tensor-Basics.md
@@ -9,9 +9,9 @@ The underlying fundamental operators will be statically typed, and hence the ten
Tensor attributes describe their dimension, shape, number of elements, and datatype.

```c
-Tensor *tensor = NN_rand(2, (size_t []){ 3, 4 }, DTYPE_F32);
+Tensor *tensor = nn_rand(2, (size_t []){ 3, 4 }, DTYPE_F32);

-printf("Datatype of tensor: %s\n", NN_get_datatype_name(tensor->dtype));
+printf("Datatype of tensor: %s\n", nn_get_datatype_name(tensor->dtype));
printf("Dimension of tensor: %d\n", tensor->ndim);
printf("Shape of tensor: (%d, %d)\n", tensor->shape[0], tensor->shape[1]);
printf("Number of elements: %d\n", tensor->size);
18 changes: 9 additions & 9 deletions docs/Tensor-Creation.md
@@ -14,45 +14,45 @@ Tensor *NN_<function-name>(<ndim>, <shape>, <datatype>, <tensor-options>)

The following factory functions are available at the time of this writing:

-#### NN_tensor()
+#### nn_tensor()

Returns a tensor with uninitialized values or a preallocated buffer.

When NULL is passed as the data buffer, the function allocates a new chunk of uninitialized data.

```c
-Tensor *tensor = NN_tensor(2, (size_t []){ 2, 2 }, DTYPE_F32, NULL);
+Tensor *tensor = nn_tensor(2, (size_t []){ 2, 2 }, DTYPE_F32, NULL);
```
Alternatively, a tensor can be created directly from an existing data buffer.
```c
// data = [[1, 2], [3, 4]]
float data[] = { 1, 2, 3, 4 };
-Tensor *tensor = NN_tensor(2, (size_t []){ 2, 2 }, DTYPE_F32, data);
+Tensor *tensor = nn_tensor(2, (size_t []){ 2, 2 }, DTYPE_F32, data);
```

-#### NN_zeros()
+#### nn_zeros()

Returns a tensor filled with all zeros.

-#### NN_ones()
+#### nn_ones()

Returns a tensor filled with all ones.

-#### NN_full()
+#### nn_full()

Returns a tensor filled with a single value.

-#### NN_rand()
+#### nn_rand()

Returns a tensor filled with values drawn from a uniform distribution on [0, 1).

-#### NN_randint()
+#### nn_randint()

Returns a tensor with integers randomly drawn from an interval.

-#### NN_arange()
+#### nn_arange()

Returns a tensor with a sequence of integers.
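
As a quick sketch of how these factories are called, assuming each follows the `Tensor *nn_<function-name>(<ndim>, <shape>, <datatype>, <tensor-options>)` pattern shown above (the extra arguments of `nn_full`, `nn_randint`, and `nn_arange` are omitted since they are not spelled out on this page):

```c
// Hypothetical usage of the factory functions above.
Tensor *z = nn_zeros(2, (size_t []){ 2, 3 }, DTYPE_F32); // 2x3, all zeros
Tensor *o = nn_ones(2, (size_t []){ 2, 3 }, DTYPE_F32);  // 2x3, all ones
Tensor *r = nn_rand(2, (size_t []){ 2, 3 }, DTYPE_F32);  // 2x3, uniform on [0, 1)
```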

12 changes: 6 additions & 6 deletions examples/char-rnn/runtime_test_c/char-rnn.c
@@ -40,13 +40,13 @@ int main() {
printf("\n\n");

Matrix output;
-NN_initMatrix(&output, 1, output_size);
+nn_initMatrix(&output, 1, output_size);

Matrix input;
-NN_initMatrix(&input, 1, input_size + hidden_size);
+nn_initMatrix(&input, 1, input_size + hidden_size);

Matrix hidden;
-NN_initMatrix(&hidden, 1, hidden_size);
+nn_initMatrix(&hidden, 1, hidden_size);

int index;

@@ -57,17 +57,17 @@

for (int j=1; j<strlen(str); j+=1) {
encodeOneHot(&input, str[j]);
-NN_linear(&hidden, &i2h_weight_transposed, &i2h_bias, &input);
+nn_linear(&hidden, &i2h_weight_transposed, &i2h_bias, &input);

forward(&output, &hidden, &input);
}

// printMatrix(&output);
-index = NN_argmax(&output);
+index = nn_argmax(&output);

printf("\n> %s\n", str);
printf("score: (");
-NN_print_f32(output.data[index], 2);
+nn_print_f32(output.data[index], 2);
printf("), predicted: (%d, %s)\n", index, categories[index]);
}

6 changes: 3 additions & 3 deletions examples/char-rnn/runtime_test_c/model.h
@@ -35,11 +35,11 @@ static void forward(Matrix *output, Matrix *hidden, Matrix *input) {
// Input
Matrix *input_out = input;
// Linear
-NN_linear(hidden, &i2h_weight_transposed, &i2h_bias, input_out);
+nn_linear(hidden, &i2h_weight_transposed, &i2h_bias, input_out);
// Linear
-NN_linear(output, &h2o_weight_transposed, &h2o_bias, hidden);
+nn_linear(output, &h2o_weight_transposed, &h2o_bias, hidden);
// Log Softmax
-NN_logSoftmax(output, output);
+nn_logSoftmax(output, output);
}

#endif // __MODEL_H
40 changes: 20 additions & 20 deletions examples/char-rnn/runtime_test_c/nn.h
@@ -17,7 +17,7 @@ typedef struct {
* ====== Utility Functions ======
*/

-void NN_assert(int condition, char *message) {
+void nn_assert(int condition, char *message) {
if (!condition) {
printf("Assertion failed: ");
printf("%s\n", message);
@@ -31,7 +31,7 @@ void NN_assert(int condition, char *message) {
* These functions assume that printf is available.
*/

-void NN_print_f32(float v, int16_t num_digits) {
+void nn_print_f32(float v, int16_t num_digits) {
int32_t scale = 1;
int32_t integer_part, fractional_part;
while (num_digits != 0) {
@@ -46,14 +46,14 @@ void NN_print_f32(float v, int16_t num_digits) {
printf("%i.%i", integer_part, fractional_part);
}

-void NN_print_shape(Matrix *a) {
+void nn_print_shape(Matrix *a) {
printf("(%d, %d)\n", a->rows, a->cols);
}

-void NN_printMatrix(Matrix *a) {
+void nn_printMatrix(Matrix *a) {
for (size_t i = 0; i < a->rows; i++) {
for (size_t j = 0; j < a->cols; j++) {
-NN_print_f32(a->data[i * a->cols + j], 2);
+nn_print_f32(a->data[i * a->cols + j], 2);
printf(" ");
}
printf("\n");
@@ -64,16 +64,16 @@ void NN_printMatrix(Matrix *a) {
/*
* ====== Math Functions ======
*/
-void NN_initMatrix(Matrix *m, size_t rows, size_t cols) {
+void nn_initMatrix(Matrix *m, size_t rows, size_t cols) {
m->rows = rows;
m->cols = cols;
m->data = malloc(rows * cols * sizeof(float));
}

-void NN_matmul(Matrix *out, Matrix *a, Matrix *b) {
-NN_assert(a->cols == b->rows, "matmul: dimension mismatch");
-NN_assert(out->rows == a->rows, "matmul: dimension mismatch");
-NN_assert(out->cols == b->cols, "matmul: dimension mismatch");
+void nn_matmul(Matrix *out, Matrix *a, Matrix *b) {
+nn_assert(a->cols == b->rows, "matmul: dimension mismatch");
+nn_assert(out->rows == a->rows, "matmul: dimension mismatch");
+nn_assert(out->cols == b->cols, "matmul: dimension mismatch");
for (size_t i = 0; i < a->rows; i += 1) {
for (size_t j = 0; j < b->cols; j += 1) {
float sum = 0;
@@ -85,25 +85,25 @@ void NN_matmul(Matrix *out, Matrix *a, Matrix *b) {
}
}

-void NN_matadd(Matrix *out, Matrix *a, Matrix *b) {
-NN_assert(a->rows == b->rows, "matadd: dimension mismatch");
-NN_assert(a->cols == b->cols, "matadd: dimension mismatch");
+void nn_matadd(Matrix *out, Matrix *a, Matrix *b) {
+nn_assert(a->rows == b->rows, "matadd: dimension mismatch");
+nn_assert(a->cols == b->cols, "matadd: dimension mismatch");
for (size_t i = 0; i < a->rows; i += 1) {
for (size_t j = 0; j < a->cols; j += 1) {
out->data[i * out->cols + j] = a->data[i * a->cols + j] + b->data[i * b->cols + j];
}
}
}

-void NN_transpose(Matrix *out, Matrix *a) {
+void nn_transpose(Matrix *out, Matrix *a) {
for (size_t i = 0; i < a->rows; i += 1) {
for (size_t j = 0; j < a->cols; j += 1) {
out->data[j * out->cols + i] = a->data[i * a->cols + j];
}
}
}

-void NN_concatenate(Matrix *out, Matrix *a, Matrix *b) {
+void nn_concatenate(Matrix *out, Matrix *a, Matrix *b) {
for (size_t i = 0; i < a->cols; i += 1) {
out->data[i] = a->data[i];
}
@@ -112,7 +112,7 @@ void NN_concatenate(Matrix *out, Matrix *a, Matrix *b) {
}
}

-size_t NN_argmax(Matrix *a) {
+size_t nn_argmax(Matrix *a) {
int max_index = 0;
float max_value = a->data[0];
for (size_t i = 1; i < a->cols; i += 1) {
@@ -128,12 +128,12 @@ size_t NN_argmax(Matrix *a) {
* ====== Operators ======
*/

-void NN_linear(Matrix *out, Matrix *weight, Matrix *bias, Matrix *input) {
-NN_matmul(out, input, weight);
-NN_matadd(out, out, bias);
+void nn_linear(Matrix *out, Matrix *weight, Matrix *bias, Matrix *input) {
+nn_matmul(out, input, weight);
+nn_matadd(out, out, bias);
}
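// Note: nn_linear computes out = input * weight + bias with no transpose
// inside, so callers pass an already-transposed weight matrix (e.g.
// i2h_weight_transposed in char-rnn.c) to make the matmul shapes line up.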

-void NN_logSoftmax(Matrix *out, Matrix *a) {
+void nn_logSoftmax(Matrix *out, Matrix *a) {
float sum = 0;
for (size_t i = 0; i < a->cols; i += 1) {
sum += exp(a->data[i]);
4 changes: 2 additions & 2 deletions examples/char-rnn/runtime_test_np/model.py
@@ -8,9 +8,9 @@ def forward(input):
# Input
input_out = input
# Linear
-i2h_out = NN_linear(input_out, i2h_weight_transposed, i2h_bias)
+i2h_out = nn_linear(input_out, i2h_weight_transposed, i2h_bias)
# Linear
-h2o_out = NN_linear(i2h_out, h2o_weight_transposed, h2o_bias)
+h2o_out = nn_linear(i2h_out, h2o_weight_transposed, h2o_bias)
# Log Softmax
softmax_out = nn_logsoftmax(h2o_out)
return softmax_out, i2h_out
2 changes: 1 addition & 1 deletion examples/char-rnn/runtime_test_np/nn.py
@@ -1,6 +1,6 @@
import numpy as np

-def NN_linear(input, weight_T, bias):
+def nn_linear(input, weight_T, bias):
return np.matmul(input, weight_T) + bias

def nn_logsoftmax(input):
4 changes: 2 additions & 2 deletions examples/diffuse-loco/main.c
@@ -33,7 +33,7 @@ int main() {
init(model);

printf("setting input data...\n");
-NN_fill(&model->input_1, 1.0);
+nn_fill(&model->input_1, 1.0);

// cycles = READ_CSR("mcycle");
forward(model);
@@ -44,7 +44,7 @@
// output tensor([[ 0.0258, -0.0050, 0.0902, -0.0022, -0.0924, -0.0574, 0.0328, 0.0386, -0.0277, 0.0788, 0.0603, -0.0085]])

printf("output:\n");
-NN_printf(&model->actor_6);
+nn_printf(&model->actor_6);

return 0;
}
46 changes: 23 additions & 23 deletions examples/diffuse-loco/model.h
@@ -41,44 +41,44 @@ void forward(Model *model);
void init(Model *model) {
float *array_pointer = (float *)model_weight_data;

-NN_init_tensor(&model->input_1, 2, (size_t[]){1, 48}, DTYPE_F32, NULL);
+nn_init_tensor(&model->input_1, 2, (size_t[]){1, 48}, DTYPE_F32, NULL);

// <class 'torch.nn.modules.linear.Linear'>: actor_0
-NN_init_tensor(&model->actor_0_weight, 2, (size_t[]){512, 48}, DTYPE_F32, array_pointer);
+nn_init_tensor(&model->actor_0_weight, 2, (size_t[]){512, 48}, DTYPE_F32, array_pointer);
array_pointer += 24576;
-NN_init_tensor(&model->actor_0_bias, 1, (size_t[]){512}, DTYPE_F32, array_pointer);
+nn_init_tensor(&model->actor_0_bias, 1, (size_t[]){512}, DTYPE_F32, array_pointer);
array_pointer += 512;
-NN_init_tensor(&model->actor_0, 2, (size_t[]){1, 512}, DTYPE_F32, NULL);
+nn_init_tensor(&model->actor_0, 2, (size_t[]){1, 512}, DTYPE_F32, NULL);
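// Note: each pointer increment equals the element count of the tensor just
// mapped, e.g. 24576 = 512 * 48 for the actor_0 weight and 512 for its bias,
// so array_pointer walks linearly through model_weight_data.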

// <class 'torch.nn.modules.activation.ELU'>: actor_1
-NN_init_tensor(&model->actor_1, 2, (size_t[]){1, 512}, DTYPE_F32, NULL);
+nn_init_tensor(&model->actor_1, 2, (size_t[]){1, 512}, DTYPE_F32, NULL);

// <class 'torch.nn.modules.linear.Linear'>: actor_2
-NN_init_tensor(&model->actor_2_weight, 2, (size_t[]){256, 512}, DTYPE_F32, array_pointer);
+nn_init_tensor(&model->actor_2_weight, 2, (size_t[]){256, 512}, DTYPE_F32, array_pointer);
array_pointer += 131072;
-NN_init_tensor(&model->actor_2_bias, 1, (size_t[]){256}, DTYPE_F32, array_pointer);
+nn_init_tensor(&model->actor_2_bias, 1, (size_t[]){256}, DTYPE_F32, array_pointer);
array_pointer += 256;
-NN_init_tensor(&model->actor_2, 2, (size_t[]){1, 256}, DTYPE_F32, NULL);
+nn_init_tensor(&model->actor_2, 2, (size_t[]){1, 256}, DTYPE_F32, NULL);

// <class 'torch.nn.modules.activation.ELU'>: actor_3
-NN_init_tensor(&model->actor_3, 2, (size_t[]){1, 256}, DTYPE_F32, NULL);
+nn_init_tensor(&model->actor_3, 2, (size_t[]){1, 256}, DTYPE_F32, NULL);

// <class 'torch.nn.modules.linear.Linear'>: actor_4
-NN_init_tensor(&model->actor_4_weight, 2, (size_t[]){128, 256}, DTYPE_F32, array_pointer);
+nn_init_tensor(&model->actor_4_weight, 2, (size_t[]){128, 256}, DTYPE_F32, array_pointer);
array_pointer += 32768;
-NN_init_tensor(&model->actor_4_bias, 1, (size_t[]){128}, DTYPE_F32, array_pointer);
+nn_init_tensor(&model->actor_4_bias, 1, (size_t[]){128}, DTYPE_F32, array_pointer);
array_pointer += 128;
-NN_init_tensor(&model->actor_4, 2, (size_t[]){1, 128}, DTYPE_F32, NULL);
+nn_init_tensor(&model->actor_4, 2, (size_t[]){1, 128}, DTYPE_F32, NULL);

// <class 'torch.nn.modules.activation.ELU'>: actor_5
-NN_init_tensor(&model->actor_5, 2, (size_t[]){1, 128}, DTYPE_F32, NULL);
+nn_init_tensor(&model->actor_5, 2, (size_t[]){1, 128}, DTYPE_F32, NULL);

// <class 'torch.nn.modules.linear.Linear'>: actor_6
-NN_init_tensor(&model->actor_6_weight, 2, (size_t[]){12, 128}, DTYPE_F32, array_pointer);
+nn_init_tensor(&model->actor_6_weight, 2, (size_t[]){12, 128}, DTYPE_F32, array_pointer);
array_pointer += 1536;
-NN_init_tensor(&model->actor_6_bias, 1, (size_t[]){12}, DTYPE_F32, array_pointer);
+nn_init_tensor(&model->actor_6_bias, 1, (size_t[]){12}, DTYPE_F32, array_pointer);
array_pointer += 12;
-NN_init_tensor(&model->actor_6, 2, (size_t[]){1, 12}, DTYPE_F32, NULL);
+nn_init_tensor(&model->actor_6, 2, (size_t[]){1, 12}, DTYPE_F32, NULL);

}

@@ -87,13 +87,13 @@ void init(Model *model) {
* Forward pass of the model
*/
void forward(Model *model) {
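// The actor network is a 48 -> 512 -> 256 -> 128 -> 12 MLP (per the tensor
// shapes in init above), with an ELU activation after each hidden layer.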
-NN_linear(&model->actor_0, &model->input_1, &model->actor_0_weight, &model->actor_0_bias);
-NN_elu(&model->actor_1, &model->actor_0, 1.0);
-NN_linear(&model->actor_2, &model->actor_1, &model->actor_2_weight, &model->actor_2_bias);
-NN_elu(&model->actor_3, &model->actor_2, 1.0);
-NN_linear(&model->actor_4, &model->actor_3, &model->actor_4_weight, &model->actor_4_bias);
-NN_elu(&model->actor_5, &model->actor_4, 1.0);
-NN_linear(&model->actor_6, &model->actor_5, &model->actor_6_weight, &model->actor_6_bias);
+nn_linear(&model->actor_0, &model->input_1, &model->actor_0_weight, &model->actor_0_bias);
+nn_elu(&model->actor_1, &model->actor_0, 1.0);
+nn_linear(&model->actor_2, &model->actor_1, &model->actor_2_weight, &model->actor_2_bias);
+nn_elu(&model->actor_3, &model->actor_2, 1.0);
+nn_linear(&model->actor_4, &model->actor_3, &model->actor_4_weight, &model->actor_4_bias);
+nn_elu(&model->actor_5, &model->actor_4, 1.0);
+nn_linear(&model->actor_6, &model->actor_5, &model->actor_6_weight, &model->actor_6_bias);

}

6 changes: 3 additions & 3 deletions examples/fast-depth/main.c
@@ -46,7 +46,7 @@ int main() {
init(model);

printf("setting input data...\n");
-// NN_fill(&model->x, 0.0);
+// nn_fill(&model->x, 0.0);
memcpy((uint8_t *)model->x.data, (uint8_t *)model_input_data, (size_t)model_input_end - (size_t)model_input_start);

// cycles = READ_CSR("mcycle");
@@ -55,9 +55,9 @@

printf("cycles: %lu\n", cycles);

-Tensor *img = NN_tensor(4, (const size_t[]){1, model->decode_conv6_2.shape[1] / 8, model->decode_conv6_2.shape[2] / 4, 1}, DTYPE_F32, NULL);
+Tensor *img = nn_tensor(4, (const size_t[]){1, model->decode_conv6_2.shape[1] / 8, model->decode_conv6_2.shape[2] / 4, 1}, DTYPE_F32, NULL);

-NN_interpolate(img, &model->decode_conv6_2, (float []){0.125, 0.25});
+nn_interpolate(img, &model->decode_conv6_2, (float []){0.125, 0.25});
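// Note: the {0.125, 0.25} scale factors mirror the img shape above
// (shape[1] / 8 and shape[2] / 4); which spatial axis each factor applies
// to is assumed here from those shapes.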

printf("output:\n");
show_ASCII_image(img, 0, 0);
Expand Down