-
Notifications
You must be signed in to change notification settings - Fork 0
/
1LayerNeuralNet.py
137 lines (96 loc) · 3.38 KB
/
1LayerNeuralNet.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
import csv
import numpy as np
class OneLayerNN:
    """A single-layer neural network (one weight matrix + sigmoid output)
    trained by mini-batch gradient descent with the delta rule.

    Weights are a (neurons, input_size) matrix; each row maps the input
    vector to one output unit (one unit per class).
    """

    def __init__(self, neurons: int = 10, input_size: int = 784,
                 learn_rate: float = 0.01):
        """Initialize uniform-random weights scaled by 1/sqrt(input_size).

        Args:
            neurons: number of output units (one per class label).
            input_size: length of each input vector (784 = 28x28 pixels).
            learn_rate: gradient-descent step size.
        """
        self.neurons = neurons
        self.input_size = input_size
        # BUG FIX: the original used np.math.sqrt; the np.math alias was
        # removed in NumPy 2.0.  np.sqrt is the supported equivalent.
        self.weights = np.random.rand(neurons, input_size) / np.sqrt(input_size)
        self.learn_rate = learn_rate

    def binarize(self, input_vector: np.ndarray) -> np.ndarray:
        """Return a boolean vector: True wherever the input is positive."""
        return input_vector > 0

    def _sigmoid(self, vector: np.ndarray) -> np.ndarray:
        """Element-wise logistic sigmoid: 1 / (1 + e^-x)."""
        return 1 / (1 + np.exp(-vector))

    def _prediction_vector(self, input_vector: np.ndarray) -> np.ndarray:
        """Return the (neurons,) vector of sigmoid-activated unit outputs."""
        return self._sigmoid(self.weights @ input_vector)

    def predict(self, input_vector: np.ndarray):
        """Return the index (class label) of the most activated unit."""
        return np.argmax(self._prediction_vector(input_vector))

    def _generate_label_vector(self, label: int) -> np.ndarray:
        """Return a one-hot (neurons,) target vector for ``label``."""
        out = np.zeros(self.neurons)
        out[label] = 1
        return out

    def gd(self, batch_labels, batch_data) -> None:
        """Apply one mini-batch gradient-descent update to the weights.

        Args:
            batch_labels: sequence of integer class labels.
            batch_data: sequence of input vectors, parallel to batch_labels.
        """
        accumulated_errors = np.zeros((self.neurons, self.input_size))
        for label, input_vector in zip(batch_labels, batch_data):
            vector = self._prediction_vector(input_vector)
            target = self._generate_label_vector(label)
            # Delta rule: sigmoid derivative v*(1-v) times the output error.
            delta = vector * (1 - vector) * (target - vector)
            # Outer product accumulates every per-weight gradient at C speed
            # (equivalent to looping over units and scaling the input vector).
            accumulated_errors += np.outer(delta, input_vector)
        self.weights += self.learn_rate * accumulated_errors
# Training configuration.
agent = OneLayerNN(10)
batch_size = 100
epochs = 20

# Load the training set: each CSV row is "label, pixel, pixel, ...".
# Pixels are binarized (pixel > 0) before storage.
full_data = []
with open('train_1000.csv', 'r') as csvFile:
    reader = csv.reader(csvFile)
    for row in reader:
        # Skip header or malformed rows instead of crashing on them;
        # catching only the parse errors (was a bare `except:`, which
        # would also have hidden genuine bugs).
        try:
            label = int(row[0])
            data = list(map(int, row[1:]))
        except (ValueError, IndexError):
            continue
        full_data.append((agent.binarize(np.array(data)), label))
# NOTE: the redundant csvFile.close() was removed -- the `with` statement
# already closes the file.
# Train for the configured number of epochs, reshuffling each time so
# every epoch sees the examples in a fresh order.
for epoch in range(epochs):
    print("Epoch: " + str(epoch + 1))
    np.random.shuffle(full_data)
    # Walk the shuffled data in complete batches only; any trailing
    # partial batch is skipped (same as the original accumulate-and-flush
    # logic, and a no-op here since 1000 divides evenly by 100).
    usable = len(full_data) - len(full_data) % batch_size
    for start in range(0, usable, batch_size):
        chunk = full_data[start:start + batch_size]
        labels = [label for _, label in chunk]
        vectors = [vec for vec, _ in chunk]
        agent.gd(labels, vectors)
# Evaluate accuracy on the held-out labeled test set.
total_correct = 0
total_examples = 0
with open('test_mini_labeled.csv', 'r') as csvFile:
    reader = csv.reader(csvFile)
    for row in reader:
        # Skip header or malformed rows rather than aborting the whole
        # evaluation (narrowed from a bare `except:` to the parse errors).
        try:
            label = int(row[0])
            data = list(map(int, row[1:]))
        except (ValueError, IndexError):
            continue
        data = agent.binarize(np.array(data))
        total_examples += 1
        total_correct += (agent.predict(data) == label)
# The `with` statement closes the file; the old explicit close() was redundant.
print("Correct predictions: " + str(total_correct) + "/" + str(total_examples))
# Guard the accuracy line so an empty/unparseable test file reports cleanly
# instead of raising ZeroDivisionError.
if total_examples:
    print("Accuracy: " + str(np.round(total_correct / total_examples * 100, 2))
          + "%")
else:
    print("Accuracy: n/a (no labeled test rows parsed)")