This repository was archived by the owner on Oct 29, 2018. It is now read-only.

Commit e778913

A few changes in variable naming

1 parent: 4c8303f

3 files changed: 23 additions, 67 deletions

AutoEncoder.py (+1, -1)
@@ -26,7 +26,6 @@ def __init__(self, inp_dim, hid_dim):
         self.b = -3 * np.ones((hid_dim, 1))
         # learning rate for synaptic plasticity of read-out layer (RO)
         self.lrateRO = 0.01
-        # 0.0001 * (2 / (3 * lnum)) #numerical regularization constant
         self.regRO = 0.0002
         self.decayP = 0  # decay factor for positive weights [0..1]
         self.decayN = 1  # decay factor for negative weights [0..1]
@@ -99,6 +98,7 @@ def train(self, X):
 
     def update(self):
         self.g = np.dot(self.W.transpose(), self.inp)
+
         # Apply activation function
         self.h = float(1) / (1 + np.exp(-self.a * self.g - self.b))
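For reference, the `update` step above computes each hidden activation with a per-unit parameterized logistic function, h = 1 / (1 + exp(-a*g - b)). A minimal standalone sketch; the dimensions and random data are assumptions for illustration, and only the shapes and the formula come from the diff:

    import numpy as np

    inp_dim, hid_dim = 48, 32            # illustrative sizes, not from the commit
    rng = np.random.RandomState(0)
    W = rng.rand(inp_dim, hid_dim)       # non-negative weights, as in an NNSAE
    inp = rng.rand(inp_dim, 1)           # one input column
    a = np.ones((hid_dim, 1))            # per-unit sigmoid slopes
    b = -3 * np.ones((hid_dim, 1))       # per-unit sigmoid biases, as set in __init__

    g = np.dot(W.transpose(), inp)       # net input per hidden unit, shape (hid_dim, 1)
    h = 1.0 / (1 + np.exp(-a * g - b))   # parameterized logistic activation

With b = -3 each unit starts nearly silent (h is about 0.047 at g = 0), consistent with the sparse activations a non-negative sparse autoencoder aims for.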

Node.py (+15, -55)
@@ -14,50 +14,47 @@ def __init__(self, layer_number, node_pos, cifar_stat={'patch_mean': [], 'patch_
         self.layer_number = layer_number
         self.node_position = node_pos
         self.belief = []
-        # cifarStat = load_cifar(4)# to be used for Normalization and Whitening
-        # Purposes
         self.patch_mean = cifar_stat['patch_mean']
         self.patch_std = cifar_stat['patch_std']
         self.v = cifar_stat['whiten_mat']
+        self.learning_algorithm = []
+        self.input = []
+        self.algorithm_choice = []
 
     def init_node_learning_params(self, algorithm_choice, alg_params):
         self.algorithm_choice = algorithm_choice
         if algorithm_choice == 'Clustering':
             cents_per_layer = alg_params['num_cents_per_layer']
-            # input_width = input_widths[layer_num]
+
             if self.layer_number == 0:
                 input_width = 48
             else:
                 input_width = cents_per_layer[self.layer_number - 1] * 4
             self.learning_algorithm = Clustering(alg_params['mr'], alg_params['vr'], alg_params['sr'], input_width,
                                                  alg_params['num_cents_per_layer'][self.layer_number], self.node_position)
-            # mr, vr, sr, di, ce, node_id
         else:
             self.belief = np.ones((alg_params[self.layer_number][1], 1))
             self.algorithm_choice = algorithm_choice
             self.learning_algorithm = NNSAE(
                 alg_params[self.layer_number][0], alg_params[self.layer_number][1])
 
-    def load_input(self, In):
+    def load_input(self, in_):
         if self.layer_number == 0:
-            In = In - self.patch_mean
-            In = In / self.patch_std
-            In = In.dot(self.v)
-        self.input = In
+            in_ = in_ - self.patch_mean
+            in_ = in_ / self.patch_std
+            in_ = in_.dot(self.v)
+        self.input = in_
 
     def do_node_learning(self, mode):
         if self.algorithm_choice == 'Clustering':
             self.learning_algorithm.update_node(self.input, mode)
             self.belief = self.learning_algorithm.belief
         else:
             self.learning_algorithm.train(self.input)
-            W = np.transpose((self.learning_algorithm.W + 0.00005)/np.sum((self.learning_algorithm.W + 0.00005), 0))
-            input_ = self.input/np.sum(self.input, 0)
-            # input_ = np.transpose(self.input)/np.sum(np.transpose(self.input), 0)
-            # print np.shape(W)
-            # print np.shape(input_)
-            # activations = np.dot(W, input_) + 0.00005
-            dist = W - input_
+            eps = np.exp(-10)
+            weight = np.transpose((self.learning_algorithm.W + eps)/np.sum((self.learning_algorithm.W + eps), 0))
+            input_ = self.input/np.sum(self.input, 0)
+            dist = weight - input_
             sq_dist = np.square(dist)
             norm_dist = np.sum(sq_dist, axis=1)
             chk = (norm_dist == 0)
@@ -66,42 +63,5 @@ def do_node_learning(self, mode):
                 self.belief[chk] = 1.0
             else:
                 norm_dist = 1 / norm_dist
-                belief = (norm_dist / sum(norm_dist))  # .reshape(np.shape(self.belief)[0], np.shape(self.belief)[1])
-                # belief = activations / (np.sum(activations))
-                self.belief = belief
-                # belief = np.maximum(activations, 0)
-                # self.belief = belief
-                # for K in range(activations.shape[0]):
-                # belief[K] = max(0.0, float((activations[K] - 0.025)))
-                # self.belief = np.asarray(belief)/np.sum(belief)
-
-    """
-
-    def calcuatebelief(self, input_):
-        self.load_inputTonodes(input_, [4,4])
-        for I in range(len(self.nodes)):
-            for J in range(len(self.nodes[0])):
-                W = np.transpose(self.NNSAE.W)
-                Image = return_node_input(input_, [I*4, J*4], [4,4], 'Adjacent', 'Color')
-                activations = np.dot(W, np.transpose(Image))
-                activations = activations/sum(sum(activations))
-                self.nodes[I][J].Activation = activations
-                m = np.mean(np.mean(activations,1))
-                for K in range(activations.shape[0]):
-                    self.nodes[I][J].belief[K,0] = max(0, (activations[K,0] - 0.025))
-        print self.nodes[0][0].belief"""
-    """
-    def produce_belief(self, sqdiff):
-        """
-        # Update belief state.
-        """
-        normdist = np.sum(sqdiff / self.var, axis=1)
-        chk = (normdist == 0)
-        if any(chk):
-            self.belief = np.zeros((1, self.CENTS))
-            self.belief[chk] = 1.0
-        else:
-            normdist = 1 / normdist
-            self.belief = (normdist / sum(normdist)).reshape(1, self.CENTS)
-
-    """
+                belief = (norm_dist / sum(norm_dist))
+                self.belief = belief/sum(belief)  # Make sure that beliefs are normalized

testDestin.py (+7, -11)
@@ -1,8 +1,8 @@
 __author__ = 'teddy'
+import cPickle as pickle
 from Network import *
 from loadData import *
-from time import time
-import cPickle as pickle
+
 
 """
 Here I don't move the image; rather, I let a typical node move around the image
@@ -52,16 +52,13 @@
 DESTIN.setmode(network_mode)
 DESTIN.set_lowest_layer(0)
 # Load Data
-[data, labels] = loadCifar(10)  # loads cifar_data_batch_1
+[data, labels] = loadCifar(10)
 del labels
 # data = np.random.rand(5,32*32*3)
 # Initialize Network; there is also a layer-wise initialization option
 DESTIN.init_network()
-t = time()
 for I in range(data.shape[0]):  # For every image in the data set
     if I % 200 == 0:
-        print time() - t
-        t = time()
         print("Training Iteration Number %d" % I)
     for L in range(DESTIN.number_of_layers):
         if L == 0:
@@ -82,13 +79,12 @@
 print("Testing Started")
 network_mode = False
 DESTIN.setmode(network_mode)
+
 # On the training set
 [data, labels] = loadCifar(10)
 del labels
 for I in range(data.shape[0]):  # For every image in the data set
     if I % 1000 == 0:
-        print time() - t
-        t = time()
         print("Testing Iteration Number %d" % I)
     for L in range(DESTIN.number_of_layers):
         if L == 0:
@@ -107,10 +103,10 @@
 file_id.close()
 # Get rid of accumulated training beliefs
 DESTIN.clean_belief_exporter()
-# On the test set
+
 [data, labels] = loadCifar(6)
-# data = np.random.rand(5,32*32*3)
 del labels
+# On the test set
 for I in range(data.shape[0]):  # For every image in the data set
     if I % 1000 == 0:
         print("Testing Iteration Number %d" % (I+50000))
@@ -130,4 +126,4 @@
         pickle.dump(np.array(DESTIN.network_belief['belief']), file_id)
         file_id.close()
 # Get rid of accumulated training beliefs
-DESTIN.clean_belief_exporter()
+DESTIN.clean_belief_exporter()
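The belief matrices accumulated during each pass are serialized with cPickle, hence the import moved to the top of the file. A minimal round-trip sketch; the file name and the random stand-in array are assumptions for illustration, and only the dump call mirrors the script:

    import cPickle as pickle   # Python 2, as in this repository; use pickle on Python 3
    import numpy as np

    beliefs = np.random.rand(200, 25)           # stand-in for DESTIN.network_belief['belief']
    file_id = open('train_beliefs.pkl', 'wb')   # illustrative file name
    pickle.dump(np.array(beliefs), file_id)     # same call pattern as the script
    file_id.close()

    file_id = open('train_beliefs.pkl', 'rb')
    restored = pickle.load(file_id)             # recover the beliefs for later analysis
    file_id.close()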
