-
Notifications
You must be signed in to change notification settings - Fork 2
/
unitarydnn.py
157 lines (134 loc) · 5.18 KB
/
unitarydnn.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
"""
Collection of functions related to unitary balanced Deep Neural Networks (DNNs).
"""
import tensorflow as tf
tf.keras.backend.set_floatx('float64')
import numpy as np
from tensorflow.keras import initializers
from tensorflow.keras import layers
from tensorflow.keras import Model
def randomUnitVector(n):
    """
    Draw a vector uniformly at random from the unit sphere in R^n.

    Sampling i.i.d. standard normal coordinates and rescaling to unit
    2-norm yields a direction that is uniformly distributed.

    Parameters
    ----------
    n : int
        Dimension (length) of the vector.

    Returns
    -------
    array
        Vector of length n with 2-norm equal to 1.
    """
    gaussian = np.random.normal(size=n)
    length = np.linalg.norm(gaussian)
    return gaussian / length
def newRandomModel(inputShape, neuronsHiddenLayers, outputs, norm):
    """
    Create a new DNN (Keras Model object) with the following characteristics:
        - The input has shape inputShape;
        - The number of hidden layers is the length of neuronsHiddenLayers and
          the number of neurons in each hidden layer is given by the
          corresponding entry in neuronsHiddenLayers;
        - The number of outputs is outputs;
        - The weights of each neuron are given by a vector chosen uniformly at
          random whose norm is norm, and
        - The bias of each neuron is zero.

    Parameters
    ----------
    inputShape : tuple
        The shape of the input to the DNN.
    neuronsHiddenLayers : array
        List with the number of neurons in each hidden layer.
    outputs : int
        Number of outputs.
    norm : float
        Norm of the weight vector of the neurons.

    Returns
    -------
    Model
        Model object corresponding to a DNN with the characteristics above.

    Example
    -------
    inputShape = (1024,)
    neuronsHiddenLayers = [256] * 4
    outputs = 10
    model = deti.randomdnn.newRandomModel(inputShape, neuronsHiddenLayers, outputs, 1.0)
    """
    # Input layer
    x0 = layers.Input(shape=inputShape, name="input")
    # Hidden layers (ReLU activation). Kernels/biases start at zero and the
    # kernels are overwritten with random unit vectors below.
    x = x0
    for i, n in enumerate(neuronsHiddenLayers):
        x = layers.Dense(n, activation="relu", name=f"denseLayer{i + 1}",
                         kernel_initializer=initializers.Zeros(),
                         bias_initializer=initializers.Zeros())(x)
    # Output layer (linear, no activation)
    x = layers.Dense(outputs, name="output",
                     kernel_initializer=initializers.Zeros(),
                     bias_initializer=initializers.Zeros())(x)
    # Model
    model = Model(inputs=x0, outputs=x)
    # Replace each zero kernel: column j becomes a random unit vector scaled
    # to the requested norm; biases stay zero.
    for layer in model.layers:
        if not isinstance(layer, layers.Dense):
            # Skip the Input layer (it carries no weights).
            continue
        kernel, bias = layer.get_weights()
        dim, n = kernel.shape
        weights = np.array([norm * randomUnitVector(dim) for _ in range(n)]).T
        layer.set_weights([weights, bias])
    return model
def newRandomBalancedModel(inputShape, neuronsHiddenLayers, outputs, norm, low=-1, high=1, samples=100000):
    """
    Create a new DNN (Keras Model object) with the following characteristics:
        - The input has shape inputShape;
        - The number of hidden layers is the length of neuronsHiddenLayers and
          the number of neurons in each hidden layer is given by the
          corresponding entry in neuronsHiddenLayers;
        - The number of outputs is outputs;
        - The weights of each neuron are given by a vector chosen uniformly at
          random whose norm is norm, and
        - The bias of each neuron is chosen so that each neuron has a 50%
          probability of being active.

    Parameters
    ----------
    inputShape : tuple
        The shape of the input to the DNN.
    neuronsHiddenLayers : array
        List with the number of neurons in each hidden layer.
    outputs : int
        Number of outputs.
    norm : float
        Norm of the weight vector of the neurons.
    low : float, optional
        Lower bound for sampling inputs to the DNN when setting the bias.
    high : float, optional
        Upper bound for sampling inputs to the DNN when setting the bias.
    samples : int, optional
        Number of sampled random inputs to the DNN when setting the bias.

    Returns
    -------
    Model
        Model object corresponding to a DNN with the characteristics above.

    Example
    -------
    inputShape = (1024,)
    neuronsHiddenLayers = [256] * 4
    outputs = 10
    model = deti.randomdnn.newRandomBalancedModel(inputShape, neuronsHiddenLayers, outputs, 1.0)
    """
    # Start from a DNN with random unit-vector weights (scaled to norm) and
    # zero biases; only the biases are adjusted below.
    model = newRandomModel(inputShape, neuronsHiddenLayers, outputs, norm)
    # Sample random inputs uniformly from [low, high]^inputShape[0]
    Y = np.random.uniform(low=low, high=high, size=(samples, inputShape[0]))
    # Set bias such that around 50% of the samples activate each neuron.
    # model.layers[0] is the Input layer, so the Dense layers start at index 1.
    nLayers = len(model.layers)
    for i in range(1, nLayers):
        weights, _ = model.layers[i].get_weights()
        # Pre-activation values: column j of Y holds the values for neuron j.
        Y = np.matmul(Y, weights)
        # Minus the per-neuron median makes the post-bias value positive for
        # ~half of the samples, i.e. each neuron is active ~50% of the time.
        biases = -np.median(Y, axis=0)
        model.layers[i].set_weights([weights, biases])
        # Propagate through bias and ReLU for the next layer; after the last
        # layer Y is no longer needed, so skip the (dead) update.
        if i < nLayers - 1:
            Y += biases[np.newaxis, :]
            Y *= (Y > 0)
    return model