-
Notifications
You must be signed in to change notification settings - Fork 0
/
utils.py
102 lines (77 loc) · 3.85 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
from DatasetReader import read_hoda_dataset
import numpy as np
import tensorflow as tf
def load_data(train_frac=0.7, test_frac=0.1, seed=123):
    """Load the Hoda digit dataset and re-split it into train/val/test sets.

    Reads the original train (60k) and test (20k) CDB files, pools them into
    one dataset, shuffles, and re-splits: ``train_frac`` of the images go to
    training, ``test_frac`` to testing, and the remainder to validation
    (0.2 with the defaults).

    Args:
        train_frac: Fraction of the pooled data used for training (default 0.7).
        test_frac: Fraction of the pooled data used for testing (default 0.1).
        seed: NumPy RNG seed so the shuffle (and hence the split) is reproducible.

    Returns:
        Tuple ``(x_train, y_train, x_val, y_val, x_test, y_test)`` of numpy
        arrays. Labels are one-hot encoded; images are 32x32 and reshaped by
        the reader (exact output shape depends on ``read_hoda_dataset`` —
        confirm against DatasetReader).
    """
    np.random.seed(seed)

    print("Loading the whole dataset...")
    x_train, y_train = read_hoda_dataset(dataset_path='./DigitDB/Train 60000.cdb',
                                         images_height=32,
                                         images_width=32,
                                         one_hot=True,
                                         reshape=True)
    x_test, y_test = read_hoda_dataset(dataset_path='./DigitDB/Test 20000.cdb',
                                       images_height=32,
                                       images_width=32,
                                       one_hot=True,
                                       reshape=True)

    # Pool both files so the split fractions apply to the full dataset.
    x = np.concatenate([x_train, x_test])
    y = np.concatenate([y_train, y_test])

    # Shuffle an index array instead of the data so x and y stay aligned.
    idx = np.arange(x.shape[0])
    np.random.shuffle(idx)

    train_size = int(train_frac * x.shape[0])
    test_size = int(test_frac * x.shape[0])
    # Slice by explicit position rather than from the end: with
    # test_size == 0, idx[-test_size:] would return the WHOLE array.
    val_end = x.shape[0] - test_size

    train_idx = idx[:train_size]
    val_idx = idx[train_size:val_end]
    test_idx = idx[val_end:]

    return (x[train_idx], y[train_idx],
            x[val_idx], y[val_idx],
            x[test_idx], y[test_idx])
def glorot_initializer(shape, name=None):
    """Create a tf.Variable initialized with the uniform Glorot (Xavier) scheme.

    Draws from U(-r, r) with r = sqrt(6 / (fan_in + fan_out))
    (Glorot & Bengio, 2010). For a 2-D weight matrix this reduces exactly to
    the original sqrt(6 / (shape[0] + shape[1])). Higher-rank shapes (e.g.
    conv kernels) are additionally supported by taking fan_in as the product
    of all but the last dimension and fan_out as the last dimension — a
    common approximation; confirm it matches your intended conv fan-out
    convention before relying on it.

    Args:
        shape: Sequence of ints, the shape of the weight tensor (rank >= 2).
        name: Optional name for the created variable.

    Returns:
        A float32 tf.Variable with Glorot-uniform initial values.
    """
    fan_in = int(np.prod(shape[:-1]))
    fan_out = int(shape[-1])
    init_range = np.sqrt(6.0 / (fan_in + fan_out))
    # tf.random_uniform is the TF1-style API, consistent with the rest of
    # this file (tf.truncated_normal / tf.zeros below).
    initial = tf.random_uniform(shape, minval=-init_range, maxval=init_range,
                                dtype=tf.float32)
    return tf.Variable(initial, name=name)
def normal_initializer(shape, stddev, name=None):
    """Create a tf.Variable drawn from a zero-mean truncated normal.

    Samples come from N(0, stddev^2); values falling more than two standard
    deviations from the mean are redrawn (tf.truncated_normal semantics).

    Args:
        shape: Sequence of ints, the shape of the variable.
        stddev: Standard deviation of the truncated normal distribution.
        name: Optional name for the created variable.

    Returns:
        A tf.Variable holding the sampled initial values.
    """
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev), name=name)
def zero_initializer(shape, name=None):
    """Create a tf.Variable filled with float32 zeros.

    Args:
        shape: Sequence of ints, the shape of the variable.
        name: Optional name for the created variable.

    Returns:
        A float32 tf.Variable of zeros with the given shape.
    """
    zeros = tf.zeros(shape, dtype=tf.float32)
    return tf.Variable(zeros, name=name)