import keras.backend as K
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
from attack_utils import gen_adv_loss
from keras.models import save_model
import time
import sys

FLAGS = flags.FLAGS

EVAL_FREQUENCY = 1000    # not referenced in this file
BATCH_SIZE = 64          # minibatch size for training and batched evaluation
BATCH_EVAL_NUM = 100     # not referenced in this file


def batch_eval(tf_inputs, tf_outputs, numpy_inputs):
    """
    A helper function that computes a tensor on numpy inputs by batches.
    From: https://github.com/openai/cleverhans/blob/master/cleverhans/utils_tf.py
    """
    n = len(numpy_inputs)
    assert n > 0
    assert n == len(tf_inputs)
    m = numpy_inputs[0].shape[0]
    # All numpy inputs must describe the same number of examples
    for i in range(1, n):
        assert numpy_inputs[i].shape[0] == m

    # One output buffer per requested tensor
    out = []
    for _ in tf_outputs:
        out.append([])

    for start in range(0, m, BATCH_SIZE):
        # Slice out the current batch from every input array
        end = start + BATCH_SIZE
        numpy_input_batches = [numpy_input[start:end]
                               for numpy_input in numpy_inputs]
        cur_batch_size = numpy_input_batches[0].shape[0]
        assert cur_batch_size <= BATCH_SIZE
        for e in numpy_input_batches:
            assert e.shape[0] == cur_batch_size

        # Run the graph in inference mode (learning_phase = 0)
        feed_dict = dict(zip(tf_inputs, numpy_input_batches))
        feed_dict[K.learning_phase()] = 0
        numpy_output_batches = K.get_session().run(tf_outputs,
                                                   feed_dict=feed_dict)
        for e in numpy_output_batches:
            assert e.shape[0] == cur_batch_size, e.shape
        for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
            out_elem.append(numpy_output_batch)

    # Stitch the per-batch results back together
    out = [np.concatenate(x, axis=0) for x in out]
    for e in out:
        assert e.shape[0] == m, e.shape
    return out
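
# Example usage (a minimal sketch; `x` is assumed to be the input placeholder
# and `model` a Keras model built by the caller -- neither is defined here):
#
#   probs = batch_eval([x], [K.softmax(model(x))], [X_test])[0]
#
# This evaluates the softmax outputs over X_test in chunks of BATCH_SIZE
# instead of feeding the whole array through the graph at once, which is how
# tf_test_error_rate below uses it.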


def tf_train(x, y, model, X_train, Y_train, generator,
             x_advs=None, benign=None, cross_lip=None):
    old_vars = set(tf.global_variables())
    train_size = Y_train.shape[0]

    # Generate cross-entropy loss for training
    logits = model(x)
    preds = K.softmax(logits)
    l1 = gen_adv_loss(logits, y, mean=True)

    # Add adversarial training loss
    if x_advs is not None:
        # Placeholder selecting which source of adversarial examples to use
        idx = tf.placeholder(dtype=np.int32)
        logits_adv = model(tf.stack(x_advs)[idx])
        l2 = gen_adv_loss(logits_adv, y, mean=True)
        if benign == 0:
            loss = l2
        elif benign == 1:
            loss = 0.5 * (l1 + l2)
        else:
            raise ValueError('benign must be 0 or 1 when x_advs is given')
    else:
        l2 = tf.constant(0)
        loss = l1

    optimizer = tf.train.AdamOptimizer().minimize(loss)
    saver = tf.train.Saver(set(tf.global_variables()) - old_vars)

    # Run all the initializers to prepare the trainable parameters.
    K.get_session().run(tf.initialize_variables(
        set(tf.global_variables()) - old_vars))
    start_time = time.time()
    print('Initialized!')

    # Loop through training steps (ceiling of total examples over all epochs
    # divided by the batch size).
    num_steps = int(FLAGS.NUM_EPOCHS * train_size + BATCH_SIZE - 1) // BATCH_SIZE
    step = 0
    training_loss = 0
    epoch_count = 0
    step_old = 0
    for (batch_data, batch_labels) \
            in generator.flow(X_train, Y_train, batch_size=BATCH_SIZE):
        # Pad a smaller final batch with examples from the start of the
        # training set so every batch has exactly BATCH_SIZE examples
        if len(batch_data) < BATCH_SIZE:
            k = BATCH_SIZE - len(batch_data)
            batch_data = np.concatenate([batch_data, X_train[0:k]])
            batch_labels = np.concatenate([batch_labels, Y_train[0:k]])

        feed_dict = {x: batch_data,
                     y: batch_labels,
                     K.learning_phase(): 1}

        # Choose source of adversarial examples at random
        # (for ensemble adversarial training)
        if x_advs is not None:
            feed_dict[idx] = np.random.randint(len(x_advs))

        # Run the graph
        _, curr_loss, curr_l1, curr_l2, curr_preds, _ = \
            K.get_session().run([optimizer, loss, l1, l2, preds]
                                + [model.updates],
                                feed_dict=feed_dict)
        training_loss += curr_loss

        epoch = float(step) * BATCH_SIZE / train_size
        if epoch >= epoch_count:
            epoch_count += 1
            elapsed_time = time.time() - start_time
            start_time = time.time()
            print('Step %d (epoch %.2f), %.2f s' %
                  (step, float(step) * BATCH_SIZE / train_size,
                   elapsed_time))
            # Average loss since the previous report (at least one step)
            print('Training loss: %.3f' %
                  (training_loss / max(step - step_old, 1)))
            training_loss = 0
            step_old = step
            print('Minibatch loss: %.3f (%.3f, %.3f)' % (curr_loss, curr_l1, curr_l2))
            _, _, minibatch_error = error_rate(curr_preds, batch_labels)
            print('Minibatch error: %.1f%%' % minibatch_error)
            # if epoch % 10 == 0 or (step == (num_steps-1)):
            #     save_path = saver.save(K.get_session(), "/tmp/model.ckpt")
            #     save_model(model, 'tmp/model.ckpt')
            #     print("Model saved in file: %s" % 'model.ckpt')
            sys.stdout.flush()

        step += 1
        if step == num_steps:
            break
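
# Example call (a hedged sketch; the placeholder shapes and the use of
# ImageDataGenerator are assumptions about the caller, not taken from this file):
#
#   x = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))
#   y = tf.placeholder(tf.float32, shape=(None, 10))
#   datagen = ImageDataGenerator()
#   datagen.fit(X_train)
#   tf_train(x, y, model, X_train, Y_train, datagen)
#
# With x_advs set to a list of adversarial-example tensors (one per source
# model) and benign in {0, 1}, the loss becomes pure adversarial cross-entropy
# or an even mix of clean and adversarial cross-entropy, respectively.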


def tf_test_error_rate(model, x, X_test, y_test):
    """
    Compute test error.
    """
    assert len(X_test) == len(y_test)

    # Predictions for the test set, computed in batches
    eval_prediction = K.softmax(model(x))
    predictions = batch_eval([x], [eval_prediction], [X_test])[0]

    return error_rate(predictions, y_test)


def error_rate(predictions, labels):
    """
    Return the predicted and true class indices and the error rate in percent.
    """
    assert len(predictions) == len(labels)
    preds = np.argmax(predictions, 1)
    orig = np.argmax(labels, 1)
    error = 100.0 - (100.0 * np.sum(preds == orig) / predictions.shape[0])
    return preds, orig, error
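

if __name__ == "__main__":
    # Minimal sanity check for error_rate (illustrative only: the softmax
    # scores and one-hot labels below are made up, not produced by a model).
    demo_preds = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
    demo_labels = np.array([[1, 0], [0, 1], [0, 1]])
    _, _, err = error_rate(demo_preds, demo_labels)
    print('Demo error rate: %.1f%%' % err)  # one of three wrong -> 33.3%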