Ch 4 Update: kernel fix and add eval code
nfmcclure committed Apr 9, 2018
1 parent 7dd2714 commit ec16de2
Showing 9 changed files with 771 additions and 496 deletions.

Large diffs are not rendered by default.

@@ -20,6 +20,10 @@
 from tensorflow.python.framework import ops
 ops.reset_default_graph()
 
+# Set random seeds
+np.random.seed(7)
+tf.set_random_seed(7)
+
 # Create graph
 sess = tf.Session()
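Editorial note on the added seed lines: calling np.random.seed before the train/test split and tf.set_random_seed before any variables are created makes both the split and the variable initialization repeatable; in TF 1.x the graph-level seed only affects ops created after it is set, so the placement near the top of the script matters.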

@@ -31,7 +35,7 @@
 
 # Split data into train/test sets
 train_indices = np.random.choice(len(x_vals),
-                                 round(len(x_vals)*0.8),
+                                 round(len(x_vals)*0.9),
                                  replace=False)
 test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
 x_vals_train = x_vals[train_indices]
@@ -40,7 +44,7 @@
 y_vals_test = y_vals[test_indices]
 
 # Declare batch size
-batch_size = 100
+batch_size = 135
 
 # Initialize placeholders
 x_data = tf.placeholder(shape=[None, 2], dtype=tf.float32)
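Editor's gloss on the two value changes in this file (not part of the commit text): the iris data used throughout this chapter has 150 rows, so, assuming that dataset, the new 90% split leaves round(150 * 0.9) = 135 training points, which is exactly the new batch_size; each training step can now sample a batch the size of the whole training set. A minimal sketch of the arithmetic:

# A sketch, assuming the 150-sample iris data used in this chapter.
import numpy as np

n_samples = 150                           # len(x_vals) for iris
n_train = round(n_samples * 0.9)          # 135, matching the new batch_size
train_indices = np.random.choice(n_samples, n_train, replace=False)
test_indices = np.array(list(set(range(n_samples)) - set(train_indices)))
print(len(train_indices), len(test_indices))  # -> 135 15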

Large diffs are not rendered by default.

159 changes: 102 additions & 57 deletions 04_Support_Vector_Machines/04_Working_with_Kernels/04_svm_kernels.ipynb

Large diffs are not rendered by default.

@@ -137,3 +137,19 @@
 plt.xlabel('Generation')
 plt.ylabel('Loss')
 plt.show()
+
+# Evaluate on new/unseen data points
+# New data points:
+new_points = np.array([(-0.75, -0.75),
+                       (-0.5, -0.5),
+                       (-0.25, -0.25),
+                       (0.25, 0.25),
+                       (0.5, 0.5),
+                       (0.75, 0.75)])
+
+[evaluations] = sess.run(prediction, feed_dict={x_data: x_vals,
+                                                y_target: np.transpose([y_vals]),
+                                                prediction_grid: new_points})
+
+for ix, p in enumerate(new_points):
+    print('{} : class={}'.format(p, evaluations[ix]))
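For context (an editorial gloss): prediction labels each new point by its RBF-kernel similarity to every training point, using the identity ||a - b||^2 = ||a||^2 - 2*a.b + ||b||^2 that pred_sq_dist encodes, so evaluations comes back as one +/-1 class per row of new_points. A NumPy sketch of the cross-kernel, with the gamma value assumed rather than taken from the script:

# A sketch of the prediction kernel; gamma here is an assumed value.
import numpy as np

def rbf_cross_kernel(train, grid, gamma=-50.0):
    # Pairwise squared distances via ||a||^2 - 2*a.b + ||b||^2
    rA = np.sum(np.square(train), axis=1).reshape(-1, 1)
    rB = np.sum(np.square(grid), axis=1).reshape(-1, 1)
    sq_dist = rA - 2.0 * train @ grid.T + rB.T
    return np.exp(gamma * np.abs(sq_dist))  # K[i, j]: train point i vs grid point j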

Large diffs are not rendered by default.

@@ -1,5 +1,4 @@
 # Nonlinear SVM Example
-#----------------------------------
 #
 # This function will illustrate how to
 # implement the gaussian kernel on
@@ -22,11 +21,11 @@
 # iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)]
 iris = datasets.load_iris()
 x_vals = np.array([[x[0], x[3]] for x in iris.data])
-y_vals = np.array([1 if y==0 else -1 for y in iris.target])
-class1_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i]==1]
-class1_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i]==1]
-class2_x = [x[0] for i,x in enumerate(x_vals) if y_vals[i]==-1]
-class2_y = [x[1] for i,x in enumerate(x_vals) if y_vals[i]==-1]
+y_vals = np.array([1 if y == 0 else -1 for y in iris.target])
+class1_x = [x[0] for i, x in enumerate(x_vals) if y_vals[i] == 1]
+class1_y = [x[1] for i, x in enumerate(x_vals) if y_vals[i] == 1]
+class2_x = [x[0] for i, x in enumerate(x_vals) if y_vals[i] == -1]
+class2_y = [x[1] for i, x in enumerate(x_vals) if y_vals[i] == -1]
 
 # Declare batch size
 batch_size = 150
@@ -37,7 +36,7 @@
 prediction_grid = tf.placeholder(shape=[None, 2], dtype=tf.float32)
 
 # Create variables for svm
-b = tf.Variable(tf.random_normal(shape=[1,batch_size]))
+b = tf.Variable(tf.random_normal(shape=[1, batch_size]))
 
 # Gaussian (RBF) kernel
 gamma = tf.constant(-25.0)
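A note on the sign convention (editorial): gamma is stored as a negative constant so that the kernel op exp(gamma * sq_dists) evaluates to exp(-25 * ||x - x'||^2), a standard Gaussian/RBF kernel; larger magnitudes give narrower kernels and a wigglier decision boundary.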
@@ -52,13 +51,13 @@
 loss = tf.negative(tf.subtract(first_term, second_term))
 
 # Gaussian (RBF) prediction kernel
-rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1),[-1,1])
-rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1),[-1,1])
+rA = tf.reshape(tf.reduce_sum(tf.square(x_data), 1), [-1, 1])
+rB = tf.reshape(tf.reduce_sum(tf.square(prediction_grid), 1), [-1, 1])
 pred_sq_dist = tf.add(tf.subtract(rA, tf.multiply(2., tf.matmul(x_data, tf.transpose(prediction_grid)))), tf.transpose(rB))
 pred_kernel = tf.exp(tf.multiply(gamma, tf.abs(pred_sq_dist)))
 
-prediction_output = tf.matmul(tf.multiply(tf.transpose(y_target),b), pred_kernel)
-prediction = tf.sign(prediction_output-tf.reduce_mean(prediction_output))
+prediction_output = tf.matmul(tf.multiply(tf.transpose(y_target), b), pred_kernel)
+prediction = tf.sign(prediction_output - tf.reduce_mean(prediction_output))
 accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.squeeze(prediction), tf.squeeze(y_target)), tf.float32))
 
 # Declare optimizer
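The loss above is the negated kernel-SVM dual objective: maximize the sum of the dual coefficients minus a kernel-weighted quadratic term. A minimal NumPy sketch of the same quantity, assuming b plays the role of the dual coefficient vector and K is the training-set kernel matrix:

# A sketch of the dual objective as the script computes it (no 1/2 factor,
# box constraints not enforced); b, y, K have assumed shapes (n,), (n,), (n, n).
import numpy as np

def negated_dual(b, y, K):
    first_term = np.sum(b)                      # sum of dual coefficients
    second_term = b @ (K * np.outer(y, y)) @ b  # b^T (K * y y^T) b
    return -(first_term - second_term)          # minimized by the optimizer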
@@ -83,11 +82,11 @@
 
     acc_temp = sess.run(accuracy, feed_dict={x_data: rand_x,
                                              y_target: rand_y,
-                                             prediction_grid:rand_x})
+                                             prediction_grid: rand_x})
     batch_accuracy.append(acc_temp)
 
-    if (i+1)%75==0:
-        print('Step #' + str(i+1))
+    if (i + 1) % 75 == 0:
+        print('Step #' + str(i + 1))
         print('Loss = ' + str(temp_loss))
# Create a mesh to plot points in
@@ -96,9 +95,9 @@
 xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.02),
                      np.arange(y_min, y_max, 0.02))
 grid_points = np.c_[xx.ravel(), yy.ravel()]
-[grid_predictions] = sess.run(prediction, feed_dict={x_data: rand_x,
-                                                     y_target: rand_y,
-                                                     prediction_grid: grid_points})
+[grid_predictions] = sess.run(prediction, feed_dict={x_data: x_vals,
+                                                     y_target: np.transpose([y_vals]),
+                                                     prediction_grid: grid_points})
 grid_predictions = grid_predictions.reshape(xx.shape)
 
 # Plot points and grid
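This hunk is the "kernel fix" named in the commit message (an editorial reading): the plotted decision surface was previously evaluated against rand_x/rand_y, i.e. whatever random batch happened to be drawn last, so the boundary changed from run to run; feeding the full x_vals and y_vals makes the grid evaluation deterministic and consistent with the new-point evaluation added earlier in this commit.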