-
Notifications
You must be signed in to change notification settings - Fork 0
/
lingon_eval_utils.py
70 lines (58 loc) · 3.02 KB
/
lingon_eval_utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import numpy as np
import matplotlib.pyplot as plt
import h5py
def print_mislabeled_images(classes, X, y, p, img_shape=(64, 64, 3)):
    """
    Plots images where predictions and truth were different.

    Parameters:
    classes -- array of class names stored as bytes (e.g. loaded via h5py),
               decoded as UTF-8 for the plot titles
    X -- dataset, one flattened image per column, shape (features, num_examples)
    y -- true labels (1/0), shape (1, num_examples)
    p -- predictions (1/0), shape (1, num_examples)
    img_shape -- (height, width, channels) used to un-flatten each column
                 for display; default (64, 64, 3) matches the original
                 hard-coded 64x64 RGB images
    """
    # p + y == 1 exactly when prediction and truth disagree (one is 1, the other 0)
    a = p + y
    mislabeled_indices = np.asarray(np.where(a == 1))
    plt.rcParams['figure.figsize'] = (40.0, 40.0)  # set default size of plots
    num_images = len(mislabeled_indices[0])
    col = 15
    # enough rows to fit all mislabeled images at 15 per row
    row = (num_images // col) + 1
    for i in range(num_images):
        index = mislabeled_indices[1][i]
        plt.subplot(row, col, i + 1)
        plt.imshow(X[:, index].reshape(*img_shape), interpolation='nearest')
        plt.axis('off')
        # class labels are bytes objects, so decode before concatenating
        plt.title("Prediction: " + classes[int(p[0,index])].decode("utf-8") + " \n Class: " + classes[y[0,index]].decode("utf-8"))
def precision_recall (predictions, y_truth, evaluation_set = "Evaluation set"):
    """
    Calculates and prints precision and recall for binary (1/0) predictions.

    Parameters:
    predictions -- the predicted y-values (1/0) from the dataset
    y_truth -- the true labels (1/0) from the dataset
    evaluation_set -- dataset name used in the printed header

    Returns:
    (precision, recall) as floats. When a denominator would be zero
    (no predicted positives / no true positives), 0.0 is reported
    instead of raising ZeroDivisionError.
    """
    preds = np.squeeze(predictions)
    truth = np.squeeze(y_truth)
    # diff == 1  -> false positive  (predicted lingon = 1, truth is 0 = no-lingon)
    # diff == -1 -> false negative  (predicted 0, truth is 1 = lingon)
    diff_values = preds - truth
    # Vectorized counts instead of building throwaway Python lists
    true_lingons = int(np.sum(truth == 1))          # positives present in the truth
    all_pred_pos = int(np.sum(preds == 1))          # predicted as positive, i.e. y_pred = 1
    false_pred_pos = int(np.sum(diff_values == 1))  # falsely predicted positive
    false_pred_neg = int(np.sum(diff_values == -1)) # true positives falsely predicted negative
    # True positives = all predicted positives - false positives
    true_pred_pos = all_pred_pos - false_pred_pos

    # Precision: share of predicted lingon that truly are lingon.
    # Guard the zero-denominator case (nothing predicted positive).
    precision = true_pred_pos / all_pred_pos if all_pred_pos else 0.0
    # Recall: share of all true lingon correctly predicted as lingon.
    # Guard the zero-denominator case (no true positives at all).
    recall_denominator = true_pred_pos + false_pred_neg
    recall = true_pred_pos / recall_denominator if recall_denominator else 0.0

    precision_txt = "Precision: {:.0%}"
    recall_txt ="Recall: {:.0%}"
    print(evaluation_set + " set evaluation metrics")
    print(precision_txt.format(precision))
    print(recall_txt.format(recall))
    print("Lingon in trainset: ", true_lingons)
    print("Predicted to be lingon: ", all_pred_pos)
    print("Incorrectly predicted to be a lingon:", false_pred_pos)
    print("Correctly predicted to be a lingon: ", true_pred_pos)
    print("Incorrectly predicted not to be a lingon (icke-lingon): ", false_pred_neg)
    return precision, recall