"""
# Author: Nag Mani
# Created: 2/18/2019
"""
import time

import torch


def test_accuracy(test_loader, model, args):
    """Compute top-1 accuracy of `model` over `test_loader` on GPU `args.gpu`."""
    start = time.time()
    correct = 0
    total = 0
    model.eval()
    with torch.no_grad():
        for images, labels in test_loader:
            images, labels = images.cuda(args.gpu), labels.cuda(args.gpu)
            outputs = model(images)
            # Predicted class = index of the highest logit per sample.
            _, predicted = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    acc = 100 * correct / total
    print("Correct vs total = {}/{}".format(correct, total))
    print("Accuracy of the network on the {} test images: {:.2f} %".format(total, acc))
    print("Execution time: ", time.time() - start)
    return acc
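

# Usage sketch (hypothetical names, not from this repo): assumes a CUDA device
# and an `args` namespace carrying the GPU index, e.g. built with argparse:
#
#     test_loader = torch.utils.data.DataLoader(test_set, batch_size=128)
#     acc = test_accuracy(test_loader, model.cuda(args.gpu), args)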


def validate(data_loader, model, criterion, args):
    """Run one evaluation pass, returning the average top-1 accuracy."""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(data_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
                target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1 = accuracy(output, target)
            losses.update(loss.item(), input.size(0))
            top1.update(acc1[0].item(), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # Optional per-batch progress report:
            # if i % args.print_freq == 0:
            #     print('Test: [{0}/{1}]\t'
            #           'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
            #           'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
            #           'Acc@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
            #               i, len(data_loader), batch_time=batch_time,
            #               loss=losses, top1=top1))

    print(' * Acc@1 {:.3f}'.format(top1.avg))
    return top1.avg


class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
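

# Illustrative only: AverageMeter keeps a sample-weighted running mean, so
# `update` takes the per-batch statistic plus the batch size `n`:
#
#     meter = AverageMeter()
#     meter.update(0.5, n=32)   # mean loss 0.5 over 32 samples
#     meter.update(0.3, n=32)
#     meter.avg                 # -> 0.4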


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # Top-k predicted class indices; after the transpose, one column per sample.
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
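

# The smoke test below is a minimal sketch, not part of the original pipeline:
# it exercises `validate` (and, through it, `accuracy` and `AverageMeter`) on
# random tensors with a tiny linear model, passing gpu=None so everything stays
# on the CPU. `test_accuracy` is omitted because it calls `.cuda()`
# unconditionally and therefore needs a GPU.
if __name__ == '__main__':
    import argparse
    from torch.utils.data import DataLoader, TensorDataset

    args = argparse.Namespace(gpu=None)
    fake_images = torch.randn(64, 3, 32, 32)
    fake_labels = torch.randint(0, 10, (64,))
    loader = DataLoader(TensorDataset(fake_images, fake_labels), batch_size=16)

    model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10))
    criterion = torch.nn.CrossEntropyLoss()
    validate(loader, model, criterion, args)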