-
Notifications
You must be signed in to change notification settings - Fork 0
/
utils.py
101 lines (92 loc) · 3.96 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import torch
import math
import torch.nn as nn
from tqdm import tqdm
from models.GetModel import build_promptmodel
from peft import inject_adapter_in_model, LoraConfig, get_peft_model,get_peft_model_state_dict
class foundationmodel(nn.Module):
    """ViT-style backbone wrapped with LoRA adapters (via peft) for
    parameter-efficient fine-tuning.

    The wrapped model is stored as ``self.back`` (name preserved for
    state-dict / checkpoint compatibility).
    """
    def __init__(self, layer=12, num_classes=10, depth_cls=0, modeltype='ViT', lora_config=None):
        super().__init__()
        # Build the prompt-model backbone (no prompt tokens, 224x224 input,
        # 16x16 patches), then inject LoRA adapters into it.
        backbone = build_promptmodel(
            num_classes=num_classes,
            edge_size=224,
            modeltype=modeltype,
            patch_size=16,
            Prompt_Token_num=0,
            depth=layer,
            depth_cls=depth_cls,
        )
        self.back = get_peft_model(backbone, lora_config)

    def forward(self, x):
        """Delegate straight to the LoRA-wrapped backbone."""
        return self.back(x)
def lrcos(step=0, lr=0.01, lr_min=0.0001, T_max=500):
    """Cosine-annealing schedule: interpolate from ``lr`` (at step 0)
    down to ``lr_min`` (at step ``T_max``) along half a cosine period.
    """
    cosine = math.cos(math.pi * step / T_max)
    return lr_min + (lr - lr_min) * (1 + cosine) / 2
def evaluation_depthfl(model, testdata):
    """Evaluate a multi-exit (DepthFL-style) model, scoring only the deepest exit.

    The model's forward is expected to return a sequence of per-exit logits;
    only the last element (the deepest classifier head) is used here.

    Args:
        model: module whose forward returns a list/tuple of logit tensors.
        testdata: a single iterable of (images, labels) batches, or a list of
            such iterables (e.g. one loader per client).

    Returns:
        ``(top1, top5)`` accuracy percentages: two floats for a single
        loader, or two parallel lists of floats when ``testdata`` is a list.

    Note: inputs/labels are moved to the default CUDA device, so a GPU is
    required (matches the original behavior).
    """
    model.eval()

    def _accuracies(loader):
        # Accumulate top-1 / top-5 hit counts over one loader.
        total = 0
        top1 = 0
        topk = 0
        with torch.no_grad():
            for test_imgs, test_labels in loader:
                test_labels = test_labels.cuda()
                outs = model(test_imgs.cuda())
                out = outs[-1]  # deepest exit only
                _, maxk = torch.topk(out, 5, dim=-1)
                total += test_labels.size(0)
                test_labels = test_labels.view(-1, 1)
                top1 += (test_labels == maxk[:, 0:1]).sum().item()
                topk += (test_labels == maxk).sum().item()
        return 100 * top1 / total, 100 * topk / total

    if isinstance(testdata, list):
        top1s, topks = [], []
        for test_data in tqdm(testdata):
            t1, tk = _accuracies(test_data)
            top1s.append(t1)
            topks.append(tk)
        return top1s, topks
    return _accuracies(testdata)
def evaluation(model, testdata):
    """Evaluate a single-output classifier with top-1 / top-5 accuracy.

    Args:
        model: module whose forward returns a logits tensor of shape
            (batch, num_classes); num_classes must be >= 5 for top-5.
        testdata: a single iterable of (images, labels) batches, or a list of
            such iterables (e.g. one loader per client).

    Returns:
        ``(top1, top5)`` accuracy percentages: two floats for a single
        loader, or two parallel lists of floats when ``testdata`` is a list.

    Note: inputs/labels are moved to the default CUDA device, so a GPU is
    required (matches the original behavior).
    """
    model.eval()

    def _accuracies(loader):
        # Accumulate top-1 / top-5 hit counts over one loader.
        total = 0
        top1 = 0
        topk = 0
        with torch.no_grad():
            for test_imgs, test_labels in loader:
                test_labels = test_labels.cuda()
                out = model(test_imgs.cuda())
                _, maxk = torch.topk(out, 5, dim=-1)
                total += test_labels.size(0)
                test_labels = test_labels.view(-1, 1)
                top1 += (test_labels == maxk[:, 0:1]).sum().item()
                topk += (test_labels == maxk).sum().item()
        return 100 * top1 / total, 100 * topk / total

    if isinstance(testdata, list):
        top1s, topks = [], []
        for test_data in tqdm(testdata):
            t1, tk = _accuracies(test_data)
            top1s.append(t1)
            topks.append(tk)
        return top1s, topks
    return _accuracies(testdata)