Model.py
from torchvision.models import resnet50
import torch
import torch.nn as nn
from torch.nn import Module
# Weight-initialization helpers
# Kaiming (He) initialization for linear/conv layers; constant init for BatchNorm.
def weights_init_kaiming(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm') != -1:
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)
# Small-std normal initialization for the classification layer.
def weights_init_classifier(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.normal_(m.weight, std=0.001)
        # `is not None` rather than truthiness: bias may be None (bias=False),
        # and boolean conversion of a multi-element tensor raises an error
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
# Defines the bottleneck embedding block and the classification layer.
# Structure: |--bn--|--relu--|--Linear--|--bn--|--Linear--|
class ClassBlock(nn.Module):
    def __init__(self, input_dim, class_num, relu=True, num_bottleneck=512):
        super(ClassBlock, self).__init__()
        add_block1 = []
        add_block2 = []
        add_block1 += [nn.BatchNorm1d(input_dim)]
        if relu:
            add_block1 += [nn.LeakyReLU(0.1)]
        add_block1 += [nn.Linear(input_dim, num_bottleneck, bias=False)]
        add_block2 += [nn.BatchNorm1d(num_bottleneck)]
        add_block1 = nn.Sequential(*add_block1)
        add_block1.apply(weights_init_kaiming)
        add_block2 = nn.Sequential(*add_block2)
        add_block2.apply(weights_init_kaiming)
        classifier = []
        classifier += [nn.Linear(num_bottleneck, class_num, bias=False)]
        classifier = nn.Sequential(*classifier)
        classifier.apply(weights_init_classifier)
        self.add_block1 = add_block1
        self.add_block2 = add_block2
        self.classifier = classifier

    def forward(self, x):
        x = self.add_block1(x)    # bottleneck embedding
        x1 = self.add_block2(x)   # embedding after BatchNorm
        x2 = self.classifier(x1)  # identity logits
        return x, x1, x2
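# A minimal usage sketch for ClassBlock (hypothetical; the class is defined here
# but not referenced elsewhere in this file, and the dimensions are assumptions):
#   block = ClassBlock(input_dim=2048, class_num=751)
#   f, f_bn, logits = block(torch.randn(32, 2048))
#   # f, f_bn: (32, 512); logits: (32, 751)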
# Training model: ResNet-50 backbone with a BNNeck head.
class Train_Model(Module):
    def __init__(self):
        super(Train_Model, self).__init__()
        # pretrained=True is deprecated in recent torchvision; the newer
        # equivalent is weights=ResNet50_Weights.IMAGENET1K_V1
        self.baseline = resnet50(pretrained=True)
        # Last-stride trick: stride 1 in the final stage keeps a larger feature map
        self.baseline.layer4[0].conv2.stride = (1, 1)
        self.baseline.layer4[0].downsample[0].stride = (1, 1)
        self.baseline.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # 751 output classes, matching the number of training identities in Market-1501
        self.baseline.fc = nn.Linear(2048, 751, bias=False)
        self.baseline.fc.apply(weights_init_classifier)
        # BNNeck: BatchNorm between the global feature and the classifier,
        # with the BN bias frozen
        self.BN = nn.BatchNorm1d(2048)
        self.BN.bias.requires_grad_(False)
        self.BN.apply(weights_init_kaiming)
    def forward(self, x):
        # ResNet-50 backbone
        x = self.baseline.conv1(x)
        x = self.baseline.bn1(x)
        x = self.baseline.relu(x)
        x = self.baseline.maxpool(x)
        x = self.baseline.layer1(x)
        x = self.baseline.layer2(x)
        x = self.baseline.layer3(x)
        x = self.baseline.layer4(x)
        # BNNeck: flatten(1) rather than squeeze() so a batch of size 1
        # keeps its batch dimension
        ft = self.baseline.avgpool(x).flatten(1)
        fi = self.BN(ft)
        out = self.baseline.fc(fi)
        return ft, fi, out
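# In the BNNeck scheme (Luo et al., "Bag of Tricks and a Strong Baseline for
# Deep Person Re-Identification"), which this head follows, the three outputs
# are typically consumed as follows (an assumption about the training script,
# which is not part of this file):
#   ft  -> triplet loss (feature before BN)
#   out -> cross-entropy ID loss (logits from the bias-free classifier)
#   fi  -> post-BN feature, used for distance computation at inference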
# Inference wrapper: reuses the trained backbone and returns the post-BN feature.
class Test_Model(Module):
    def __init__(self, model):
        super(Test_Model, self).__init__()
        self.model = model

    def forward(self, x):
        x = self.model.baseline.conv1(x)
        x = self.model.baseline.bn1(x)
        x = self.model.baseline.relu(x)
        x = self.model.baseline.maxpool(x)
        x = self.model.baseline.layer1(x)
        x = self.model.baseline.layer2(x)
        x = self.model.baseline.layer3(x)
        x = self.model.baseline.layer4(x)
        # BNNeck: return the post-BN feature for retrieval
        ft = self.model.baseline.avgpool(x).flatten(1)
        fi = self.model.BN(ft)
        return fi
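# A minimal smoke-test sketch: builds the model and checks output shapes on a
# random batch. The 256x128 input size and batch size are assumptions
# (256x128 is typical for person re-ID pipelines); adjust as needed.
if __name__ == '__main__':
    model = Train_Model()
    model.eval()
    dummy = torch.randn(4, 3, 256, 128)
    with torch.no_grad():
        ft, fi, out = model(dummy)
        print(ft.shape, fi.shape, out.shape)  # (4, 2048), (4, 2048), (4, 751)
        feats = Test_Model(model)(dummy)
        print(feats.shape)  # (4, 2048)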