# main.py (forked from hanzhu97702/IEEE_TGRS_MUNet)
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from evaluation import compute_rmse, compute_sad
from utils import print_args, SparseLoss, NonZeroClipper, MinVolumn
from data_loader import set_loader
from model import Init_Weights, MUNet
import matplotlib.pyplot as plt
import scipy.io as sio
import numpy as np
import argparse
import random
import time
import os
import logging


# Build a logger that writes both to a log file and to the console.
def get_logger(filename, verbosity=1, name=None):
    level_dict = {0: logging.DEBUG, 1: logging.INFO, 2: logging.WARNING}
    formatter = logging.Formatter(
        "[%(asctime)s][%(filename)s][line:%(lineno)d][%(levelname)s] %(message)s"
    )
    logger = logging.getLogger(name)
    logger.setLevel(level_dict[verbosity])
    fh = logging.FileHandler(filename, "w")
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    sh = logging.StreamHandler()
    sh.setFormatter(formatter)
    logger.addHandler(sh)
    return logger


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

parser = argparse.ArgumentParser()
parser.add_argument('--fix_random', action='store_true', help='fix randomness')
parser.add_argument('--seed', default=0, type=int)
# parser.add_argument('--gpu_id', default='0,1,2', help='gpu id')
parser.add_argument('--batch_size', default=128, type=int, help='batch size')
parser.add_argument('--patch', default=5, type=int, help='input patch size')
parser.add_argument('--learning_rate_en', default=3e-4, type=float, help='learning rate of the encoder')
parser.add_argument('--learning_rate_de', default=1e-4, type=float, help='learning rate of the decoder')
parser.add_argument('--weight_decay', default=1e-5, type=float, help='network parameter regularization')
parser.add_argument('--lamda', default=0, type=float, help='sparse regularization weight')
parser.add_argument('--reduction', default=2, type=int, help='squeeze reduction ratio')
parser.add_argument('--delta', default=0, type=float, help='minimum-volume regularization weight')
parser.add_argument('--gamma', default=0.8, type=float, help='learning rate decay factor')
parser.add_argument('--epoch', default=200, type=int, help='number of epochs')
parser.add_argument('--dataset', choices=['muffle', 'houston170'], default='muffle', help='dataset to use')
args = parser.parse_args()
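
# Example invocation (a sketch; all flags are defined above, values are illustrative):
#   python main.py --dataset muffle --fix_random --seed 0 --epoch 200
# NOTE: get_logger() below writes to 'Log/train.log', so a 'Log/' directory
# must exist in the working directory before launching training.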


if __name__ == '__main__':
    # os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
    # torch.cuda.set_device(2)
    # if torch.cuda.is_available():
    #     print('GPU is true')
    #     print('cuda version: {}'.format(torch.version.cuda))
    # else:
    #     print('CPU is true')

    if args.fix_random:
        # seed every source of randomness for reproducibility
        manualSeed = args.seed
        np.random.seed(manualSeed)
        random.seed(manualSeed)
        torch.manual_seed(manualSeed)
        torch.cuda.manual_seed(manualSeed)
        torch.cuda.manual_seed_all(manualSeed)
        # NOTE: the cuDNN settings below make runs deterministic but slower
        cudnn.deterministic = True
        cudnn.benchmark = False
        print('Warning: You have chosen to seed training. '
              'This will turn on the CUDNN deterministic setting, '
              'which can slow down your training considerably! '
              'You may see unexpected behavior when restarting '
              'from checkpoints.')
    else:
        cudnn.benchmark = True
        # print("Using GPU: {}".format(args.gpu_id))

    # create dataset and model
    train_loaders, test_loaders, label, M_init, M_true, num_classes, band, col, row, ldr_dim = set_loader(args)
    net = MUNet(band, num_classes, ldr_dim, args.reduction).to(device)

    # initialize network parameters and endmembers;
    # 'position' maps estimated endmembers to the ground-truth class order
    if args.dataset == 'muffle':
        position = np.array([0, 2, 1, 3, 4])  # muffle
    elif args.dataset == 'houston170':
        position = np.array([0, 1, 2, 3])  # houston170
    Init_Weights(net, 'xavier', 1)
    net_dict = net.state_dict()
    net_dict['decoder.0.weight'] = M_init
    net.load_state_dict(net_dict)
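    # The decoder's first linear layer stores the endmember matrix, so copying
    # M_init into 'decoder.0.weight' warm-starts unmixing from the initial
    # endmember estimates supplied by the data loader rather than from random weights.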

    # loss function and regularization
    apply_nonegative = NonZeroClipper()
    loss_func = nn.MSELoss()
    criterionSparse = SparseLoss(args.lamda)
    criterionVolumn = MinVolumn(band, num_classes, args.delta)
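    # As their names suggest (the definitions live in utils.py), SparseLoss
    # penalizes non-sparse abundance vectors (weighted by --lamda) and MinVolumn
    # penalizes a large endmember simplex volume (weighted by --delta). Both
    # weights default to 0, i.e. the regularizers are disabled unless set on
    # the command line.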

    # optimizer: separate learning rates for the decoder (endmembers) and the rest
    params = map(id, net.decoder.parameters())
    ignored_params = list(set(params))
    base_params = filter(lambda p: id(p) not in ignored_params, net.parameters())
    optimizer = torch.optim.Adam([{'params': base_params},
                                  {'params': net.decoder.parameters(), 'lr': args.learning_rate_de}],
                                 lr=args.learning_rate_en, weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=args.gamma)
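    # With StepLR, both learning rates decay by --gamma every 20 epochs, i.e.
    #   lr(epoch) = lr_0 * gamma ** (epoch // 20).
    # The decoder (holding the endmembers) updates more slowly than the encoder,
    # presumably to keep the warm-started endmember estimates from drifting
    # too quickly early in training.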

    logger = get_logger('Log/train.log')
    time_start = time.time()
    logger.info('start training!')

    for epoch in range(args.epoch):
        for i, traindata in enumerate(train_loaders):
            net.train()
            # x is the hyperspectral patch (HSI), y the co-registered LiDAR input
            x, y = traindata
            x = x.to(device)
            y = y.to(device)
            # abu: estimated abundance maps, output: reconstructed spectra
            abu, output = net(x, y)
            output = torch.reshape(output, (output.shape[0], band))
            x = torch.reshape(x, (output.shape[0], band))
            # reconstruction loss: spectral angle between input and reconstruction
            MSE_loss = torch.mean(torch.acos(torch.sum(x * output, dim=1) /
                                             (torch.norm(output, dim=1, p=2) * torch.norm(x, dim=1, p=2))))
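            # This is the mean spectral angle distance (SAD):
            #   SAD(x, x_hat) = arccos( <x, x_hat> / (||x||_2 * ||x_hat||_2) )
            # The variable is named MSE_loss, but the reconstruction term is
            # angular, making it insensitive to per-pixel scaling of the spectra.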
            # sparsity and minimum volume regularization
            MSE_loss += criterionSparse(abu) + criterionVolumn(net.decoder[0].weight)

            optimizer.zero_grad()
            MSE_loss.backward()
            nn.utils.clip_grad_norm_(net.parameters(), max_norm=10, norm_type=1)
            optimizer.step()
            net.decoder.apply(apply_nonegative)
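            # NonZeroClipper (from utils.py) is applied after every update step;
            # judging by its use here, it clamps the decoder weights so the
            # endmember spectra stay nonnegative, a standard physical constraint
            # in spectral unmixing.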

        if epoch % 10 == 0:
            logger.info('training stage')
            logger.info('Epoch: {:d} | Train Unmix Loss: {:.5f} | RE Loss: {:.5f} | Sparsity Loss: {:.5f} | Minvol: {:.5f}'
                        .format(epoch, MSE_loss, loss_func(output, x), criterionSparse(abu), criterionVolumn(net.decoder[0].weight)))
            net.eval()
            with torch.no_grad():  # no gradients needed during validation
                for k, testdata in enumerate(test_loaders):
                    x, y = testdata
                    x = x.to(device)
                    y = y.to(device)
                    abu_est, output = net(x, y)
                    # reshape the abundance tensor into per-class (row, col) maps for evaluation
                    abu_est = torch.reshape(abu_est.squeeze(-1).permute(2, 1, 0), (num_classes, row, col)).permute(0, 2, 1).cpu().data.numpy()
                    edm_result = torch.reshape(net.decoder[0].weight, (band, num_classes)).cpu().data.numpy()
                    logger.info('validation')
                    logger.info('RMSE: {:.5f} | SAD: {:.5f}'.format(compute_rmse(abu_est[position, :, :], label),
                                                                    compute_sad(M_true, edm_result[:, position])))
                    print('**********************************')
        scheduler.step()

    logger.info('finish training!')
    time_end = time.time()

    # final model evaluation
    net.eval()
    # inspect the trained spectral SE module
    print(net.spectral_se)
    with torch.no_grad():
        for i, testdata in enumerate(test_loaders):
            x, y = testdata
            x = x.to(device)
            y = y.to(device)
            abu, output = net(x, y)
            # compute metrics on the reordered abundance maps and endmembers
            abu_est = torch.reshape(abu.squeeze(-1).permute(2, 1, 0), (num_classes, row, col)).permute(0, 2, 1).cpu().data.numpy()
            edm_result = torch.reshape(net.decoder[0].weight, (band, num_classes)).cpu().data.numpy()
            abu_est = abu_est[position, :, :]
            edm_result = edm_result[:, position]
            RMSE = compute_rmse(label, abu_est)
            SAD = compute_sad(M_true, edm_result)
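            # compute_rmse presumably averages the squared abundance errors over
            # all pixels and classes before taking the square root, i.e.
            #   RMSE = sqrt( mean( (A - A_hat)^2 ) ),
            # while compute_sad averages the spectral angle (see the training
            # loss above) over endmember pairs; see evaluation.py for the
            # exact definitions.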

    print('**********************************')
    print('RMSE: {:.5f} | SAD: {:.5f}'.format(RMSE, SAD))
    print('**********************************')
    print('total computational cost: {:.2f}s'.format(time_end - time_start))
    print('**********************************')

    # plot abundance maps: estimated (top row) vs. ground truth (bottom row)
    for i in range(abu_est.shape[0]):
        plt.subplot(2, num_classes, i + 1)
        plt.imshow(abu_est[i, :, :])
        plt.subplot(2, num_classes, i + 1 + num_classes)
        plt.imshow(label[i, :, :])
    plt.show()

    # print hyperparameter settings and save results
    print_args(vars(args))
    save_path = str(args.dataset) + '_result.mat'
    sio.savemat(save_path, {'abu_est': abu_est.T, 'M_est': edm_result})
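
    # A minimal sketch for inspecting the saved results afterwards (the keys
    # match the savemat call above; the filename depends on --dataset):
    #   data = sio.loadmat('muffle_result.mat')
    #   abu_est, M_est = data['abu_est'], data['M_est']
    #   print(abu_est.shape, M_est.shape)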