#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 9 09:35:44 2020
@author: sadhana-ravikumar
"""
import sys
sys.path.append('./utilities')
import config_srlm as config
import torch
import preprocess_data as p
from unet_model import UNet
from torch.utils.data import DataLoader
import torch.nn.functional as F
import numpy as np
import nibabel as nib
import os.path as osp
import os
c = config.Config_Unet()
dir_names = config.Config()
def computeGeneralizedDSC(gt, seg):
    # Percentage of foreground (gt > 0) voxels where the prediction matches
    gt_seg = gt[gt > 0]
    myseg = seg[gt > 0]
    gdsc = 100 * (np.sum(gt_seg == myseg) / len(gt_seg))
    return gdsc
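# Worked example (illustration only, not run by the pipeline): with
# gt = np.array([0, 1, 2, 2]) and seg = np.array([0, 1, 2, 1]), the three
# foreground voxels are [1, 2, 2] vs. [1, 2, 1]; two of three match, so
# computeGeneralizedDSC(gt, seg) returns 100 * 2 / 3 ~= 66.7.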
def generate_prediction(output):
    """
    Generates predictions based on the output of the network
    """
    # Convert logits to probabilities, then take the most likely class per voxel
    probability = F.softmax(output, dim=1)
    _, preds_tensor = torch.max(probability, 1)
    preds = np.squeeze(preds_tensor.cpu().numpy())
    return preds, probability
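# Hypothetical usage (shapes assumed): for a logits tensor of shape
# (N, num_class, H, W, D) from net(img), generate_prediction(output) returns
# the per-voxel argmax labels and the softmax probabilities. Note that the
# test loop below computes the softmax inline rather than calling this helper.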
## Set up GPU if available
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Whether to load an existing model. If True, model_dir and tfboard_dir must
# already exist; otherwise they are recreated from scratch, wiping out the old ones.
load_model = True
#Set up directories
root_dir = dir_names.root_dir
experiment_name = "Experiment_08062020_prior"
tfboard_dir = dir_names.tfboard_dir + '/' + experiment_name
model_dir = dir_names.model_dir + '/' + experiment_name + '/'
test_dir = dir_names.test_dir + '/' + experiment_name + '/'
if not os.path.exists(test_dir):
    os.makedirs(test_dir)
if not load_model:
    c.force_create(model_dir)
    c.force_create(tfboard_dir)
#Define image dataset (reads in full images and segmentations)
test_dataset = p.ImageDataset_withPrior(csv_file = c.final_test_csv)
num_class = 3
model_file = model_dir + 'model_17.pth'
net = UNet(num_class, in_channels = 2)
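# map_location=device remaps the checkpoint's tensors, so a model saved on a
# GPU machine can also be loaded when only a CPU is available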
net.load_state_dict(torch.load(model_file, map_location=device))
net.eval()
net.to(device)
pad_size = c.half_patch[0]
include_prior = True
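# pad_size is assumed to mirror the padding applied inside GeneratePatches, so
# the assembled volume can be cropped back to the original shape at the end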
with torch.no_grad():
    # Note: iteration starts at index 6, skipping the first six test samples
    for i in range(6, len(test_dataset)):
        sample = test_dataset[i]
        if include_prior:
            prior = sample['prior']
            test_patches = p.GeneratePatches(sample, is_training=False, transform=False, prior=prior)
        else:
            test_patches = p.GeneratePatches(sample, is_training=False, transform=False, prior=None)
        testloader = DataLoader(test_patches, batch_size=c.batch_size, shuffle=False, num_workers=c.num_thread)

        image_id = sample['id']
        print("Generating test patches for", image_id)

        image_shape = sample['image'].shape
        affine = sample['affine']

        # For assembling the image: accumulate class probabilities (prob) and a
        # per-voxel patch count (rep) over a padded volume, then average later
        im_shape_pad = [x + pad_size * 2 for x in image_shape]
        prob = np.zeros([num_class] + list(im_shape_pad))
        rep = np.zeros([num_class] + list(im_shape_pad))
        for j, patch_batched in enumerate(testloader):
            print("batch", j)
            img = patch_batched['image'].to(device)
            seg = patch_batched['seg'].to(device)
            cpts = patch_batched['cpt']

            output = net(img)
            probability = F.softmax(output, dim=1).cpu().numpy()

            # Crop each patch to only use the center part
            probability = probability[:, :,
                                      c.patch_crop_size:-c.patch_crop_size,
                                      c.patch_crop_size:-c.patch_crop_size,
                                      c.patch_crop_size:-c.patch_crop_size]
            # Assemble the image in the loop: add each cropped patch at its
            # center point and count how many patches cover each voxel
            n, C, hp, wp, dp = probability.shape
            # Integer half-sizes; float indices would break the slicing below
            hs, ws, ds = hp // 2, wp // 2, dp // 2
            for cpt, pred in zip(list(cpts), list(probability)):
                prob[:, cpt[0] - hs:cpt[0] + hs, cpt[1] - ws:cpt[1] + ws, cpt[2] - ds:cpt[2] + ds] += pred
                rep[:, cpt[0] - hs:cpt[0] + hs, cpt[1] - ws:cpt[1] + ws, cpt[2] - ds:cpt[2] + ds] += 1
        # Crop the volume back, since padding was added when generating patches
        prob = prob[:, pad_size:-pad_size, pad_size:-pad_size, pad_size:-pad_size]
        rep = rep[:, pad_size:-pad_size, pad_size:-pad_size, pad_size:-pad_size]
        rep[rep == 0] = 1e-6  # guard against division by zero where no patch landed

        # Normalize by repetition so overlapping patch predictions are averaged
        prob = prob / rep
        seg_pred = np.argmax(prob, axis=0).astype('float')
        prob = np.moveaxis(prob, 0, -1)  # put the class axis last for the NIfTI volume

        gdsc = computeGeneralizedDSC(sample['seg'], seg_pred)
        print("Generalized DSC:", gdsc)

        nib.save(nib.Nifti1Image(prob, affine), osp.join(test_dir, "prob_" + str(image_id) + ".nii.gz"))
        nib.save(nib.Nifti1Image(seg_pred, affine), osp.join(test_dir, "seg_" + str(image_id) + ".nii.gz"))

print("Done!")