#!/usr/bin/env python
u"""
crop_output.py
by Yara Mohajerani (last update 10/2018)
Remove padding and re-crop to get back to the original size.
Remove faint points using an intensity threshold.
History
10/2018 Update augment string
Add option for label width
Apply to both 'test' and 'train' output
09/2018 Written
"""
import os
import numpy as np
import sys
from glob import glob
from PIL import Image
import scipy.misc
def post_process(parameters):
glacier = parameters['GLACIER_NAME']
n_batch = int(parameters['BATCHES'])
n_epochs = int(parameters['EPOCHS'])
n_layers = int(parameters['LAYERS_DOWN'])
n_init = int(parameters['N_INIT'])
suffix = parameters['SUFFIX']
drop = float(parameters['DROPOUT'])
imb_str = '_%.2fweight'%(float(parameters['imb_str']))
at = float(parameters['THRESHOLD'])
if at != 0:
threshold_str = '%.2fthreshold'%at
else:
threshold_str = 'nothreshold'
#-- set up configurations based on parameters
if parameters['AUGMENT'] in ['Y','y']:
aug_config = int(parameters['AUG_CONFIG'])
aug_str = '_augment-x%i'%aug_config
else:
aug_config = 0
aug_str = ''
if parameters['CROP'] in ['Y','y']:
crop_str = '_cropped'
else:
crop_str = ''
if parameters['NORMALIZE'] in ['y','Y']:
norm_str = '_normalized'
else:
norm_str = ''
if parameters['LINEAR'] in ['Y','y']:
lin_str = '_linear'
else:
lin_str = ''
drop_str = ''
if drop>0:
drop_str = '_w%.1fdrop'%drop
if parameters['LABEL_WIDTH'] == '3':
lbl_width = ''
else:
lbl_width = '_%ipx'%int(parameters['LABEL_WIDTH'])
#-- directory setup
#- current directory
current_dir = os.path.dirname(os.path.realpath(__file__))
main_dir = os.path.join(current_dir,'..','FrontLearning_data')
glacier_ddir = os.path.join(main_dir,'%s.dir'%glacier)
data_dir = os.path.join(glacier_ddir, 'data')
ddir = {}
ddir['train'] = os.path.join(data_dir,'train')
ddir['test'] = os.path.join(data_dir,'test')
for d in ['test','train']:
#-- read in output data of the neural network
subdir = os.path.join(ddir[d],'output_%ibatches_%iepochs_%ilayers_%iinit%s%s%s%s%s%s%s%s'\
%(n_batch,n_epochs,n_layers,n_init,lin_str,imb_str,drop_str,norm_str,aug_str,suffix,crop_str,lbl_width))
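#-- e.g. 'output_10batches_100epochs_4layers_32init_1.00weight_augment-x3_cropped'
#-- (values illustrative; the actual name depends on the parameters above)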
print(subdir)
#-- also get the unpadded but cropped directory
unpadded_subdir = os.path.join(ddir[d],'images%s%s'%(suffix,crop_str))
#-- get a sample file for dimensions
uncropped_list = glob(os.path.join(unpadded_subdir,'*.png'))
orig_shape = np.array(Image.open(uncropped_list[0]).convert('L')).shape
#-- get a list of the input files
in_list = sorted([fn for fn in glob(os.path.join(subdir,'*png'))
if (not os.path.basename(fn).endswith('postprocess.png') and not os.path.basename(fn).endswith('threshold.png'))])
n_files = len(in_list)
filenames = [os.path.basename(i) for i in in_list]
print(filenames)
h,w = np.array(Image.open(in_list[0]).convert('L')).shape
#-- cropping, same numbers as hard coded in crop_input
#-- NOTE BE CAREFUL WITH THIS LATER. SHOULD MAKE INTO PARAMETER!
hcrop,wcrop = 30,25
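#-- these values must match the padding used in crop_input; the restored canvas below
#-- is the original size plus 2*hcrop rows and 2*wcrop columns of white (1.0) padding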
#-- read files to fix dimensions and remove points dimmer than the threshold
for i in range(n_files):
img = np.array(Image.open(in_list[i]).convert('L'))/255.
#-- if the borders are dark, get rid of them
#img[0:20,:] = 1.
#img[-20:-1,:] = 1.
if at != 0.:
#-- clean up points below the threshold
img_flat = img.flatten()
ind_black = np.squeeze(np.nonzero(img_flat <= at))
ind_white = np.squeeze(np.nonzero(img_flat > at))
img_flat[ind_black] = 0.
img_flat[ind_white] = 1.
img = img_flat.reshape(img.shape)
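#-- (pixels at or below the threshold are set to 0/black, all others to 1/white)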
#-- remove extra cropping that was done for pooling
img_nopad = img[:orig_shape[0],:orig_shape[1]]
#-- finally redo the cropping back to the original
h_final = orig_shape[0]+2*hcrop
w_final = orig_shape[1]+2*wcrop
img_final = np.ones((h_final,w_final))
img_final[hcrop:h_final-hcrop,wcrop:w_final-wcrop] = img_nopad[:,:]
#-- get rid of extra line that sometimes shows at the border of original image
#ind = np.where(img_final[:,wcrop:w_final-wcrop]==1.)
#img_final[ind,:] = 1.
img_final[h+20:h+hcrop,:] = 1.
#-- save final image to file
outfile = os.path.join(subdir,'%s_%s.png'%(filenames[i][:-4],threshold_str))
scipy.misc.imsave(outfile, img_final)
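#-- note: scipy.misc.imsave has been removed from newer SciPy releases; with PIL
#-- (already imported above) an equivalent sketch would be
#--     Image.fromarray((img_final*255).astype(np.uint8)).save(outfile)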
#-- main function to get parameters and pass them along to the postprocessing function
def main():
if (len(sys.argv) == 1):
sys.exit('You need to input at least one parameter file to set run configurations.')
else:
#-- Input Parameter Files (sys.argv[0] is the python code)
input_files = sys.argv[1:]
#-- for each input parameter file
for file in input_files:
#-- keep track of progress
print(os.path.basename(file))
#-- variable with parameter definitions
parameters = {}
#-- Open the parameter file and get a file handle (fid)
fid = open(file, 'r')
#-- for each line in the file, extract the parameter name and value
for fileline in fid:
#-- Splitting the input line between parameter name and value
part = fileline.split()
#-- filling the parameter definition variable
parameters[part[0]] = part[1]
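#-- e.g. a line 'GLACIER_NAME Sverdrup' gives parameters['GLACIER_NAME'] = 'Sverdrup' (value illustrative)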
#-- close the parameter file
fid.close()
#-- pass parameters to the post-processing function
post_process(parameters)
if __name__ == '__main__':
main()