-
Notifications
You must be signed in to change notification settings - Fork 0
/
LC_unet_classifier.py
154 lines (122 loc) · 6.64 KB
/
LC_unet_classifier.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
import argparse
import numpy as np
import tensorflow as tf
import keras
from keras.layers import InputLayer, Dense, Activation, Dropout, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D, SpatialDropout2D, AveragePooling2D, UpSampling2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.utils import plot_model
from LC_parser import *
def create_unet(image_size=[64, 64, 1],
                hrrr=True,
                n_hrrr_params=5,
                ksc_wx_twr=False,
                n_ksc_twr_params=4,
                ksc_efm=False,
                filters=[8, 16, 32],
                conv_size=2,
                pool_size=2,
                deep=4,
                n_conv_per_step=2,
                lrate=.0001,
                n_types=1,
                loss='cross-entropy',      # string or TensorFlow loss object
                metrics='accuracy',        # string or TensorFlow metric object
                padding='same',            # string
                strides=1,                 # int, pixel stride for the convolutions
                conv_activation='elu',     # string, consider relu
                last_activation='linear',  # predicting the amount of lightning
                batch_normalization=False,
                dropout=0.0,
                skip=False):
    '''
    Build a U-Net for LaunchCast that maps five HRRR surface fields to a
    lightning-amount (or probability) image.

    The net walks down the U once per entry in `filters` (Conv2D then
    MaxPooling2D), learns at the bottom with one more Conv2D, then walks back
    up with the filter list reversed (Conv2D then UpSampling2D), finishing
    with a single Conv2D output head of `n_types` channels.

    Parameters
    ----------
    image_size : sequence of int
        [rows, cols, channels]; only rows and cols are used here — each HRRR
        input is a single-channel (rows, cols, 1) image.
    filters : sequence of int
        Number of filters for each convolution going down; reversed on the
        way up.
    conv_size, pool_size, padding, strides : convolution/pooling geometry.
    lrate : float
        Adam learning rate.
    n_types : int
        Number of output channels (e.g. CC and CG lightning types).
    loss, metrics : string or TensorFlow object, passed to `model.compile`.
        NOTE(review): the default 'cross-entropy' is not a valid Keras loss
        identifier (Keras uses e.g. 'binary_crossentropy') — confirm callers
        always override it, as the commented-out main below does.
    conv_activation, last_activation : activation names. Use softmax on the
        last layer to predict probability of CC/CG lightning; linear (or
        anything >= 0) to predict the amount.
    hrrr, n_hrrr_params, ksc_wx_twr, n_ksc_twr_params, ksc_efm, deep,
    n_conv_per_step, batch_normalization, dropout, skip :
        Accepted for interface compatibility but NOT yet used by this
        implementation — TODO wire them in.

    Returns
    -------
    A compiled tf.keras Model with five named HRRR inputs.
    '''
    # Each HRRR surface parameter arrives as its own single-channel image.
    # BUG FIX: the original passed the whole image_size list as the first
    # shape dimension (shape=([64,64,1], [64,64,1], 1)); use rows/cols.
    rows, cols = image_size[0], image_size[1]
    input_names = ['hrrr_sfc_u', 'hrrr_sfc_v', 'hrrr_sfc_temp',
                   'hrrr_sfc_moist', 'hrrr_sfc_pres']
    inputs = [tf.keras.Input(shape=(rows, cols, 1),
                             dtype=tf.dtypes.float64,
                             name=name)
              for name in input_names]

    # Stack the five fields along the channel axis before convolving.
    tensor = tf.concat(inputs, axis=3)

    # Go down the U-Net: convolve, then pool, once per filter count.
    # BUG FIX: honor the `strides` parameter (the original hardcoded 1).
    for i, f in enumerate(filters):
        tensor = Conv2D(filters=f, padding=padding, strides=strides,
                        kernel_size=(conv_size, conv_size), use_bias=True,
                        activation=conv_activation,
                        name=f'Down_Conv_f{f}_{conv_activation}')(tensor)
        tensor = MaxPooling2D(pool_size=(pool_size, pool_size),
                              strides=(pool_size, pool_size),
                              name=f'Down_Pool_{i}')(tensor)

    # Learn at the bottom of the U (f is the last/deepest filter count).
    tensor = Conv2D(filters=f, padding=padding, strides=strides,
                    kernel_size=(conv_size, conv_size), use_bias=True,
                    activation=conv_activation,
                    name=f'Bottom_Conv_f{f}_{conv_activation}')(tensor)

    # Go back up the U with the filter list reversed: convolve, then upsample.
    up_filters = np.flip(filters)
    for i, f in enumerate(up_filters):
        tensor = Conv2D(filters=f, padding=padding, strides=strides,
                        kernel_size=(conv_size, conv_size), use_bias=True,
                        activation=conv_activation,
                        name=f'Up_Conv_f{f}_{conv_activation}')(tensor)
        tensor = UpSampling2D(size=(pool_size, pool_size),
                              name=f'Up_UpSample_{i}')(tensor)

    # One last learning block at full resolution.
    tensor = Conv2D(filters=f, padding=padding, strides=strides,
                    kernel_size=(conv_size, conv_size), use_bias=True,
                    activation=conv_activation,
                    name=f'Top_Conv_f{f}_{conv_activation}')(tensor)

    # Output head: softmax for probability of CC/CG lightning, linear (or
    # another non-negative activation) for the amount of lightning.
    output_tensor = Conv2D(filters=n_types, padding=padding, strides=strides,
                           kernel_size=(conv_size, conv_size), use_bias=True,
                           name='MERLIN_CG',
                           activation=last_activation)(tensor)

    # Compile. BUG FIX: pass `metrics` through (it was accepted but ignored);
    # Keras expects a list, so wrap a bare string/object.
    model = Model(inputs=inputs, outputs=output_tensor)
    opt = keras.optimizers.Adam(learning_rate=lrate, amsgrad=False)
    model.compile(optimizer=opt, loss=loss,
                  metrics=metrics if isinstance(metrics, (list, tuple))
                  else [metrics])
    return model
# if __name__ == "__main__":
# print('LC_unet_classifier.py main function')
# # Parse and check incoming arguments
# parser = create_parser()
# args = parser.parse_args()
# print(args)
# image_size=[64,64,1]
# if args.build_model:
# print('building the model')
# model = create_unet(image_size=image_size,
# hrrr=args.hrrr,
# n_hrrr_params=args.n_hrrr_params,
# ksc_wx_twr=args.ksc_wx_twr,
# n_ksc_twr_params=args.n_wxtwr_params,
# ksc_efm = args.ksc_efm,
# filters=args.conv_nfilters,
# conv_size=args.conv_size,
# pool_size=args.pool,
# deep=args.deep,
# n_conv_per_step=args.n_conv_per_step,
# lrate=args.lrate,
# loss=tf.keras.losses.MeanSquaredError(),#tensor flow loss function
# metrics=tf.keras.metrics.SparseCategoricalAccuracy(),#tensor flow metrics
# padding=args.padding,#string, same,valid,etc.
# strides=args.stride,#int, pixel stride
# conv_activation=args.activation_conv,
# last_activation=args.activation_last,
# batch_normalization=args.batch_normalization,
# dropout=args.dropout,
# skip=args.skip)
# print(model.summary())
# # Plot the model if the model is built
# if args.render and args.build_model:
# render_fname = 'LC_model_test.png'
# plot_model(model, to_file=render_fname, show_shapes=True, show_layer_names=True)