Skip to content

Commit

Permalink
Updating to allow multi-class output
Browse files Browse the repository at this point in the history
  • Loading branch information
mas-dse-greina committed Mar 20, 2018
1 parent 1ab5ceb commit b8a406a
Show file tree
Hide file tree
Showing 2 changed files with 47 additions and 35 deletions.
51 changes: 38 additions & 13 deletions memory_benchmarking/benchmark_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,10 @@
type = int,
default=1,
help="Number of channels")
parser.add_argument("--num_outputs",
type = int,
default=1,
help="Number of outputs")

parser.add_argument("--bz",
type = int,
Expand Down Expand Up @@ -76,11 +80,16 @@
action="store_true",
default=False,
help="Use binary classifier instead of U-Net")
parser.add_argument("--mkl_verbose",
action="store_true",
default=False,
help="Print MKL debug statements.")

args = parser.parse_args()

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # Get rid of the AVX, SSE warnings
os.environ["MKL_VERBOSE"] = "1" # Print out messages from MKL operations
if args.mkl_verbose:
os.environ["MKL_VERBOSE"] = "1" # Print out messages from MKL operations
os.environ["OMP_NUM_THREADS"] = str(args.intraop_threads)
os.environ["KMP_BLOCKTIME"] = str(args.blocktime)
os.environ["KMP_AFFINITY"] = "granularity=thread,compact,1,0"
Expand All @@ -98,13 +107,22 @@
args.dim_length,
args.dim_length,
args.num_channels]
out_shape = [args.bz,
args.dim_length,
args.dim_length,
args.num_outputs]
else: # Define shape of the tensors (3D)
dims=(1,2,3)
tensor_shape = [args.bz,
args.dim_length,
args.dim_length,
args.dim_length,
args.num_channels]
tensor_shape = [args.bz,
args.dim_length,
args.dim_length,
args.dim_length,
args.num_outputs]

# Optimize CPU threads for TensorFlow
config = tf.ConfigProto(
Expand All @@ -121,35 +139,35 @@
img = tf.placeholder(tf.float32, shape=tensor_shape) # Input tensor

if args.single_class_output:
truth = tf.placeholder(tf.float32, shape=(args.bz,1)) # Label tensor
truth = tf.placeholder(tf.float32, shape=(args.bz,args.num_outputs)) # Label tensor
else:
truth = tf.placeholder(tf.float32, shape=tensor_shape) # Label tensor
truth = tf.placeholder(tf.float32, shape=out_shape) # Label tensor

# Define the model
# Predict the output mask

if args.single_class_output:
if args.D2: # 2D convnet model
predictions = conv2D(img,
print_summary=args.print_model)
print_summary=args.print_model, n_out=args.num_outputs)
else: # 3D convnet model
predictions = conv3D(img,
print_summary=args.print_model)
print_summary=args.print_model, n_out=args.num_outputs)
else:

if args.D2: # 2D U-Net model
predictions = unet2D(img,
use_upsampling=args.use_upsampling,
print_summary=args.print_model)
print_summary=args.print_model, n_out=args.num_outputs)
else: # 3D U-Net model
predictions = unet3D(img,
use_upsampling=args.use_upsampling,
print_summary=args.print_model)
print_summary=args.print_model, n_out=args.num_outputs)

# Performance metrics for model
if args.single_class_output:
loss = tf.losses.sigmoid_cross_entropy(truth, predictions)
metric_score = tf.keras.metrics.binary_accuracy(truth, predictions)
metric_score = tf.metrics.mean_squared_error(truth, predictions)
else:
loss = dice_coef_loss(truth, predictions, dims) # Loss is the dice between mask and prediction
metric_score = dice_coef(truth, predictions, dims)
Expand All @@ -160,13 +178,15 @@
imgs = np.random.rand(*tensor_shape)

if args.single_class_output:
truths = np.random.rand(args.bz, 1)
truths = np.random.rand(args.bz, args.num_outputs)
else:
truths = imgs + np.random.rand(*tensor_shape)
truths = np.random.rand(*out_shape)

# Initialize all variables
init_op = tf.global_variables_initializer()
init_l = tf.local_variables_initializer() # For TensorFlow metrics
sess.run(init_op)
sess.run(init_l)

# Set up trace for operations
run_metadata = tf.RunMetadata()
Expand All @@ -188,9 +208,14 @@
options=run_options, run_metadata=run_metadata)

# Print the loss and dice metric in the progress bar.
progressbar.set_description(
"Epoch {}/{}: (loss={}, metric={})".format(
epoch+1, args.epochs, loss_v, metric_v))
if args.single_class_output:
progressbar.set_description(
"Epoch {}/{}: (loss={:.4f}, MSE={:.4f})".format(
epoch+1, args.epochs, loss_v, metric_v[1]))
else:
progressbar.set_description(
"Epoch {}/{}: (loss={:.4f}, dice={:.4f})".format(
epoch+1, args.epochs, loss_v, metric_v))
progressbar.update(this_step-last_step)
last_step = this_step

Expand Down
31 changes: 9 additions & 22 deletions memory_benchmarking/model.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def dice_coef_loss(target, prediction, axis=(1,2,3), smooth=1e-5):
concat_axis = 1
data_format = "channels_first"

def unet3D(input_img, use_upsampling=False, n_cl_out=1, dropout=0.2,
def unet3D(input_img, use_upsampling=False, n_out=1, dropout=0.2,
print_summary = False):
"""
3D U-Net model
Expand Down Expand Up @@ -118,7 +118,7 @@ def unet3D(input_img, use_upsampling=False, n_cl_out=1, dropout=0.2,

conv7 = keras.layers.Conv3D(name="conv7a", filters=128, **params)(up6)
conv7 = keras.layers.Conv3D(name="conv7b", filters=128, **params)(conv7)
pred = keras.layers.Conv3D(name="PredictionMask", filters=n_cl_out, kernel_size=(1, 1, 1),
pred = keras.layers.Conv3D(name="PredictionMask", filters=n_out, kernel_size=(1, 1, 1),
data_format=data_format, activation="sigmoid")(conv7)

if print_summary:
Expand All @@ -128,7 +128,7 @@ def unet3D(input_img, use_upsampling=False, n_cl_out=1, dropout=0.2,
return pred

def unet2D(input_tensor, use_upsampling=False,
n_cl_out=1, dropout=0.2, print_summary = False):
n_out=1, dropout=0.2, print_summary = False):
"""
2D U-Net
"""
Expand Down Expand Up @@ -213,7 +213,7 @@ def unet2D(input_tensor, use_upsampling=False,
conv9 = keras.layers.Conv2D(name="conv9a", filters=32, **params)(up9)
conv9 = keras.layers.Conv2D(name="conv9b", filters=32, **params)(conv9)

pred = keras.layers.Conv2D(name="PredictionMask", filters=n_cl_out, kernel_size=(1, 1),
pred = keras.layers.Conv2D(name="PredictionMask", filters=n_out, kernel_size=(1, 1),
data_format=data_format, activation="sigmoid")(conv9)

if print_summary:
Expand All @@ -222,31 +222,18 @@ def unet2D(input_tensor, use_upsampling=False,

return pred

def conv3D(input_img, print_summary = False, dropout=0.2):
def conv3D(input_img, print_summary = False, dropout=0.2, n_out=1):
"""
Simple 3D convolution model based on VGG-16
"""
print("3D Convolutional Binary Classifier based on VGG-16")

# Set keras learning phase to train
keras.backend.set_learning_phase(True)

# Don't initialize variables on the fly
keras.backend.manual_variable_initialization(False)

inputs = keras.layers.Input(tensor=input_img, name="Input_Image")

params = dict(kernel_size=(3, 3, 3), activation="relu",
padding="same", data_format=data_format,
kernel_initializer="he_uniform")

# Set keras learning phase to train
keras.backend.set_learning_phase(True)

# Don't initialize variables on the fly
keras.backend.manual_variable_initialization(False)

inputs = keras.layers.Input(tensor=input_tensor, name="Images")
inputs = keras.layers.Input(tensor=input_img, name="Images")

params = dict(kernel_size=(3, 3, 3), activation="relu",
padding="same", data_format=data_format,
Expand Down Expand Up @@ -279,7 +266,7 @@ def conv3D(input_img, print_summary = False, dropout=0.2):
dense1 = keras.layers.Dense(4096, activation="relu")(flat)
drop1 = keras.layers.Dropout(dropout)(dense1)
dense2 = keras.layers.Dense(4096, activation="relu")(drop1)
pred = keras.layers.Dense(1, name="Prediction", activation="sigmoid")(dense2)
pred = keras.layers.Dense(n_out, name="Prediction", activation="sigmoid")(dense2)

if print_summary:
model = keras.models.Model(inputs=[inputs], outputs=[pred])
Expand All @@ -288,7 +275,7 @@ def conv3D(input_img, print_summary = False, dropout=0.2):
return pred


def conv2D(input_tensor, print_summary = False, dropout=0.2):
def conv2D(input_tensor, print_summary = False, dropout=0.2, n_out=1):

"""
Simple 2D convolution model based on VGG-16
Expand Down Expand Up @@ -334,7 +321,7 @@ def conv2D(input_tensor, print_summary = False, dropout=0.2):
dense1 = keras.layers.Dense(4096, activation="relu")(flat)
drop1 = keras.layers.Dropout(dropout)(dense1)
dense2 = keras.layers.Dense(4096, activation="relu")(drop1)
pred = keras.layers.Dense(1, name="Prediction", activation="sigmoid")(dense2)
pred = keras.layers.Dense(n_out, name="Prediction", activation="sigmoid")(dense2)

if print_summary:
model = keras.models.Model(inputs=[inputs], outputs=[pred])
Expand Down

0 comments on commit b8a406a

Please sign in to comment.