-
Notifications
You must be signed in to change notification settings - Fork 0
/
melanoma.py
107 lines (86 loc) · 3.29 KB
/
melanoma.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
import os

import keras
from keras import backend as K
from keras import optimizers
from keras.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger
from keras.layers import Activation, Dropout, Flatten, Dense
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
# --- Experiment configuration -------------------------------------------
# Image folders, one sub-directory per class (the layout expected by
# ImageDataGenerator.flow_from_directory).
train_data_dir = '/path/to/train'
validation_data_dir = '/path/to/validation'

# Dataset sizes; used below to derive steps per epoch.
nb_train_samples = 1505
nb_validation_samples = 214

# Training hyper-parameters.
batch_size = 16
nb_epochs = 10
nb_fc_neurons = 512   # width of the fully-connected hidden layer
nb_filter = 32        # filters in the first conv block (doubled each block)
nb_conv2d = 3         # number of conv/pool blocks (informational; not read below)
img_width, img_height = 128, 128

# Output artefact names (written under models/).
log_name = 'keras_model_training.log'
model_name = 'keras_model.h5'

# Backends disagree on whether the channel axis comes first or last.
input_shape = ((3, img_width, img_height)
               if K.image_data_format() == 'channels_first'
               else (img_width, img_height, 3))
# CNN for binary image classification (presumably melanoma vs. benign,
# per the file name — confirm against the training directories): three
# conv -> relu -> max-pool blocks with doubling filter counts, then one
# dense hidden layer with dropout and a single sigmoid output unit.
model = Sequential([
    Conv2D(nb_filter, (3, 3), input_shape=input_shape),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),

    Conv2D(nb_filter * 2, (3, 3)),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),

    Conv2D(nb_filter * 4, (3, 3)),
    Activation('relu'),
    MaxPooling2D(pool_size=(2, 2)),

    Flatten(),
    Dense(nb_fc_neurons),
    Activation('relu'),
    Dropout(0.5),
    Dense(1),
    Activation('sigmoid'),
])

# Binary cross-entropy matches the single sigmoid output.
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Training images get light augmentation (shear / zoom / horizontal flip)
# on top of the 0-1 rescaling; validation images are only rescaled.
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   horizontal_flip=True)
test_datagen = ImageDataGenerator(rescale=1. / 255)

# Settings shared by both directory iterators.
flow_kwargs = dict(target_size=(img_width, img_height),
                   batch_size=batch_size,
                   class_mode='binary')

train_generator = train_datagen.flow_from_directory(train_data_dir,
                                                    **flow_kwargs)

# NOTE(review): shuffling the validation stream does not change the
# aggregated metrics; kept to mirror the original configuration.
validation_generator = test_datagen.flow_from_directory(validation_data_dir,
                                                        shuffle=True,
                                                        **flow_kwargs)
# --- Callbacks -----------------------------------------------------------
# Checkpoint the weights that achieve the best validation accuracy / loss.
# NOTE(review): the monitored key is 'val_acc' in Keras < 2.3 but
# 'val_accuracy' in later versions — confirm against the installed Keras,
# otherwise the accuracy checkpoint never fires.
best_model_val_acc = ModelCheckpoint('best_model_val_acc', monitor='val_acc',
                                     mode='max', verbose=1, save_best_only=True)
best_model_val_loss = ModelCheckpoint('best_model_val_loss', monitor='val_loss',
                                      mode='min', verbose=1, save_best_only=True)

# Early stopping to prevent overfitting: halt after `patience` epochs with
# no improvement in validation loss. (Uses the name imported at the top of
# the file rather than the fully-qualified keras.callbacks path.)
early_stop = EarlyStopping(monitor='val_loss',
                           min_delta=0,
                           patience=2,
                           verbose=0,
                           mode='auto',
                           baseline=None,
                           restore_best_weights=False)

# The CSV log and the final model are written under models/; create the
# directory up front so CSVLogger / model.save don't fail when it is absent.
os.makedirs('models', exist_ok=True)

# Per-epoch training metrics log.
csv_logger = CSVLogger('models/' + log_name)

# Train the model.
# BUG FIX: `early_stop` was constructed but never passed to fit_generator,
# so early stopping silently never ran — it is now in the callback list.
model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=nb_epochs,
    shuffle=True,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    callbacks=[csv_logger, best_model_val_acc, best_model_val_loss, early_stop]
)

# Persist the final (last-epoch) weights as well, alongside the checkpoints.
model.save('models/' + model_name)