# Source: self_driving_car_train.py (169 lines, 135 loc, 5.69 KB)
# NOTE: GitHub web-page chrome and the rendered line-number gutter from the
# original scrape were removed; the actual module source follows.
import datetime
import os
import sys
import time

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import Adam

from config import Config
from self_driving_car_batch_generator import Generator
from utils import get_driving_styles, mapping, mappingrgb
from utils_models import *
np.random.seed(0)
def load_data(cfg):
"""
Load training data_nominal and split it into training and validation set
"""
drive = get_driving_styles(cfg)
print("Loading training set " + str(cfg.TRACK) + str(drive))
start = time.time()
x = None
y = None
path = None
x_train = None
y_train = None
x_test = None
y_test = None
for drive_style in drive:
try:
path = os.path.join(cfg.TRAINING_DATA_DIR, #/mnt/c
cfg.TRAINING_SET_DIR, #Unet
cfg.TRACK, #track1
drive_style,
'driving_log.csv')
#path = "/mnt/c/Unet/track1/normal/driving_log.csv"
data_df = pd.read_csv(path)
if x is None:
x = data_df[['center', 'left', 'right']].values
y = data_df['steering'].values
else:
x = np.concatenate((x, data_df[['center', 'left', 'right']].values), axis=0)
y = np.concatenate((y, data_df['steering'].values), axis=0)
except FileNotFoundError:
print("Unable to read file %s" % path)
continue
if x is None:
print("No driving data_nominal were provided for training. Provide correct paths to the driving_log.csv files")
exit()
try:
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=cfg.TEST_SIZE, random_state=0)
except TypeError:
print("Missing header to csv files")
exit()
duration_train = time.time() - start
print("Loading training set completed in %s." % str(datetime.timedelta(seconds=round(duration_train))))
print("Data set: " + str(len(x)) + " elements")
print("Training set: " + str(len(x_train)) + " elements")
print("Test set: " + str(len(x_test)) + " elements")
return x_train, x_test, y_train, y_test
def train_model(model, cfg, x_train, x_test, y_train, y_test):
"""
Train the self-driving car model
"""
if cfg.USE_PREDICTIVE_UNCERTAINTY:
name = os.path.join(cfg.SDC_MODELS_DIR,
cfg.TRACK + '-' + cfg.SDC_MODEL_NAME.replace('.h5', '') + '-mc' + '-{epoch:03d}.h5')
else:
name = os.path.join(cfg.SDC_MODELS_DIR,
cfg.TRACK + '-' + cfg.SDC_MODEL_NAME.replace('.h5', '') + '-{epoch:03d}.h5')
checkpoint = ModelCheckpoint(
name,
monitor='val_loss',
verbose=0,
save_best_only=True,
mode='auto')
early_stop = keras.callbacks.EarlyStopping(monitor='loss',
min_delta=.0005,
patience=10,
mode='auto')
model.compile(loss='mean_squared_error', optimizer=Adam(lr=cfg.LEARNING_RATE))
x_train, y_train = shuffle(x_train, y_train, random_state=0)
x_test, y_test = shuffle(x_test, y_test, random_state=0)
train_generator = Generator(x_train, y_train, True, cfg)
val_generator = Generator(x_test, y_test, False, cfg)
history = model.fit(train_generator,
validation_data=val_generator,
epochs=cfg.NUM_EPOCHS_SDC_MODEL,
callbacks=[checkpoint, early_stop],
verbose=1)
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show()
if cfg.USE_PREDICTIVE_UNCERTAINTY:
name = os.path.join(cfg.SDC_MODELS_DIR,
cfg.TRACK + '-' + cfg.SDC_MODEL_NAME.replace('.h5', '') + '-mc-final.h5')
else:
name = os.path.join(cfg.SDC_MODELS_DIR, cfg.TRACK + '-' + cfg.SDC_MODEL_NAME.replace('.h5', '') + '-final.h5')
# save the last model anyway (might not be the best)
model.save(name)
def seg_process(model, x_train, x_test, y_train, y_test, cfg):
x_train, y_train = shuffle(x_train, y_train, random_state=0)
x_test, y_test = shuffle(x_test, y_test, random_state=0)
train_generator = Generator(x_train, y_train, True, cfg)
val_generator = Generator(x_test, y_test, False, cfg)
return train_generator, val_generator
def main():
from models.model2 import U_Net
import torch
"""
Load train/validation data_nominal set and train the model
"""
cfg = Config()
cfg.from_pyfile("config_my.py")
####################
Seg_model = U_Net(3, 2)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
checkpoint_path = '/mnt/c/Unet/SegmentationModel.pth'
Seg_model.load_state_dict(torch.load(checkpoint_path, map_location=device))
Seg_model.eval()
####################
x_train, x_test, y_train, y_test = load_data(cfg)
print(x_train)
model = build_model(cfg.SDC_MODEL_NAME, cfg.USE_PREDICTIVE_UNCERTAINTY)
#this method use segmentation model to limitate the road
train_generator, val_generator = seg_process(Seg_model, x_train, x_test, y_train, y_test, cfg)
train_model(model, cfg, x_train, x_test, y_train, y_test)
if __name__ == '__main__':
main()