Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Update requirements, Fixed Bugs, Add trained and quantized models #167

Open
wants to merge 1 commit into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
20 changes: 20 additions & 0 deletions benchmark/training/image_classification/Logs/testing_log.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
Test data: (200, 32, 32, 3)
Test filenames: (200,)
Test labels: (200, 10)
Label names: (10,)
Label classes: (200,)
7/7 - 3s - 405ms/step - accuracy: 0.9000 - loss: 0.3835
Performances on cifar10 test set
Keras evaluate method
Accuracy keras: 0.8999999761581421
---------------------
1/7 ━━━━━━━━━━━━━━━━━━━━ 2s 441ms/step7/7 ━━━━━━━━━━━━━━━━━━━━ 0s 66ms/step 7/7 ━━━━━━━━━━━━━━━━━━━━ 1s 67ms/step
EEMBC calculate_accuracy method
Overall accuracy = 90.0
---------------------
sklearn.metrics.roc_auc_score method
AUC sklearn: 0.992388888888889
---------------------
EEMBC calculate_auc method
Simplified average roc_auc = 0.990
---------------------
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
Label classes: (200,)
EEMBC calculate_accuracy method
Overall accuracy = 88.5
---------------------
sklearn.metrics.roc_auc_score method
AUC sklearn: 0.9885555555555555
---------------------
EEMBC calculate_auc method
Simplified average roc_auc = 0.925
---------------------
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
1,599 changes: 1,599 additions & 0 deletions benchmark/training/image_classification/Logs/training_log.txt

Large diffs are not rendered by default.

14 changes: 11 additions & 3 deletions benchmark/training/image_classification/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,20 @@ Dataset: Cifar10

Run the following commands to go through the whole training and validation process

We recommend creating a dedicated virtual environment before running the experiment

``` Bash
# Prepare Python venv (Python 3.7+ and pip>20 required)
# Prepare Python venv (Python 3.9+ and pip>20 required)
./prepare_training_env.sh

# Download training, train model, test the model
./download_cifar10_train_resnet.sh
# Download dataset
./download_cifar10.sh

# Load the performance subset
./load_performance_subset.sh

# Train and test the model
./train_test_model.sh

# Convert the model to TFlite, and test conversion quality
./convert_to_tflite.sh
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,4 @@
#!/bin/sh

. venv/bin/activate
python3 model_converter.py
python3 tflite_test.py
python3 tflite_test.py > Logs/tflite_testing_log.txt
5 changes: 5 additions & 0 deletions benchmark/training/image_classification/download_cifar10.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
#!/bin/bash

# Download the dataset.
wget https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
tar -xvf cifar-10-python.tar.gz
19 changes: 13 additions & 6 deletions benchmark/training/image_classification/eval_functions_eembc.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,18 +107,23 @@ def calculate_auc(y_pred, labels, classes, name):
roc_auc_avg = np.mean(roc_auc)
print(f"Simplified average roc_auc = {roc_auc_avg:.3f}")

plt.figure()
plt.figure(figsize=(8, 6), dpi = 600)
for class_item in range(n_classes):
plt.plot(fpr[class_item, :], tpr[class_item, :],
label=f"auc: {roc_auc[class_item]:0.3f} ({classes[class_item]})")
plt.xlim([0.0, 0.1])
plt.ylim([0.5, 1.0])
plt.legend(loc="lower right")
#plt.xlim([0.0, 0.1])
#plt.ylim([0.5, 1.0])
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.1])
plt.legend(loc="center left", bbox_to_anchor=(1, 0.5), fontsize=10)
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC: ' + name)
plt.grid(which='major')
plt.show(block=False)
plt.tight_layout()
plt.savefig('Logs/auc_result.png', dpi=600, bbox_inches='tight')

plt.close()

return roc_auc

Expand Down Expand Up @@ -182,7 +187,9 @@ def calculate_ae_pr_accuracy(y_pred, y_true):
plt.ylabel('Precision')
plt.title('Precision vs Recall')
plt.grid(which='major')
plt.show(block=False)
plt.savefig('Logs/PR_result', dpi = 600)

plt.close()

return accuracy

Expand Down
4 changes: 2 additions & 2 deletions benchmark/training/image_classification/keras_model.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,8 @@ def get_model_name():
return "pretrainedResnet"

def get_quant_model_name():
if os.path.exists("trained_models/trainedResnet.h5"):
return "trainedResnet"
if os.path.exists("trained_models/trainedRes_quant.tflite"):
return "trainedRes"
else:
return "pretrainedResnet"

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
# load performance subset
python3 perf_samples_loader.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@
import train
from test import model_name

tfmodel_path = 'trained_models/' + model_name
tfmodel_path = 'trained_models/' + model_name + '.h5'
tfmodel = tf.keras.models.load_model(tfmodel_path)
cifar_10_dir = 'cifar-10-batches-py'
model_name = model_name[:-3]
Expand Down
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
#!/bin/sh

python3 -m venv venv
. venv/bin/activate
pip3 install -r requirements.txt
88 changes: 44 additions & 44 deletions benchmark/training/image_classification/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,47 +1,47 @@
absl-py==0.11.0
absl-py==2.1.0
astunparse==1.6.3
cachetools==4.2.1
certifi==2020.12.5
chardet==4.0.0
cycler==0.10.0
flatbuffers==1.12
gast==0.3.3
google-auth==1.27.0
google-auth-oauthlib==0.4.2
cachetools==5.5.0
certifi==2024.12.14
chardet==5.2.0
cycler==0.12.1
flatbuffers==24.3.25
gast==0.6.0
google-auth==2.37.0
google-auth-oauthlib==1.2.1
google-pasta==0.2.0
grpcio==1.32.0
h5py==2.10.0
idna==2.10
imageio==2.9.0
joblib==1.0.1
grpcio==1.68.1
h5py==3.12.1
idna==3.10
imageio==2.36.1
joblib==1.4.2
Keras-Preprocessing==1.1.2
kiwisolver==1.3.1
Markdown==3.3.3
matplotlib==3.3.4
numpy==1.19.5
oauthlib==3.1.0
opencv-python==4.5.1.48
opt-einsum==3.3.0
Pillow==8.1.0
protobuf==3.14.0
pyasn1==0.4.8
pyasn1-modules==0.2.8
pyparsing==2.4.7
python-dateutil==2.8.1
PyYAML==5.4.1
requests==2.25.1
requests-oauthlib==1.3.0
rsa==4.7.1
scikit-learn==0.24.1
scipy==1.6.0
six==1.15.0
tensorboard==2.4.1
tensorboard-plugin-wit==1.8.0
tensorflow==2.4.1
tensorflow-estimator==2.4.0
termcolor==1.1.0
threadpoolctl==2.1.0
typing-extensions==3.7.4.3
urllib3==1.26.3
Werkzeug==1.0.1
wrapt==1.12.1
kiwisolver==1.4.8
Markdown==3.7
matplotlib==3.10.0
numpy==2.0.2
oauthlib==3.2.2
opencv-python==4.10.0.84
opt-einsum==3.4.0
Pillow==11.1.0
protobuf==5.29.1
pyasn1==0.6.1
pyasn1-modules==0.4.1
pyparsing==3.2.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9
scikit-learn==1.6.0
scipy==1.15.0
six==1.17.0
tensorboard==2.18.0
tensorboard-plugin-wit==1.8.1
tensorflow==2.18.0
tensorflow-estimator==2.15.0
termcolor==2.5.0
threadpoolctl==3.5.0
typing-extensions==4.12.2
urllib3==2.2.3
Werkzeug==3.1.3
wrapt==1.17.0
2 changes: 1 addition & 1 deletion benchmark/training/image_classification/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@

model = tf.keras.models.load_model('trained_models/' + model_name + '.h5')

test_metrics = model.evaluate(x=test_data, y=test_labels, batch_size=32, verbose=1, return_dict=True)
test_metrics = model.evaluate(x=test_data, y=test_labels, batch_size=32, verbose=2, return_dict=True)

print("Performances on cifar10 test set")
print("Keras evaluate method")
Expand Down
2 changes: 1 addition & 1 deletion benchmark/training/image_classification/tflite_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@
_name = keras_model.get_quant_model_name()
model_path = 'trained_models/' + _name + '_quant.tflite'
else:
_name = keras_model.get_quant_model_name()
_name = keras_model.get_model_name()
model_path = 'trained_models/' + _name + '.tflite'

if __name__ == '__main__':
Expand Down
18 changes: 13 additions & 5 deletions benchmark/training/image_classification/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ def lr_schedule(epoch):
horizontal_flip=True,
#brightness_range=(0.9, 1.2),
#contrast_range=(0.9, 1.2),
validation_split=0.2
#validation_split=0.2
)

def unpickle(file):
Expand Down Expand Up @@ -141,6 +141,8 @@ def load_cifar_10_data(data_dir, negatives=False):
# Don't forget that the label_names and filesnames are in binary and need conversion if used.

# display some random training images in a 25x25 grid
plt.figure()

num_plot = 5
f, ax = plt.subplots(num_plot, num_plot)
for m in range(num_plot):
Expand All @@ -151,7 +153,9 @@ def load_cifar_10_data(data_dir, negatives=False):
ax[m, n].get_yaxis().set_visible(False)
f.subplots_adjust(hspace=0.1)
f.subplots_adjust(wspace=0)
plt.show()
plt.savefig('Logs/training_images.png')

plt.close()

new_model = keras_model.resnet_v1_eembc()
new_model.summary()
Expand All @@ -161,15 +165,19 @@ def load_cifar_10_data(data_dir, negatives=False):
datagen.fit(train_data)

new_model.compile(
optimizer=optimizer, loss='categorical_crossentropy', metrics='accuracy', loss_weights=None,
optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'], loss_weights=None,
weighted_metrics=None, run_eagerly=None )

# fits the model on batches with real-time data augmentation:
History = new_model.fit(datagen.flow(train_data, train_labels, batch_size=BS),
steps_per_epoch=len(train_data) / BS, epochs=EPOCHS, callbacks=[lr_scheduler])
epochs=EPOCHS, callbacks=[lr_scheduler], verbose=2)

plt.figure()

plt.plot(np.array(range(EPOCHS)), History.history['loss'])
plt.plot(np.array(range(EPOCHS)), History.history['accuracy'])
plt.savefig('train_loss_acc.png')
plt.savefig('Logs/train_loss_acc.png', dpi = 600)
model_name = "trainedResnet.h5"
new_model.save("trained_models/" + model_name)

plt.close()
8 changes: 8 additions & 0 deletions benchmark/training/image_classification/train_test_model.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
# train and test the model

if [ ! -d "Logs" ]; then
mkdir -p Logs
fi

python3 train.py > Logs/training_log.txt
python3 test.py > Logs/testing_log.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
Model get saved as:
FP32 Model get saved as:
trainedResnet.h5
Binary file not shown.
Binary file not shown.
Binary file not shown.
Loading