diff --git a/.github/.isort.cfg b/.github/.isort.cfg
new file mode 100644
index 00000000..728ef79d
--- /dev/null
+++ b/.github/.isort.cfg
@@ -0,0 +1,8 @@
+[settings]
+src_paths=aucmedi
+line_length=120
+known_first_party=aucmedi
+import_heading_stdlib=Python Standard Library
+import_heading_thirdparty=Third Party Libraries
+import_heading_firstparty=Internal Libraries
+skip=__init__.py
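With these settings, isort sorts imports into three labeled groups and treats `aucmedi` itself as first-party. A minimal sketch of the resulting layout for a module importing `os`, `numpy`, and `aucmedi`:

```python
# Python Standard Library
import os

# Third Party Libraries
import numpy as np

# Internal Libraries
from aucmedi import input_interface
```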
diff --git a/.github/flake8.cfg b/.github/flake8.cfg
new file mode 100644
index 00000000..8b184567
--- /dev/null
+++ b/.github/flake8.cfg
@@ -0,0 +1,17 @@
+[flake8]
+extend-ignore =
+ E124,
+ E127,
+ E128,
+ # E124/E127/E128: ignore visual indentation errors
+ E701,
+ # E701: ignore multiple statements on one line (because of 'name: type' errors)
+ E731,
+ # E731: allow lambda expressions
+ E265,
+ # E265: ignore that block comments should start with '# '
+ E231,
+ # E231: ignore missing whitespace after ','
+ W503
+ # W503: ignore line break before binary operator
+max-line-length = 120
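Each ignored code corresponds to a construct that remains common in the codebase; every line below would trigger the named error under default flake8 settings (an illustrative sketch, not code from the repository):

```python
#no space after the hash                # -> E265 (ignored)
point = {'x':1, 'y':2}                  # -> E231 (ignored)
if point: print(point)                  # -> E701 (ignored)
double = lambda n: n * 2                # -> E731 (ignored)
total = (point['x']
         + point['y'])                  # -> W503 (ignored)
```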
diff --git a/.github/workflows/code-quality.yml b/.github/workflows/code-quality.yml
index f965d31b..fcad06bc 100644
--- a/.github/workflows/code-quality.yml
+++ b/.github/workflows/code-quality.yml
@@ -1,5 +1,6 @@
# This workflow will install Python dependencies and run tests for computing code coverage
-# Results will be uploaded to codecov
+# The results will be uploaded to codecov
+# The workflow also runs the commitlint, isort, and flake8 linters to enforce a common code style
name: Code Quality
@@ -14,7 +15,7 @@ jobs:
name: Coverage (codecov)
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
- name: Set up Python 3.10
uses: actions/setup-python@v2
with:
@@ -44,9 +45,46 @@ jobs:
name: Commit Convention
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: wagoid/commitlint-github-action@v4
with:
configFile: .github/commitlint.config.js
+
+ isort-lint:
+ runs-on: ubuntu-latest
+ name: Import Order
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.10'
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install isort
+ - name: isort Lint
+ run: |
+ python -m isort --settings-path .github --check-only aucmedi
+
+ flake8-lint:
+ runs-on: ubuntu-latest
+ name: Lint
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python 3.10
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.10'
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ python -m pip install flake8
+ - name: Install package
+ run: |
+ python -m pip install .
+ - name: Run flake8 linter
+ run: |
+ python -m flake8 --config .github/flake8.cfg aucmedi
diff --git a/aucmedi/__init__.py b/aucmedi/__init__.py
index f7803840..62c0fc2f 100644
--- a/aucmedi/__init__.py
+++ b/aucmedi/__init__.py
@@ -68,7 +68,7 @@
# Run model inference for unknown samples
preds = model.predict(test_gen)
```
-"""
+""" # noqa E501
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
@@ -78,3 +78,13 @@
VolumeAugmentation, \
BatchgeneratorsAugmentation
from aucmedi.neural_network.model import NeuralNetwork
+
+
+__all__ = [
+ "input_interface",
+ "DataGenerator",
+ "ImageAugmentation",
+ "VolumeAugmentation",
+ "BatchgeneratorsAugmentation",
+ "NeuralNetwork"
+]
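Declaring `__all__` makes the package's public interface explicit, so `from aucmedi import *` now exposes exactly these six names. A quick check:

```python
import aucmedi

print(sorted(aucmedi.__all__))
# ['BatchgeneratorsAugmentation', 'DataGenerator', 'ImageAugmentation',
#  'NeuralNetwork', 'VolumeAugmentation', 'input_interface']
```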
diff --git a/aucmedi/automl/__init__.py b/aucmedi/automl/__init__.py
index d83601fa..2fd8d50f 100644
--- a/aucmedi/automl/__init__.py
+++ b/aucmedi/automl/__init__.py
@@ -41,7 +41,7 @@
| `evaluation` | [CLI - Evaluation][aucmedi.automl.cli.cli_evaluation] | [Block - Evaluate][aucmedi.automl.block_eval] |
More information can be found in the docs: [Documentation - AutoML](../../automl/overview/)
-"""
+""" # noqa E501
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
@@ -52,3 +52,12 @@
# Parser
from aucmedi.automl.parser_yaml import parse_yaml
from aucmedi.automl.parser_cli import parse_cli
+
+
+__all__ = [
+ "block_train",
+ "block_predict",
+ "block_evaluate",
+ "parse_yaml",
+ "parse_cli"
+]
diff --git a/aucmedi/automl/block_eval.py b/aucmedi/automl/block_eval.py
index 5242d108..613f3b3a 100644
--- a/aucmedi/automl/block_eval.py
+++ b/aucmedi/automl/block_eval.py
@@ -19,15 +19,19 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import os
-import pandas as pd
-import numpy as np
import re
-# Internal libraries
-from aucmedi import *
+
+# Third Party Libraries
+import numpy as np
+import pandas as pd
+
+# Internal Libraries
+from aucmedi import input_interface
from aucmedi.evaluation import evaluate_performance
+
#-----------------------------------------------------#
# Building Blocks for Evaluation #
#-----------------------------------------------------#
@@ -44,14 +48,20 @@ def block_evaluate(config):
Attributes:
path_imagedir (str): Path to the directory containing the ground truth images.
- path_gt (str): Path to the index/class annotation file if required. (only for 'csv' interface).
+ path_gt (str): Path to the index/class annotation file if required
+ (only for 'csv' interface).
path_pred (str): Path to the input file in which predicted csv file is stored.
- path_evaldir (str): Path to the directory in which evaluation figures and tables should be stored.
- ohe (bool): Boolean option whether annotation data is sparse categorical or one-hot encoded.
+ path_evaldir (str): Path to the directory in which evaluation figures and tables should be
+ stored.
+ ohe (bool): Boolean option whether annotation data is sparse categorical or one-hot
+ encoded.
"""
# Obtain interface
- if config["path_gt"] is None : config["interface"] = "directory"
- else : config["interface"] = "csv"
+ if config["path_gt"] is None:
+ config["interface"] = "directory"
+ else:
+ config["interface"] = "csv"
+
# Peak into the dataset via the input interface
ds = input_interface(config["interface"],
config["path_imagedir"],
@@ -73,7 +83,6 @@ def block_evaluate(config):
df_gt_data = pd.DataFrame(data=class_ohe, columns=class_names)
df_gt = pd.concat([df_index, df_gt_data], axis=1, sort=False)
-
# Verify - maybe there is a file path encoded in the index?
if os.path.sep in df_gt.iloc[0,0]:
samples_split = df_gt["SAMPLE"].str.split(pat=os.path.sep,
@@ -94,8 +103,10 @@ def block_evaluate(config):
data_gt = df_merged.iloc[:, (class_n+1):].to_numpy()
# Identify task (multi-class vs multi-label)
- if np.sum(data_pd) > (class_ohe.shape[0] + 1.5) : multi_label = True
- else : multi_label = False
+ if np.sum(data_pd) > (class_ohe.shape[0] + 1.5):
+ multi_label = True
+ else:
+ multi_label = False
# Evaluate performance via AUCMEDI evaluation submodule
evaluate_performance(data_pd, data_gt,
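The reformatted threshold `np.sum(data_pd) > (class_ohe.shape[0] + 1.5)` works because softmax (multi-class) predictions sum to roughly 1.0 per sample, keeping the grand total near the sample count, while sigmoid (multi-label) predictions can sum well above it. A toy illustration with hypothetical values:

```python
import numpy as np

preds_multiclass = np.array([[0.7, 0.2, 0.1],    # rows sum to ~1.0
                             [0.1, 0.8, 0.1]])
preds_multilabel = np.array([[0.9, 0.8, 0.1],    # rows can sum above 1.0
                             [0.7, 0.6, 0.9]])

n_samples = 2
for preds in (preds_multiclass, preds_multilabel):
    multi_label = np.sum(preds) > (n_samples + 1.5)
    print(multi_label)   # False, then True
```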
diff --git a/aucmedi/automl/block_pred.py b/aucmedi/automl/block_pred.py
index c305514e..74d95117 100644
--- a/aucmedi/automl/block_pred.py
+++ b/aucmedi/automl/block_pred.py
@@ -19,17 +19,21 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
-import os
+# Python Standard Library
import json
+import os
+
+# Third Party Libraries
import pandas as pd
-# Internal libraries
-from aucmedi import *
+
+# Internal Libraries
+from aucmedi import DataGenerator, NeuralNetwork, input_interface
from aucmedi.data_processing.io_loader import image_loader, sitk_loader
-from aucmedi.data_processing.subfunctions import *
-from aucmedi.ensemble import *
+from aucmedi.data_processing.subfunctions import Chromer, Crop, Padding, Standardize
+from aucmedi.ensemble import Composite, predict_augmenting
from aucmedi.xai import xai_decoder
+
#-----------------------------------------------------#
# Building Blocks for Inference #
#-----------------------------------------------------#
@@ -46,10 +50,12 @@ def block_predict(config):
Attributes:
path_imagedir (str): Path to the directory containing the images for prediction.
- path_modeldir (str): Path to the model directory in which fitted model weights and metadata are stored.
+ path_modeldir (str): Path to the model directory in which fitted model weights and metadata are
+ stored.
path_pred (str): Path to the output file in which predicted csv file should be stored.
xai_method (str or None): Key for XAI method.
- xai_directory (str or None): Path to the output directory in which predicted image xai heatmaps should be stored.
+ xai_directory (str or None): Path to the output directory in which predicted image xai heatmaps should be
+ stored.
batch_size (int): Number of samples inside a single batch.
workers (int): Number of workers/threads which preprocess batches during runtime.
"""
@@ -102,23 +108,26 @@ def block_predict(config):
"shuffle": False,
"grayscale": False,
}
- if not meta_training["three_dim"] : paras_datagen["loader"] = image_loader
- else : paras_datagen["loader"] = sitk_loader
+ if not meta_training["three_dim"]:
+ paras_datagen["loader"] = image_loader
+ else:
+ paras_datagen["loader"] = sitk_loader
# Apply MIC pipelines
if meta_training["analysis"] == "minimal":
# Setup neural network
if not meta_training["three_dim"]:
arch_dim = "2D." + meta_training["architecture"]
- else : arch_dim = "3D." + meta_training["architecture"]
+ else:
+ arch_dim = "3D." + meta_training["architecture"]
model = NeuralNetwork(architecture=arch_dim, **nn_paras)
# Build DataGenerator
pred_gen = DataGenerator(samples=index_list,
- labels=None,
- resize=model.meta_input,
- standardize_mode=model.meta_standardize,
- **paras_datagen)
+ labels=None,
+ resize=model.meta_input,
+ standardize_mode=model.meta_standardize,
+ **paras_datagen)
# Load model
path_model = os.path.join(config["path_modeldir"], "model.last.keras")
model.load(path_model)
@@ -128,15 +137,16 @@ def block_predict(config):
# Setup neural network
if not meta_training["three_dim"]:
arch_dim = "2D." + meta_training["architecture"]
- else : arch_dim = "3D." + meta_training["architecture"]
+ else:
+ arch_dim = "3D." + meta_training["architecture"]
model = NeuralNetwork(architecture=arch_dim, **nn_paras)
# Build DataGenerator
pred_gen = DataGenerator(samples=index_list,
- labels=None,
- resize=model.meta_input,
- standardize_mode=model.meta_standardize,
- **paras_datagen)
+ labels=None,
+ resize=model.meta_input,
+ standardize_mode=model.meta_standardize,
+ **paras_datagen)
# Load model
path_model = os.path.join(config["path_modeldir"],
"model.best_loss.keras")
@@ -147,19 +157,21 @@ def block_predict(config):
# Build multi-model list
model_list = []
for arch in meta_training["architecture"]:
- if not meta_training["three_dim"] : arch_dim = "2D." + arch
- else : arch_dim = "3D." + arch
+ if not meta_training["three_dim"]:
+ arch_dim = "2D." + arch
+ else:
+ arch_dim = "3D." + arch
model_part = NeuralNetwork(architecture=arch_dim, **nn_paras)
model_list.append(model_part)
el = Composite(model_list, metalearner=meta_training["metalearner"],
- k_fold=len(meta_training["architecture"]))
+ k_fold=len(meta_training["architecture"]))
# Build DataGenerator
pred_gen = DataGenerator(samples=index_list,
- labels=None,
- resize=None,
- standardize_mode=None,
- **paras_datagen)
+ labels=None,
+ resize=None,
+ standardize_mode=None,
+ **paras_datagen)
# Load composite model directory
el.load(config["path_modeldir"])
# Start model inference via ensemble learning
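With the star imports replaced by explicit names, the "minimal" inference branch reduces to the following pattern (a condensed sketch with hypothetical paths, omitting model loading and CSV export):

```python
from aucmedi import DataGenerator, NeuralNetwork, input_interface

# Gather the sample index list for an unannotated dataset
ds = input_interface("directory", "data/test", training=False)
index_list, _, _, _, image_format = ds

# Build the network and a matching generator, then run inference
model = NeuralNetwork(n_labels=4, channels=3, architecture="2D.DenseNet121")
pred_gen = DataGenerator(samples=index_list,
                         path_imagedir="data/test",
                         labels=None,
                         image_format=image_format,
                         resize=model.meta_input,
                         standardize_mode=model.meta_standardize)
preds = model.predict(pred_gen)
```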
diff --git a/aucmedi/automl/block_train.py b/aucmedi/automl/block_train.py
index 87f7e639..7ca16e5d 100644
--- a/aucmedi/automl/block_train.py
+++ b/aucmedi/automl/block_train.py
@@ -19,22 +19,25 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+import json
import os
+
+# Third Party Libraries
import numpy as np
-import json
+from tensorflow.keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.metrics import AUC
-from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, \
- ReduceLROnPlateau, EarlyStopping
-# Internal libraries
-from aucmedi import *
+
+# Internal Libraries
+from aucmedi import BatchgeneratorsAugmentation, DataGenerator, ImageAugmentation, NeuralNetwork, input_interface
from aucmedi.data_processing.io_loader import image_loader, sitk_loader
-from aucmedi.sampling import sampling_split
-from aucmedi.utils.class_weights import *
-from aucmedi.data_processing.subfunctions import *
-from aucmedi.neural_network.loss_functions import *
-from aucmedi.ensemble import *
+from aucmedi.data_processing.subfunctions import Chromer, Crop, Padding, Standardize
+from aucmedi.ensemble import Composite
from aucmedi.evaluation import evaluate_fitting
+from aucmedi.neural_network.loss_functions import categorical_focal_loss, multilabel_focal_loss
+from aucmedi.sampling import sampling_split
+from aucmedi.utils.class_weights import compute_class_weights, compute_multilabel_weights
+
#-----------------------------------------------------#
# Building Blocks for Training #
@@ -53,9 +56,12 @@ def block_train(config):
Attributes:
path_imagedir (str): Path to the directory containing the images.
path_modeldir (str): Path to the output directory in which fitted models and metadata are stored.
- path_gt (str): Path to the index/class annotation file if required. (only for 'csv' interface).
- analysis (str): Analysis mode for the AutoML training. Options: `["minimal", "standard", "advanced"]`.
- ohe (bool): Boolean option whether annotation data is sparse categorical or one-hot encoded.
+ path_gt (str): Path to the index/class annotation file if required
+ (only for 'csv' interface).
+ analysis (str): Analysis mode for the AutoML training. Options:
+ `["minimal", "standard", "advanced"]`.
+ ohe (bool): Boolean option whether annotation data is sparse categorical or one-hot
+ encoded.
three_dim (bool): Boolean, whether data is 2D or 3D.
shape_3D (tuple of int): Desired input shape of 3D volume for architecture (will be cropped).
epochs (int): Number of epochs. A single epoch is defined as one iteration through
@@ -66,8 +72,10 @@ def block_train(config):
architecture (str or list of str): Key (str) of a neural network model Architecture class instance.
"""
# Obtain interface
- if config["path_gt"] is None : config["interface"] = "directory"
- else : config["interface"] = "csv"
+ if config["path_gt"] is None:
+ config["interface"] = "directory"
+ else:
+ config["interface"] = "csv"
# Peak into the dataset via the input interface
ds = input_interface(config["interface"],
config["path_imagedir"],
@@ -82,8 +90,10 @@ def block_train(config):
os.mkdir(config["path_modeldir"])
# Identify task (multi-class vs multi-label)
- if np.sum(class_ohe) > class_ohe.shape[0] : config["multi_label"] = True
- else : config["multi_label"] = False
+ if np.sum(class_ohe) > class_ohe.shape[0]:
+ config["multi_label"] = True
+ else:
+ config["multi_label"] = False
# Sanity check on multi-label metalearner
multilabel_metalearner_supported = ["mlp", "k_neighbors", "random_forest",
@@ -91,7 +101,7 @@ def block_train(config):
"decision_tree", "mean", "median"]
if config["multi_label"] and config["analysis"] == "advanced" and \
config["metalearner"] not in multilabel_metalearner_supported:
- raise ValueError("Non-compatible metalearner selected for multi-label"\
+ raise ValueError("Non-compatible metalearner selected for multi-label"
+ " classification. Supported metalearner:",
multilabel_metalearner_supported)
@@ -141,10 +151,12 @@ def block_train(config):
"pretrained_weights": True,
}
# Select input shape for 3D
- if config["three_dim"] : nn_paras["input_shape"] = config["shape_3D"]
+ if config["three_dim"]: nn_paras["input_shape"] = config["shape_3D"]
# Select task type
- if config["multi_label"] : nn_paras["activation_output"] = "sigmoid"
- else : nn_paras["activation_output"] = "softmax"
+ if config["multi_label"]:
+ nn_paras["activation_output"] = "sigmoid"
+ else:
+ nn_paras["activation_output"] = "softmax"
# Initialize Augmentation for 2D image data
if not config["three_dim"]:
@@ -161,7 +173,8 @@ def block_train(config):
mirror=True, rotate=True, scale=True,
elastic_transform=True, gaussian_noise=False,
brightness=False, contrast=False, gamma=True)
- else : data_aug = None
+ else:
+ data_aug = None
# Subfunctions
sf_list = []
@@ -184,8 +197,10 @@ def block_train(config):
"image_format": image_format,
"workers": config["workers"],
}
- if not config["three_dim"] : paras_datagen["loader"] = image_loader
- else : paras_datagen["loader"] = sitk_loader
+ if not config["three_dim"]:
+ paras_datagen["loader"] = image_loader
+ else:
+ paras_datagen["loader"] = sitk_loader
# Gather training parameters
paras_train = {
@@ -199,8 +214,10 @@ def block_train(config):
# Apply MIC pipelines
if config["analysis"] == "minimal":
# Setup neural network
- if not config["three_dim"] : arch_dim = "2D." + config["architecture"]
- else : arch_dim = "3D." + config["architecture"]
+ if not config["three_dim"]:
+ arch_dim = "2D." + config["architecture"]
+ else:
+ arch_dim = "3D." + config["architecture"]
model = NeuralNetwork(architecture=arch_dim, **nn_paras)
# Build DataGenerator
@@ -218,8 +235,10 @@ def block_train(config):
model.dump(path_model)
elif config["analysis"] == "standard":
# Setup neural network
- if not config["three_dim"] : arch_dim = "2D." + config["architecture"]
- else : arch_dim = "3D." + config["architecture"]
+ if not config["three_dim"]:
+ arch_dim = "2D." + config["architecture"]
+ else:
+ arch_dim = "3D." + config["architecture"]
model = NeuralNetwork(architecture=arch_dim, **nn_paras)
# Apply percentage split sampling
@@ -252,13 +271,15 @@ def block_train(config):
else:
# Sanity check of architecutre config
if not isinstance(config["architecture"], list):
- raise ValueError("key 'architecture' in config has to be a list " \
+ raise ValueError("key 'architecture' in config has to be a list "
+ "if 'advanced' was selected as analysis.")
# Build multi-model list
model_list = []
for arch in config["architecture"]:
- if not config["three_dim"] : arch_dim = "2D." + arch
- else : arch_dim = "3D." + arch
+ if not config["three_dim"]:
+ arch_dim = "2D." + arch
+ else:
+ arch_dim = "3D." + arch
model_part = NeuralNetwork(architecture=arch_dim, **nn_paras)
model_list.append(model_part)
el = Composite(model_list, metalearner=config["metalearner"],
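The class-weight and focal-loss utilities now imported explicitly plug together as in this sketch (hypothetical annotations, assuming the call pattern used in this module):

```python
import numpy as np

from aucmedi.neural_network.loss_functions import categorical_focal_loss
from aucmedi.utils.class_weights import compute_class_weights

# Imbalanced one-hot annotations: 3x classA, 1x classB
class_ohe = np.array([[1, 0], [1, 0], [1, 0], [0, 1]])
cw_loss, cw_fit = compute_class_weights(class_ohe)

# Class-weighted focal loss, usable as NeuralNetwork(loss=...)
loss = categorical_focal_loss(cw_loss)
```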
diff --git a/aucmedi/automl/cli.py b/aucmedi/automl/cli.py
index 6d308cda..2e4b702e 100644
--- a/aucmedi/automl/cli.py
+++ b/aucmedi/automl/cli.py
@@ -26,10 +26,12 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import argparse
+
+# Third Party Libraries
import pkg_resources
-import sys
+
#-----------------------------------------------------#
# CLI - General #
@@ -52,6 +54,7 @@ def cli_core():
# Return parsers
return parser, subparsers
+
# #-----------------------------------------------------#
# # CLI - YAML #
# #-----------------------------------------------------#
@@ -107,7 +110,7 @@ def cli_training(subparsers):
??? info "List of Metalearner"
- Homogeneous pooling functions: [Aggregate][aucmedi.ensemble.aggregate]
- Heterogeneous pooling functions: [Metalearner][aucmedi.ensemble.metalearner]
- """
+ """ # noqa E501
# Set description for cli training
desc = """ Pipeline hub for Training via AUCMEDI AutoML """
# Setup SubParser
@@ -119,34 +122,31 @@ def cli_training(subparsers):
type=str,
required=False,
default="training",
- help="Path to the directory containing the images " + \
- "(default: '%(default)s')",
+ help="Path to the directory containing the images (default: '%(default)s')",
)
od.add_argument("--path_modeldir",
type=str,
required=False,
default="model",
- help="Path to the output directory in which fitted " + \
- "models and metadata are stored " + \
- "(default: '%(default)s')",
+ help="Path to the output directory in which fitted models and metadata are stored "
+ + "(default: '%(default)s')",
)
od.add_argument("--path_gt",
type=str,
required=False,
- help="Path to the index/class annotation CSV file " + \
- "(only required for defining the ground truth via " + \
- "'csv' instead of 'directory' interface)",
+ help="Path to the index/class annotation CSV file "
+ + "(only required for defining the ground truth via "
+ + "'csv' instead of 'directory' interface)",
)
od.add_argument("--ohe",
action="store_true",
required=False,
default=False,
- help="Boolean option whether annotation data is sparse " + \
- "categorical or one-hot encoded " + \
- "(only required for interface 'csv' and multi-" + \
- "label data, " + \
- "default: '%(default)s')",
+ help="Boolean option whether annotation data is sparse "
+ + "categorical or one-hot encoded "
+ + "(only required for interface 'csv' and multi-"
+ + "label data, default: '%(default)s')",
)
# Add configuration arguments
@@ -156,64 +156,56 @@ def cli_training(subparsers):
required=False,
default="standard",
choices=["minimal", "standard", "advanced"],
- help="Analysis mode for the AutoML training " + \
- "(default: '%(default)s')",
+ help="Analysis mode for the AutoML training (default: '%(default)s')",
)
oc.add_argument("--three_dim",
action="store_true",
required=False,
default=False,
- help="Boolean, whether imaging data is 2D or 3D " + \
- "(default: '%(default)s')",
+ help="Boolean, whether imaging data is 2D or 3D (default: '%(default)s')",
)
oc.add_argument("--shape_3D",
type=str,
required=False,
default="128x128x128",
- help="Desired input shape of 3D volume for architecture "+ \
- "(will be cropped into, " + \
- "format: '1x2x3', " + \
- "default: '%(default)s')",
+ help="Desired input shape of 3D volume for architecture (will be cropped into, "
+ + "format: '1x2x3', default: '%(default)s')",
)
oc.add_argument("--epochs",
type=int,
required=False,
default=500,
- help="Number of epochs. A single epoch is defined as " + \
- "one iteration through the complete data set " + \
- "(default: '%(default)s')",
+ help="Number of epochs. A single epoch is defined as "
+ + "one iteration through the complete data set "
+ + "(default: '%(default)s')",
)
oc.add_argument("--batch_size",
type=int,
required=False,
default=24,
- help="Number of samples inside a single batch " + \
- "(default: '%(default)s')",
+ help="Number of samples inside a single batch (default: '%(default)s')",
)
oc.add_argument("--workers",
type=int,
required=False,
default=1,
- help="Number of workers/threads which preprocess " + \
- "batches during runtime " + \
- "(default: '%(default)s')",
+ help="Number of workers/threads which preprocess batches during runtime (default: '%(default)s')",
)
oc.add_argument("--metalearner",
type=str,
required=False,
default="mean",
- help="Key for Metalearner or Aggregate function "+ \
- "(default: '%(default)s')",
+ help="Key for Metalearner or Aggregate function (default: '%(default)s')",
)
oc.add_argument("--architecture",
type=str,
required=False,
default="DenseNet121",
- help="Key of single or multiple Architectures " + \
- "(multiple Architectures are only supported for " + \
- "'analysis=advanced', " + \
- "format: 'KEY' or 'KEY,KEY,KEY', " + \
- "default: '%(default)s')",
+ help="Key of single or multiple Architectures "
+ + "(multiple Architectures are only supported for "
+ + "'analysis=advanced', "
+ + "format: 'KEY' or 'KEY,KEY,KEY', "
+ + "default: '%(default)s')",
)
# Add other arguments
@@ -223,6 +215,7 @@ def cli_training(subparsers):
action="help",
help="show this help message and exit")
+
#-----------------------------------------------------#
# CLI - Prediction #
#-----------------------------------------------------#
@@ -243,7 +236,7 @@ def cli_prediction(subparsers):
??? info "List of XAI Methods"
AUCMEDI provides a large library of state-of-the-art and ready-to-use XAI methods:
[aucmedi.xai.methods][]
- """
+ """ # noqa E501
# Set description for cli prediction
desc = """ Pipeline hub for Inference via AUCMEDI AutoML """
# Setup SubParser
@@ -257,24 +250,21 @@ def cli_prediction(subparsers):
type=str,
required=False,
default="test",
- help="Path to the directory containing the images " + \
- "(default: '%(default)s')",
+ help="Path to the directory containing the images (default: '%(default)s')",
)
od.add_argument("--path_modeldir",
type=str,
required=False,
default="model",
- help="Path to the model directory in which fitted " + \
- "model weights and metadata are stored " + \
- "(default: '%(default)s')",
+ help="Path to the model directory in which fitted model weights and metadata are stored "
+ + "(default: '%(default)s')",
)
od.add_argument("--path_pred",
type=str,
required=False,
default="preds.csv",
- help="Path to the output file in which predicted csv " + \
- "file should be stored " + \
- "(default: '%(default)s')",
+ help="Path to the output file in which predicted csv file should be stored "
+ + "(default: '%(default)s')",
)
# Add configuration arguments
@@ -282,31 +272,27 @@ def cli_prediction(subparsers):
oc.add_argument("--xai_method",
type=str,
required=False,
- help="Key for XAI method " + \
- "(default: '%(default)s')",
+ help="Key for XAI method (default: '%(default)s')",
)
oc.add_argument("--xai_directory",
type=str,
required=False,
default="xai",
- help="Path to the output directory in which predicted " + \
- "image xai heatmaps should be stored " + \
- "(default: '%(default)s')",
+ help="Path to the output directory in which predicted "
+ + "image xai heatmaps should be stored "
+ + "(default: '%(default)s')",
)
oc.add_argument("--batch_size",
type=int,
required=False,
default=12,
- help="Number of samples inside a single batch " + \
- "(default: '%(default)s')",
+ help="Number of samples inside a single batch (default: '%(default)s')",
)
oc.add_argument("--workers",
type=int,
required=False,
default=1,
- help="Number of workers/threads which preprocess " + \
- "batches during runtime " + \
- "(default: '%(default)s')",
+ help="Number of workers/threads which preprocess batches during runtime (default: '%(default)s')",
)
# Add other arguments
@@ -316,6 +302,7 @@ def cli_prediction(subparsers):
action="help",
help="show this help message and exit")
+
#-----------------------------------------------------#
# CLI - Evaluation #
#-----------------------------------------------------#
@@ -330,7 +317,7 @@ def cli_evaluation(subparsers):
| I/O | `--path_pred` | str | `preds.csv` | Path to the input file in which predicted csv file is stored. |
| I/O | `--path_evaldir` | str | `evaluation` | Path to the directory in which evaluation figures and tables should be stored. |
| Other | `--help` | bool | `False` | show this help message and exit. |
- """
+ """ # noqa E501
# Set description for cli evaluation
desc = """ Pipeline hub for Evaluation via AUCMEDI AutoML """
# Setup SubParser
@@ -344,42 +331,37 @@ def cli_evaluation(subparsers):
type=str,
required=False,
default="training",
- help="Path to the directory containing the ground truth" + \
- " images " + \
- "(default: '%(default)s')",
+ help="Path to the directory containing the ground truth images (default: '%(default)s')",
)
od.add_argument("--path_gt",
type=str,
required=False,
- help="Path to the index/class annotation CSV file " + \
- "(only required for defining the ground truth via " + \
- "'csv' instead of 'directory' interface)",
+ help="Path to the index/class annotation CSV file "
+ + "(only required for defining the ground truth via "
+ + "'csv' instead of 'directory' interface)",
)
od.add_argument("--ohe",
action="store_true",
required=False,
default=False,
- help="Boolean option whether annotation data is sparse " + \
- "categorical or one-hot encoded " + \
- "(only required for interface 'csv' and multi-" + \
- "label data, " + \
- "default: '%(default)s')",
+ help="Boolean option whether annotation data is sparse "
+ + "categorical or one-hot encoded "
+ + "(only required for interface 'csv' and multi-"
+ + "label data, "
+ + "default: '%(default)s')",
)
od.add_argument("--path_pred",
type=str,
required=False,
default="preds.csv",
- help="Path to the output file in which predicted csv " + \
- "file are stored " + \
- "(default: '%(default)s')",
+ help="Path to the output file in which predicted csv file are stored (default: '%(default)s')",
)
od.add_argument("--path_evaldir",
type=str,
required=False,
default="evaluation",
- help="Path to the directory in which evaluation " + \
- "figures and tables should be stored " + \
- "(default: '%(default)s')",
+ help="Path to the directory in which evaluation figures and tables should be stored "
+ + "(default: '%(default)s')",
)
# Add other arguments
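The help-string refactor throughout this file swaps trailing-backslash concatenation for continuation lines that start with the operator; this is exactly the style W503 (ignored in `.github/flake8.cfg`) would otherwise flag. Side by side:

```python
# Former style: operator trails the line, continuation via backslash
help_text = "Path to the index/class annotation CSV file " + \
            "(only required for 'csv' interface)"

# New style: operator leads the continuation line (W503 is ignored)
help_text = ("Path to the index/class annotation CSV file "
             + "(only required for 'csv' interface)")
```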
diff --git a/aucmedi/automl/main.py b/aucmedi/automl/main.py
index 4ac7afd1..3dd25427 100644
--- a/aucmedi/automl/main.py
+++ b/aucmedi/automl/main.py
@@ -30,11 +30,13 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import sys
-# Internal libraries
-from aucmedi.automl import *
-from aucmedi.automl.cli import *
+
+# Internal Libraries
+from aucmedi.automl import block_evaluate, block_predict, block_train, parse_cli, parse_yaml
+from aucmedi.automl.cli import cli_core, cli_evaluation, cli_prediction, cli_training
+
#-----------------------------------------------------#
# Main Method - Runner #
@@ -52,22 +54,26 @@ def main():
cli_evaluation(subparsers)
# Help page hook for passing no parameters
- if len(sys.argv)<=1:
+ if len(sys.argv) <= 1:
parser.print_help(sys.stderr)
sys.exit(1)
# Parse arguments
- else : args = parser.parse_args()
+ else:
+ args = parser.parse_args()
# Call corresponding cli or yaml parser
- if args.hub == "yaml" : config = parse_yaml(args)
- else : config = parse_cli(args)
+ if args.hub == "yaml":
+ config = parse_yaml(args)
+ else:
+ config = parse_cli(args)
# Run training pipeline
- if config["hub"] == "training" : block_train(config)
+ if config["hub"] == "training": block_train(config)
# Run prediction pipeline
- if config["hub"] == "prediction" : block_predict(config)
+ if config["hub"] == "prediction": block_predict(config)
# Run evaluation pipeline
- if config["hub"] == "evaluation" : block_evaluate(config)
+ if config["hub"] == "evaluation": block_evaluate(config)
+
# Runner for direct script call
if __name__ == "__main__":
diff --git a/aucmedi/automl/parser_cli.py b/aucmedi/automl/parser_cli.py
index a46dd92c..579729bd 100644
--- a/aucmedi/automl/parser_cli.py
+++ b/aucmedi/automl/parser_cli.py
@@ -19,8 +19,11 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
-# Internal libraries
+# Python Standard Library
+# Third Party Libraries
+# Internal Libraries
+
+
#-----------------------------------------------------#
# Parser - CLI #
#-----------------------------------------------------#
diff --git a/aucmedi/automl/parser_yaml.py b/aucmedi/automl/parser_yaml.py
index a84912bf..228da984 100644
--- a/aucmedi/automl/parser_yaml.py
+++ b/aucmedi/automl/parser_yaml.py
@@ -19,9 +19,10 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+# Third Party Libraries
+# Internal Libraries
-# Internal libraries
#-----------------------------------------------------#
# Parser - YAML #
diff --git a/aucmedi/data_processing/__init__.py b/aucmedi/data_processing/__init__.py
index a124eb12..e800c1a2 100644
--- a/aucmedi/data_processing/__init__.py
+++ b/aucmedi/data_processing/__init__.py
@@ -21,7 +21,8 @@
#-----------------------------------------------------#
""" The data processing is a important part of any AUCMEDI pipeline and represented by two core pillars.
-- The [input_interface][aucmedi.data_processing.io_data.input_interface] function provides important information on the dataset.
+- The [input_interface][aucmedi.data_processing.io_data.input_interface] function provides important information on the
+ dataset.
- The [DataGenerator][aucmedi.data_processing.data_generator.DataGenerator] class is a
powerful interface handling all data processing in AUCMEDI.
diff --git a/aucmedi/data_processing/augmentation/__init__.py b/aucmedi/data_processing/augmentation/__init__.py
index 9a2519a2..4adc27fc 100644
--- a/aucmedi/data_processing/augmentation/__init__.py
+++ b/aucmedi/data_processing/augmentation/__init__.py
@@ -81,10 +81,17 @@
resize=model.meta_input,
image_format=image_format)
```
-"""
+""" # noqa E501
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
from aucmedi.data_processing.augmentation.aug_image import ImageAugmentation
from aucmedi.data_processing.augmentation.aug_volume import VolumeAugmentation
from aucmedi.data_processing.augmentation.aug_batchgenerators import BatchgeneratorsAugmentation
+
+
+__all__ = [
+ "ImageAugmentation",
+ "VolumeAugmentation",
+ "BatchgeneratorsAugmentation"
+]
diff --git a/aucmedi/data_processing/augmentation/aug_batchgenerators.py b/aucmedi/data_processing/augmentation/aug_batchgenerators.py
index df999048..ce8fbf8e 100644
--- a/aucmedi/data_processing/augmentation/aug_batchgenerators.py
+++ b/aucmedi/data_processing/augmentation/aug_batchgenerators.py
@@ -19,13 +19,14 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Third Party Libraries
+import numpy as np
from batchgenerators.transforms.abstract_transforms import Compose
-from batchgenerators.transforms.spatial_transforms import MirrorTransform, SpatialTransform
-from batchgenerators.transforms.color_transforms import ContrastAugmentationTransform, GammaTransform, BrightnessMultiplicativeTransform
+from batchgenerators.transforms.color_transforms import (BrightnessMultiplicativeTransform,
+ ContrastAugmentationTransform, GammaTransform)
from batchgenerators.transforms.noise_transforms import GaussianNoiseTransform
-import warnings
-import numpy as np
+from batchgenerators.transforms.spatial_transforms import MirrorTransform, SpatialTransform
+
#-----------------------------------------------------#
# AUCMEDI Batchgenerators Augmentation #
@@ -115,7 +116,8 @@ def __init__(self, image_shape, mirror=False, rotate=True, scale=True,
mirror (bool): Boolean, whether mirroring should be performed as data augmentation.
rotate (bool): Boolean, whether rotations should be performed as data augmentation.
scale (bool): Boolean, whether scaling should be performed as data augmentation.
- elastic_transform (bool): Boolean, whether elastic deformation should be performed as data augmentation.
+ elastic_transform (bool): Boolean, whether elastic deformation should be performed as data
+ augmentation.
gaussian_noise (bool): Boolean, whether Gaussian noise should be added as data augmentation.
brightness (bool): Boolean, whether brightness changes should be added as data augmentation.
contrast (bool): Boolean, whether contrast changes should be added as data augmentation.
@@ -136,7 +138,8 @@ def __init__(self, image_shape, mirror=False, rotate=True, scale=True,
```
Attributes:
- refine (bool): Boolean, whether clipping to [0,255] should be performed if outside of range.
+ refine (bool): Boolean, whether clipping to [0,255] should be performed if outside of
+ range.
aug_mirror_p (float): Probability of mirroring application if activated. Default=0.5.
aug_rotate_p (float): Probability of rotation application if activated. Default=0.5.
aug_scale_p (float): Probability of scaling application if activated. Default=0.5.
diff --git a/aucmedi/data_processing/augmentation/aug_image.py b/aucmedi/data_processing/augmentation/aug_image.py
index 77314971..02880d44 100644
--- a/aucmedi/data_processing/augmentation/aug_image.py
+++ b/aucmedi/data_processing/augmentation/aug_image.py
@@ -19,13 +19,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
-from albumentations import Compose
+# Python Standard Library
+import warnings
+
+# Third Party Libraries
import albumentations.augmentations as ai
import cv2
-import warnings
import numpy as np
-import random
+from albumentations import Compose
+
#-----------------------------------------------------#
# AUCMEDI Image Augmentation #
@@ -136,7 +138,8 @@ def __init__(self, flip=True, rotate=True, brightness=True, contrast=True,
gaussian_blur (bool): Boolean, whether gaussian blur should be added as data augmentation.
downscaling (bool): Boolean, whether downscaling should be added as data augmentation.
gamma (bool): Boolean, whether gamma changes should be added as data augmentation.
- elastic_transform (bool): Boolean, whether elastic deformation should be performed as data augmentation.
+ elastic_transform (bool): Boolean, whether elastic deformation should be performed as data
+ augmentation.
!!! warning
If class variables (attributes) are modified, the internal augmentation operator
@@ -153,7 +156,8 @@ def __init__(self, flip=True, rotate=True, brightness=True, contrast=True,
```
Attributes:
- refine (bool): Boolean, whether clipping to [0,255] and padding/cropping should be performed if outside of range.
+ refine (bool): Boolean, whether clipping to [0,255] and padding/cropping should be
+ performed if outside of range.
aug_flip_p (float): Probability of flipping application if activated. Default=0.5.
aug_rotate_p (float): Probability of rotation application if activated. Default=0.5.
aug_brightness_p (float): Probability of brightness application if activated. Default=0.5.
@@ -181,7 +185,7 @@ def __init__(self, flip=True, rotate=True, brightness=True, contrast=True,
self.aug_hue = hue
self.aug_compression = compression
self.aug_gaussianNoise = gaussian_noise
- self.aug_gaussianBlur= gaussian_blur
+ self.aug_gaussianBlur = gaussian_blur
self.aug_downscaling = downscaling
self.aug_gamma = gamma
self.aug_gridDistortion = grid_distortion
@@ -292,8 +296,7 @@ def apply(self, image):
aug_image = self.operator(image=image)["image"]
# Perform padding & cropping if image shape changed
if self.refine and aug_image.shape != org_shape:
- aug_image = ai.pad(aug_image, org_shape[0], org_shape[1], border_mode=cv2.BORDER_REPLICATE,
- value=0)
+ aug_image = ai.pad(aug_image, org_shape[0], org_shape[1], border_mode=cv2.BORDER_REPLICATE, value=0)
aug_image = ai.RandomCrop(height=org_shape[0], width=org_shape[1])(image=aug_image)["image"]
# Perform clipping if image is out of grayscale/RGB encodings
if self.refine and (np.min(aug_image) < 0 or np.max(aug_image) > 255):
diff --git a/aucmedi/data_processing/augmentation/aug_volume.py b/aucmedi/data_processing/augmentation/aug_volume.py
index 2d5ca035..141bda36 100644
--- a/aucmedi/data_processing/augmentation/aug_volume.py
+++ b/aucmedi/data_processing/augmentation/aug_volume.py
@@ -19,12 +19,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
-from volumentations import Compose
-from volumentations import augmentations as ai
+# Python Standard Library
+import random
import warnings
+
+# Third Party Libraries
import numpy as np
-import random
+from volumentations import Compose
+from volumentations import augmentations as ai
+
#-----------------------------------------------------#
# AUCMEDI Volume Augmentation #
@@ -149,7 +152,8 @@ def __init__(self, flip=True, rotate=True, brightness=True, contrast=True,
gaussian_blur (bool): Boolean, whether gaussian blur should be added as data augmentation.
downscaling (bool): Boolean, whether downscaling should be added as data augmentation.
gamma (bool): Boolean, whether gamma changes should be added as data augmentation.
- elastic_transform (bool): Boolean, whether elastic deformation should be performed as data augmentation.
+ elastic_transform (bool): Boolean, whether elastic deformation should be performed as data
+ augmentation.
!!! warning
If class variables (attributes) are modified, the internal augmentation operator
@@ -166,7 +170,8 @@ def __init__(self, flip=True, rotate=True, brightness=True, contrast=True,
```
Attributes:
- refine (bool): Boolean, whether clipping to [0,255] and padding/cropping should be performed if outside of range.
+ refine (bool): Boolean, whether clipping to [0,255] and padding/cropping should be
+ performed if outside of range.
aug_flip_p (float): Probability of flipping application if activated. Default=0.5.
aug_rotate_p (float): Probability of rotation application if activated. Default=0.5.
aug_brightness_p (float): Probability of brightness application if activated. Default=0.5.
@@ -194,7 +199,7 @@ def __init__(self, flip=True, rotate=True, brightness=True, contrast=True,
self.aug_hue = hue
self.aug_compression = compression
self.aug_gaussianNoise = gaussian_noise
- self.aug_gaussianBlur= gaussian_blur
+ self.aug_gaussianBlur = gaussian_blur
self.aug_downscaling = downscaling
self.aug_gamma = gamma
self.aug_gridDistortion = grid_distortion
diff --git a/aucmedi/data_processing/data_generator.py b/aucmedi/data_processing/data_generator.py
index 9c47c722..cded02f6 100644
--- a/aucmedi/data_processing/data_generator.py
+++ b/aucmedi/data_processing/data_generator.py
@@ -20,16 +20,23 @@
# Library imports #
#-----------------------------------------------------#
-# External libraries
-from tensorflow.keras.utils import Sequence
-import numpy as np
-from multiprocessing.pool import ThreadPool
-from itertools import repeat
-import tempfile
-import pickle
+# Python Standard Library
import os
+import pickle
+import tempfile
+from itertools import repeat
+from multiprocessing.pool import ThreadPool
+
+# Third Party Libraries
+import numpy as np
+from tensorflow.keras.utils import Sequence
+
-# Internal libraries
+# Internal Libraries
from aucmedi.data_processing.io_loader import image_loader
-from aucmedi.data_processing.subfunctions import Standardize, Resize
+from aucmedi.data_processing.subfunctions import Resize, Standardize
+
#-----------------------------------------------------#
# Keras Data Generator #
@@ -125,6 +132,7 @@ class DataGenerator(Sequence):
standardize_mode=my_model.meta_standardize) # "torch"
```
"""
+
#-----------------------------------------------------#
# Initialization #
#-----------------------------------------------------#
@@ -166,27 +174,36 @@ def __init__(self, samples, path_imagedir, labels=None, metadata=None,
path_imagedir (str): Path to the directory containing the images.
labels (numpy.ndarray): Classification list with One-Hot Encoding. Provided by
[input_interface][aucmedi.data_processing.io_data.input_interface].
- metadata (numpy.ndarray): NumPy Array with additional metadata. Have to be shape (n_samples, meta_variables).
+ metadata (numpy.ndarray): NumPy Array with additional metadata. Has to be of shape
+ (n_samples, meta_variables).
image_format (str): Image format to add at the end of the sample index for image loading.
- Provided by [input_interface][aucmedi.data_processing.io_data.input_interface].
- subfunctions (List of Subfunctions):List of Subfunctions class instances which will be SEQUENTIALLY executed on the data set.
+ Provided by
+ [input_interface][aucmedi.data_processing.io_data.input_interface].
+ subfunctions (List of Subfunctions): List of Subfunctions class instances which will be SEQUENTIALLY executed
+ on the data set.
batch_size (int): Number of samples inside a single batch.
- resize (tuple of int): Resizing shape consisting of a X and Y size. (optional Z size for Volumes)
+ resize (tuple of int): Resizing shape consisting of a X and Y size. (optional Z size for
+ Volumes)
standardize_mode (str): Standardization modus in which image intensity values are scaled.
- Calls the [Standardize][aucmedi.data_processing.subfunctions.standardize] Subfunction.
- data_aug (Augmentation Interface): Data Augmentation class instance which performs diverse augmentation techniques.
+ Calls the
+ [Standardize][aucmedi.data_processing.subfunctions.standardize]
+ Subfunction.
+ data_aug (Augmentation Interface): Data Augmentation class instance which performs diverse augmentation
+ techniques.
If `None` is provided, no augmentation will be performed.
shuffle (bool): Boolean, whether dataset should be shuffled.
grayscale (bool): Boolean, whether images are grayscale or RGB.
sample_weights (list of float): List of weights for samples. Can be computed via
[compute_sample_weights()][aucmedi.utils.class_weights.compute_sample_weights].
- workers (int): Number of workers. If n_workers > 1 = use multi-threading for image preprocessing.
- prepare_images (bool): Boolean, whether all images should be prepared and backup to disk before training.
- Recommended for large images or volumes to reduce CPU computing time.
+ workers (int): Number of workers. If n_workers > 1 = use multi-threading for image
+ preprocessing.
+ prepare_images (bool): Boolean, whether all images should be prepared and backed up to disk before
+ training. Recommended for large images or volumes to reduce CPU
+ computing time.
loader (io_loader function): Function for loading samples/images from disk.
seed (int): Seed to ensure reproducibility for random function.
**kwargs (dict): Additional parameters for the sample loader.
- """
+ """ # noqa E501
# Cache class variables
self.samples = samples
self.labels = labels
@@ -216,10 +233,13 @@ def __init__(self, samples, path_imagedir, labels=None, metadata=None,
# Initialize Standardization Subfunction
if standardize_mode is not None:
self.sf_standardize = Standardize(mode=standardize_mode)
- else : self.sf_standardize = None
+ else:
+ self.sf_standardize = None
# Initialize Resizing Subfunction
- if resize is not None : self.sf_resize = Resize(shape=resize)
- else : self.sf_resize = None
+ if resize is not None:
+ self.sf_resize = Resize(shape=resize)
+ else:
+ self.sf_resize = None
# Sanity check for full sample list
if samples is not None and len(samples) == 0:
raise ValueError("Provided sample list is empty!", len(samples))
@@ -248,8 +268,8 @@ def __init__(self, samples, path_imagedir, labels=None, metadata=None,
# -> Preprocess images beforehand and store them to disk for fast usage later
if self.prepare_images:
self.prepare_dir_object = tempfile.TemporaryDirectory(
- prefix="aucmedi.tmp.",
- suffix=".data")
+ prefix="aucmedi.tmp.",
+ suffix=".data")
self.prepare_dir = self.prepare_dir_object.name
# Preprocess image for each index - Sequential
@@ -275,8 +295,10 @@ def __init__(self, samples, path_imagedir, labels=None, metadata=None,
def _get_batches_of_transformed_samples(self, index_array):
# Initialize Batch stack
batch_stack = ([],)
- if self.labels is not None : batch_stack += ([],)
- if self.sample_weights is not None : batch_stack += ([],)
+ if self.labels is not None:
+ batch_stack += ([],)
+ if self.sample_weights is not None:
+ batch_stack += ([],)
# Process image for each index - Sequential
if self.workers == 0 or self.workers == 1:
@@ -365,7 +387,8 @@ def preprocess_image(self, index, prepared_image=False, run_aug=True,
with open(path_img + ".pickle", "wb") as pickle_writer:
pickle.dump(img, pickle_writer)
# Return preprocessed image
- else : return img
+ else:
+ return img
#-----------------------------------------------------#
# Sample Generation Function #
@@ -379,7 +402,7 @@ def __getitem__(self, raw_idx):
self.__set_index_array__()
# Select samples for next batch
index_array = self.index_array[
- self.batch_size * idx : self.batch_size * (idx + 1)
+ self.batch_size * idx: self.batch_size * (idx + 1)
]
# Generate batch
return self._get_batches_of_transformed_samples(index_array)
@@ -392,14 +415,17 @@ def __len__(self):
return self.iterations
""" Configuration function for fixing the number of iterations. """
+
def set_length(self, iterations):
self.iterations = iterations
""" Configuration function for reseting the number of iterations. """
+
def reset_length(self):
self.iterations = self.max_iterations
""" Internal function for initializing and shuffling the index array. """
+
def __set_index_array__(self):
# Generate index array
self.index_array = np.arange(self.n)
@@ -413,5 +439,6 @@ def __set_index_array__(self):
self.index_array = np.random.permutation(self.n)
""" Internal function at the end of an epoch. """
+
def on_epoch_end(self):
- self.__set_index_array__()
\ No newline at end of file
+ self.__set_index_array__()
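The reformatted slice in `__getitem__` selects one batch worth of indices per call; a toy illustration:

```python
batch_size = 24
index_array = list(range(100))

idx = 2
batch = index_array[batch_size * idx: batch_size * (idx + 1)]
print(batch[0], batch[-1])   # 48 71
```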
diff --git a/aucmedi/data_processing/io_data.py b/aucmedi/data_processing/io_data.py
index 5e0d12b2..f202fc82 100644
--- a/aucmedi/data_processing/io_data.py
+++ b/aucmedi/data_processing/io_data.py
@@ -19,9 +19,7 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
-import os
-# Internal libraries
+# Internal Libraries
import aucmedi.data_processing.io_interfaces as io
#-----------------------------------------------------#
@@ -31,6 +29,7 @@
"npy", "nii", "gz", "mha"]
""" List of accepted image formats. """
+
#-----------------------------------------------------#
# Input Interface for AUCMEDI #
#-----------------------------------------------------#
@@ -87,21 +86,27 @@ def input_interface(interface, path_imagedir, path_data=None, training=True,
path_data (str): Path to the index/class annotation file if required. (csv/json)
training (bool): Boolean option whether annotation data is available.
ohe (bool): Boolean option whether annotation data is sparse categorical or one-hot encoded.
- image_format (str): Force to use a specific image format. By default, image format is determined automatically.
+ image_format (str): Force the use of a specific image format. By default, the image format is determined
+ automatically.
**kwargs (dict): Additional parameters for the format interfaces.
Returns:
index_list (list of str): List of sample/index encoded as Strings. Required in DataGenerator as `samples`.
class_ohe (numpy.ndarray): Classification list as One-Hot encoding. Required in DataGenerator as `labels`.
- class_n (int): Number of classes. Required in NeuralNetwork for Architecture design as `n_labels`.
- class_names (list of str): List of names for corresponding classes. Used for later prediction storage or evaluation.
- image_format (str): Image format to add at the end of the sample index for image loading. Required in DataGenerator.
- """
+ class_n (int): Number of classes. Required in NeuralNetwork for Architecture design as
+ `n_labels`.
+ class_names (list of str): List of names for corresponding classes. Used for later prediction storage or
+ evaluation.
+ image_format (str): Image format to add at the end of the sample index for image loading. Required
+ in DataGenerator.
+ """ # noqa E501
# Transform selected interface to lower case
interface = interface.lower()
# Pass image format if provided
- if image_format != None : allowed_image_formats = [image_format]
- else : allowed_image_formats = ACCEPTABLE_IMAGE_FORMATS
+ if image_format is not None:
+ allowed_image_formats = [image_format]
+ else:
+ allowed_image_formats = ACCEPTABLE_IMAGE_FORMATS
# Verify if provided interface is valid
if interface not in ["csv", "json", "directory"]:
raise Exception("Unknown interface code provided.", interface)
@@ -119,9 +124,11 @@ def input_interface(interface, path_imagedir, path_data=None, training=True,
ds_loader = io.csv_loader
additional_parameters = ["ohe_range", "col_sample", "col_class"]
for para in additional_parameters:
- if para in kwargs : parameters[para] = kwargs[para]
+ if para in kwargs:
+ parameters[para] = kwargs[para]
# Identify correct dataset loader and parameters for JSON format
- elif interface == "json" : ds_loader = io.json_loader
+ elif interface == "json":
+ ds_loader = io.json_loader
# Identify correct dataset loader and parameters for directory format
elif interface == "directory":
ds_loader = io.directory_loader
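The reworked `input_interface` is typically unpacked in one step; a sketch with hypothetical paths, matching the docstring above:

```python
from aucmedi import input_interface

ds = input_interface(interface="csv",
                     path_imagedir="data/images",
                     path_data="data/labels.csv",
                     training=True,
                     ohe=False)
index_list, class_ohe, class_n, class_names, image_format = ds
```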
diff --git a/aucmedi/data_processing/io_interfaces/__init__.py b/aucmedi/data_processing/io_interfaces/__init__.py
index a8be2953..b539e5eb 100644
--- a/aucmedi/data_processing/io_interfaces/__init__.py
+++ b/aucmedi/data_processing/io_interfaces/__init__.py
@@ -31,10 +31,17 @@
| `"directory"` | [io_directory()][aucmedi.data_processing.io_interfaces.io_directory] | Storing class annotations in subdirectories. |
| `"json"` | [io_json()][aucmedi.data_processing.io_interfaces.io_json] | Storing class annotations in a JSON file. |
-"""
+""" # noqa E501
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
from aucmedi.data_processing.io_interfaces.io_csv import csv_loader
from aucmedi.data_processing.io_interfaces.io_json import json_loader
from aucmedi.data_processing.io_interfaces.io_directory import directory_loader
+
+
+__all__ = [
+ "csv_loader",
+ "json_loader",
+ "directory_loader"
+]
diff --git a/aucmedi/data_processing/io_interfaces/io_csv.py b/aucmedi/data_processing/io_interfaces/io_csv.py
index ec59e3f6..da1c38a0 100644
--- a/aucmedi/data_processing/io_interfaces/io_csv.py
+++ b/aucmedi/data_processing/io_interfaces/io_csv.py
@@ -19,11 +19,14 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import os
+
+# Third Party Libraries
import numpy as np
import pandas as pd
+
#-----------------------------------------------------#
# Data Loader Interface based on CSV #
#-----------------------------------------------------#
@@ -67,24 +70,34 @@ def csv_loader(path_data, path_imagedir, allowed_image_formats,
path_imagedir (str): Path to the directory containing the images.
allowed_image_formats (list of str): List of allowed imaging formats. (provided by IO_Interface)
training (bool): Boolean option whether annotation data is available.
- ohe (bool): Boolean option whether annotation data is sparse categorical or one-hot encoded.
- ohe_range (list of str): List of column name values if annotation encoded in OHE. Example: ["classA", "classB", "classC"]
+ ohe (bool): Boolean option whether annotation data is sparse categorical or one-hot
+ encoded.
+ ohe_range (list of str): List of column name values if annotation encoded in OHE. Example:
+ ["classA", "classB", "classC"]
col_sample (str): Index column name for the sample name column. Default: 'SAMPLE'
- col_class (str): Index column name for the sparse categorical classes column. Default: 'CLASS'
+ col_class (str): Index column name for the sparse categorical classes column. Default:
+ 'CLASS'
Returns:
- index_list (list of str): List of sample/index encoded as Strings. Required in DataGenerator as `samples`.
- class_ohe (numpy.ndarray): Classification list as One-Hot encoding. Required in DataGenerator as `labels`.
- class_n (int): Number of classes. Required in NeuralNetwork for Architecture design as `n_labels`.
- class_names (list of str): List of names for corresponding classes. Used for later prediction storage or evaluation.
- image_format (str): Image format to add at the end of the sample index for image loading. Required in DataGenerator.
+ index_list (list of str): List of sample/index encoded as Strings. Required in DataGenerator as
+ `samples`.
+ class_ohe (numpy.ndarray): Classification list as One-Hot encoding. Required in DataGenerator as
+ `labels`.
+ class_n (int): Number of classes. Required in NeuralNetwork for Architecture design as
+ `n_labels`.
+ class_names (list of str): List of names for corresponding classes. Used for later prediction
+ storage or evaluation.
+ image_format (str): Image format to add at the end of the sample index for image loading.
+ Required in DataGenerator.
"""
# Load CSV file
dt = pd.read_csv(path_data, sep=",", header=0)
# Check if image index column exist and parse it
- if col_sample in dt.columns : index_list = dt[col_sample].tolist()
- else : raise Exception("Sample column (" + str(col_sample) + \
- ") not available in CSV file!", path_data)
+ if col_sample in dt.columns:
+ index_list = dt[col_sample].tolist()
+ else:
+ raise Exception("Sample column (" + str(col_sample) +
+ ") not available in CSV file!", path_data)
# Ensure index list to contain strings
index_list = [str(index) for index in index_list]
# Identify image format by peaking first image
@@ -93,18 +106,21 @@ def csv_loader(path_data, path_imagedir, allowed_image_formats,
format = file.split(".")[-1]
if format.lower() in allowed_image_formats or \
format.upper() in allowed_image_formats:
- image_format = format
- break
+ image_format = format
+ break
# Raise Exception if image format is unknown
if image_format is None:
raise Exception("Unknown image format.", path_imagedir)
# Check if image ending is already in sample name by peaking first one
- if index_list[0].endswith("." + image_format) : image_format = None
+ if index_list[0].endswith("." + image_format):
+ image_format = None
# Verify if all images are existing
for sample in index_list:
# Obtain image file path
- if image_format : img_file = sample + "." + image_format
- else : img_file = sample
+ if image_format:
+ img_file = sample + "." + image_format
+ else:
+ img_file = sample
path_img = os.path.join(path_imagedir, img_file)
# Check existance
if not os.path.exists(path_img):
@@ -112,7 +128,8 @@ def csv_loader(path_data, path_imagedir, allowed_image_formats,
'Sample: "' + sample + '"', path_img)
# If CSV is for inference (no annotation data) -> return parsing
- if not training : return index_list, None, None, None, image_format
+ if not training:
+ return index_list, None, None, None, image_format
# Try parsing with a sparse categorical class format (CSV Format 1)
if not ohe:
@@ -128,8 +145,10 @@ def csv_loader(path_data, path_imagedir, allowed_image_formats,
# Try parsing one-hot encoded format (CSV Format 2)
else:
# Identify OHE columns
- if ohe_range is None : ohe_columns = dt.loc[:, dt.columns != col_sample]
- else : ohe_columns = dt.loc[:, ohe_range]
+ if ohe_range is None:
+ ohe_columns = dt.loc[:, dt.columns != col_sample]
+ else:
+ ohe_columns = dt.loc[:, ohe_range]
# Parse information
class_names = list(ohe_columns.columns)
class_n = len(class_names)
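
As a quick orientation for the interface documented above, a minimal usage sketch of `csv_loader` follows; the paths, column layout, and format list are illustrative assumptions, not part of this patch:

```
# Hypothetical usage of csv_loader with a 'SAMPLE'/'CLASS' CSV (Format 1)
from aucmedi.data_processing.io_interfaces.io_csv import csv_loader

index_list, class_ohe, class_n, class_names, image_format = csv_loader(
    path_data="dataset/annotations.csv",    # CSV with 'SAMPLE' and 'CLASS' columns
    path_imagedir="dataset/images",         # directory holding the referenced images
    allowed_image_formats=["png", "jpg"],   # normally provided by the IO_Interface
    training=True,                          # annotation data is available
    ohe=False)                              # sparse categorical classes
```
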
diff --git a/aucmedi/data_processing/io_interfaces/io_directory.py b/aucmedi/data_processing/io_interfaces/io_directory.py
index d9f6dd4f..3a60344e 100644
--- a/aucmedi/data_processing/io_interfaces/io_directory.py
+++ b/aucmedi/data_processing/io_interfaces/io_directory.py
@@ -19,11 +19,13 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import os
-import numpy as np
+
+# Third Party Libraries
import pandas as pd
+
#-----------------------------------------------------#
# Data Loader Interface based on Directories #
#-----------------------------------------------------#
@@ -79,11 +81,16 @@ def directory_loader(path_imagedir, allowed_image_formats, training=True):
training (bool): Boolean option whether annotation data is available.
Returns:
- index_list (list of str): List of sample/index encoded as Strings. Required in DataGenerator as `samples`.
- class_ohe (numpy.ndarray): Classification list as One-Hot encoding. Required in DataGenerator as `labels`.
- class_n (int): Number of classes. Required in NeuralNetwork for Architecture design as `n_labels`.
- class_names (list of str): List of names for corresponding classes. Used for later prediction storage or evaluation.
- image_format (str): Image format to add at the end of the sample index for image loading. Required in DataGenerator.
+ index_list (list of str): List of sample/index encoded as Strings. Required in DataGenerator as
+ `samples`.
+ class_ohe (numpy.ndarray): Classification list as One-Hot encoding. Required in DataGenerator as
+ `labels`.
+ class_n (int): Number of classes. Required in NeuralNetwork for Architecture design as
+ `n_labels`.
+ class_names (list of str): List of names for corresponding classes. Used for later prediction
+ storage or evaluation.
+ image_format (str): Image format to add at the end of the sample index for image loading.
+ Required in DataGenerator.
"""
# Initialize some variables
image_format = None
@@ -118,7 +125,7 @@ def directory_loader(path_imagedir, allowed_image_formats, training=True):
format = file.split(".")[-1]
if format.lower() in allowed_image_formats or \
format.upper() in allowed_image_formats:
- image_format = format
+ image_format = format
# Add sample to list
index_list.append(file[:-(len(format)+1)])
# Raise Exception if image format is unknown
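
A matching sketch for `directory_loader`; the one-subdirectory-per-class layout is an assumption for the training case:

```
# Hypothetical layout: dataset/images/classA/sample01.png, ...
from aucmedi.data_processing.io_interfaces.io_directory import directory_loader

index_list, class_ohe, class_n, class_names, image_format = directory_loader(
    path_imagedir="dataset/images",
    allowed_image_formats=["png", "jpg"],
    training=True)
```
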
diff --git a/aucmedi/data_processing/io_interfaces/io_json.py b/aucmedi/data_processing/io_interfaces/io_json.py
index fcb8ab15..525791f1 100644
--- a/aucmedi/data_processing/io_interfaces/io_json.py
+++ b/aucmedi/data_processing/io_interfaces/io_json.py
@@ -20,11 +20,14 @@
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+import json
import os
+
+# Third Party Libraries
import numpy as np
-import json
import pandas as pd
+
#-----------------------------------------------------#
# Data Loader Interface based on JSON #
#-----------------------------------------------------#
@@ -59,14 +63,20 @@ def json_loader(path_data, path_imagedir, allowed_image_formats, training=True,
path_imagedir (str): Path to the directory containing the images.
allowed_image_formats (list of str): List of allowed imaging formats. (provided by IO_Interface)
training (bool): Boolean option whether annotation data is available.
- ohe (bool): Boolean option whether annotation data is sparse categorical or one-hot encoded.
+ ohe (bool): Boolean option whether annotation data is sparse categorical or one-hot
+ encoded.
Returns:
- index_list (list of str): List of sample/index encoded as Strings. Required in DataGenerator as `samples`.
- class_ohe (numpy.ndarray): Classification list as One-Hot encoding. Required in DataGenerator as `labels`.
- class_n (int): Number of classes. Required in NeuralNetwork for Architecture design as `n_labels`.
- class_names (list of str): List of names for corresponding classes. Used for later prediction storage or evaluation.
- image_format (str): Image format to add at the end of the sample index for image loading. Required in DataGenerator.
+ index_list (list of str): List of sample/index encoded as Strings. Required in DataGenerator as
+ `samples`.
+ class_ohe (numpy.ndarray): Classification list as One-Hot encoding. Required in DataGenerator as
+ `labels`.
+ class_n (int): Number of classes. Required in NeuralNetwork for Architecture design as
+ `n_labels`.
+ class_names (list of str): List of names for corresponding classes. Used for later prediction
+ storage or evaluation.
+ image_format (str): Image format to add at the end of the sample index for image loading.
+ Required in DataGenerator.
"""
# Load JSON file
with open(path_data, "r") as json_reader:
@@ -77,8 +87,8 @@ def json_loader(path_data, path_imagedir, allowed_image_formats, training=True,
format = file.split(".")[-1]
if format.lower() in allowed_image_formats or \
format.upper() in allowed_image_formats:
- image_format = format
- break
+ image_format = format
+ break
# Raise Exception if image format is unknown
if image_format is None:
raise Exception("Unknown image format.", path_imagedir)
@@ -86,14 +96,18 @@ def json_loader(path_data, path_imagedir, allowed_image_formats, training=True,
# Verify if all images are existing
lever = True
for sample in dt_json:
- if sample == "legend" : continue
+ if sample == "legend":
+ continue
# Check if image ending is already in sample name by peaking first one
if lever:
lever = False
- if sample.endswith("." + image_format) : image_format = None
+ if sample.endswith("." + image_format):
+ image_format = None
# Obtain image file path
- if image_format : img_file = sample + "." + image_format
- else : img_file = sample
+ if image_format:
+ img_file = sample + "." + image_format
+ else:
+ img_file = sample
path_img = os.path.join(path_imagedir, img_file)
# Check existance
if not os.path.exists(path_img):
@@ -103,7 +117,8 @@ def json_loader(path_data, path_imagedir, allowed_image_formats, training=True,
# If JSON is for inference (no annotation data)
if not training:
# Ensure index list to contain strings
- if "legend" in dt_json : del dt_json["legend"]
+ if "legend" in dt_json:
+ del dt_json["legend"]
index_list = [str(x) for x in dt_json]
# -> return parsing
return index_list, None, None, None, image_format
@@ -114,14 +129,16 @@ def json_loader(path_data, path_imagedir, allowed_image_formats, training=True,
if "legend" in dt_json:
class_names = dt_json["legend"]
del dt_json["legend"]
- else : class_names = None
+ else:
+ class_names = None
# Obtain class information and index list
index_list = []
classes_sparse = []
for sample in dt_json:
index_list.append(str(sample))
classes_sparse.append(dt_json[sample])
- if class_names is None : class_names = np.unique(classes_sparse).tolist()
+ if class_names is None:
+ class_names = np.unique(classes_sparse).tolist()
class_n = len(class_names)
# Parse sparse categorical annotations to One-Hot Encoding
class_ohe = pd.get_dummies(classes_sparse).to_numpy()
@@ -143,7 +160,8 @@ def json_loader(path_data, path_imagedir, allowed_image_formats, training=True,
class_data.append(dt_json[sample])
class_ohe = np.array(class_data)
# Verify number of class annotation
- if class_n is None : class_ohe.shape[1]
+ if class_n is None:
+        class_n = class_ohe.shape[1]
# Validate if number of samples and number of annotations match
if len(index_list) != len(class_ohe):
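
A minimal sketch for `json_loader`; the JSON content in the comment is a hypothetical sparse categorical annotation file using the optional "legend" key handled above:

```
# Hypothetical JSON content: {"legend": ["classA", "classB"],
#                             "sample_001": 0, "sample_002": 1}
from aucmedi.data_processing.io_interfaces.io_json import json_loader

index_list, class_ohe, class_n, class_names, image_format = json_loader(
    path_data="dataset/annotations.json",
    path_imagedir="dataset/images",
    allowed_image_formats=["png", "jpg"],
    training=True,
    ohe=False)
```
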
diff --git a/aucmedi/data_processing/io_loader/__init__.py b/aucmedi/data_processing/io_loader/__init__.py
index cebfc4e6..94fa6790 100644
--- a/aucmedi/data_processing/io_loader/__init__.py
+++ b/aucmedi/data_processing/io_loader/__init__.py
@@ -55,7 +55,7 @@
image_format=image_format, resize=None,
loader=image_loader)
```
-"""
+""" # noqa E501
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
@@ -63,3 +63,11 @@
from aucmedi.data_processing.io_loader.numpy_loader import numpy_loader
from aucmedi.data_processing.io_loader.sitk_loader import sitk_loader
from aucmedi.data_processing.io_loader.cache_loader import cache_loader
+
+
+__all__ = [
+ "image_loader",
+ "numpy_loader",
+ "sitk_loader",
+ "cache_loader"
+]
diff --git a/aucmedi/data_processing/io_loader/cache_loader.py b/aucmedi/data_processing/io_loader/cache_loader.py
index a6c908fc..4c74cac6 100644
--- a/aucmedi/data_processing/io_loader/cache_loader.py
+++ b/aucmedi/data_processing/io_loader/cache_loader.py
@@ -19,9 +19,12 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
+
#-----------------------------------------------------#
# Cache Loader for AUCMEDI IO #
#-----------------------------------------------------#
@@ -29,7 +32,7 @@ def cache_loader(sample, path_imagedir=None, image_format=None,
grayscale=False, two_dim=True, cache=None, **kwargs):
""" Cache Loader for passing already loaded images within the AUCMEDI pipeline.
- The Cache Loader is an IO_loader function, which have to be passed to the
+    The Cache Loader is an IO_loader function, which has to be passed to the
[DataGenerator][aucmedi.data_processing.data_generator.DataGenerator].
The complete data management happens in the memory.
@@ -86,9 +89,9 @@ def cache_loader(sample, path_imagedir=None, image_format=None,
return img
# Throw Exception
else:
- raise ValueError("Parameter 2D & Grayscale: Expected either 2D " + \
- "without channel axis or 3D with single channel" + \
- " axis, but got:", img.shape, len(img.shape))
+ raise ValueError("Parameter 2D & Grayscale: Expected either 2D "
+ + "without channel axis or 3D with single channel"
+ + " axis, but got:", img.shape, len(img.shape))
# Verify image shape for grayscale & 3D
elif grayscale and not two_dim:
# Add channel axis and return image
@@ -99,9 +102,9 @@ def cache_loader(sample, path_imagedir=None, image_format=None,
return img
# Throw Exception
else:
- raise ValueError("Parameter 3D & Grayscale: Expected either 3D " + \
- "without channel axis or 4D with single channel" + \
- " axis, but got:", img.shape, len(img.shape))
+ raise ValueError("Parameter 3D & Grayscale: Expected either 3D "
+ + "without channel axis or 4D with single channel"
+ + " axis, but got:", img.shape, len(img.shape))
# Verify image shape for rgb & 2D
elif not grayscale and two_dim:
# Just return image
@@ -109,8 +112,8 @@ def cache_loader(sample, path_imagedir=None, image_format=None,
return img
# Throw Exception
else:
- raise ValueError("Parameter 2D & RGB: Expected 3D array " + \
- "including a single channel axis, but got:",
+ raise ValueError("Parameter 2D & RGB: Expected 3D array "
+ + "including a single channel axis, but got:",
img.shape, len(img.shape))
# Verify image shape for rgb & 3D
elif not grayscale and not two_dim:
@@ -119,6 +122,6 @@ def cache_loader(sample, path_imagedir=None, image_format=None,
return img
# Throw Exception
else:
- raise ValueError("Parameter 3D & RGB: Expected 4D array " + \
- "including a single channel axis, but got:",
+ raise ValueError("Parameter 3D & RGB: Expected 4D array "
+ + "including a single channel axis, but got:",
img.shape, len(img.shape))
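
The shape checks restructured above are easiest to follow with an in-memory example; a minimal sketch using a hypothetical cache dictionary:

```
import numpy as np
from aucmedi.data_processing.io_loader import cache_loader

# Hypothetical cache: sample index -> already loaded image
cache = {"sample_001": np.zeros((224, 224, 3), dtype=np.uint8)}

# Passes the '2D & RGB' check above: a 3D array including the channel axis
img = cache_loader("sample_001", grayscale=False, two_dim=True, cache=cache)
```
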
diff --git a/aucmedi/data_processing/io_loader/image_loader.py b/aucmedi/data_processing/io_loader/image_loader.py
index e5334977..d7a90cab 100644
--- a/aucmedi/data_processing/io_loader/image_loader.py
+++ b/aucmedi/data_processing/io_loader/image_loader.py
@@ -19,11 +19,14 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import os
+
+# Third Party Libraries
import numpy as np
from PIL import Image
+
#-----------------------------------------------------#
# Image Loader for AUCMEDI IO #
#-----------------------------------------------------#
@@ -69,14 +72,18 @@ def image_loader(sample, path_imagedir, image_format=None, grayscale=False,
**kwargs (dict): Additional parameters for the sample loader.
"""
# Get image path
- if image_format : img_file = sample + "." + image_format
- else : img_file = sample
+ if image_format:
+ img_file = sample + "." + image_format
+ else:
+ img_file = sample
path_img = os.path.join(path_imagedir, img_file)
# Load image via the PIL package
img_raw = Image.open(path_img)
# Convert image to grayscale or rgb
- if grayscale : img_converted = img_raw.convert('LA')
- else : img_converted = img_raw.convert('RGB')
+ if grayscale:
+ img_converted = img_raw.convert('LA')
+ else:
+ img_converted = img_raw.convert('RGB')
# Convert image to NumPy
img = np.asarray(img_converted)
# Perform additional preprocessing if grayscale image
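
A minimal usage sketch for `image_loader`; the sample name and directory are hypothetical:

```
from aucmedi.data_processing.io_loader import image_loader

# Hypothetical sample stored as dataset/images/xray_042.png
img = image_loader("xray_042", "dataset/images", image_format="png",
                   grayscale=False)   # PIL-loaded image as NumPy array (H, W, 3)
```
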
diff --git a/aucmedi/data_processing/io_loader/numpy_loader.py b/aucmedi/data_processing/io_loader/numpy_loader.py
index 412b27bf..5f3880d0 100644
--- a/aucmedi/data_processing/io_loader/numpy_loader.py
+++ b/aucmedi/data_processing/io_loader/numpy_loader.py
@@ -19,10 +19,13 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import os
+
+# Third Party Libraries
import numpy as np
+
#-----------------------------------------------------#
# Numpy Loader for AUCMEDI IO #
#-----------------------------------------------------#
@@ -64,8 +67,10 @@ def numpy_loader(sample, path_imagedir, image_format=None, grayscale=False,
**kwargs (dict): Additional parameters for the sample loader.
"""
# Get image path
- if image_format : img_file = sample + "." + image_format
- else : img_file = sample
+ if image_format:
+ img_file = sample + "." + image_format
+ else:
+ img_file = sample
path_img = os.path.join(path_imagedir, img_file)
# Load image via the NumPy package
img = np.load(path_img, allow_pickle=True)
@@ -79,8 +84,8 @@ def numpy_loader(sample, path_imagedir, image_format=None, grayscale=False,
return img
# Throw Exception
else:
- raise ValueError("Parameter 2D & Grayscale: Expected either 2D " + \
- "without channel axis or 3D with single channel" + \
+ raise ValueError("Parameter 2D & Grayscale: Expected either 2D " +
+ "without channel axis or 3D with single channel" +
" axis, but got:", img.shape, len(img.shape))
# Verify image shape for grayscale & 3D
elif grayscale and not two_dim:
@@ -92,8 +97,8 @@ def numpy_loader(sample, path_imagedir, image_format=None, grayscale=False,
return img
# Throw Exception
else:
- raise ValueError("Parameter 3D & Grayscale: Expected either 3D " + \
- "without channel axis or 4D with single channel" + \
+ raise ValueError("Parameter 3D & Grayscale: Expected either 3D " +
+ "without channel axis or 4D with single channel" +
" axis, but got:", img.shape, len(img.shape))
# Verify image shape for rgb & 2D
elif not grayscale and two_dim:
@@ -102,7 +107,7 @@ def numpy_loader(sample, path_imagedir, image_format=None, grayscale=False,
return img
# Throw Exception
else:
- raise ValueError("Parameter 2D & RGB: Expected 3D array " + \
+ raise ValueError("Parameter 2D & RGB: Expected 3D array " +
"including a single channel axis, but got:",
img.shape, len(img.shape))
# Verify image shape for rgb & 3D
@@ -112,6 +117,6 @@ def numpy_loader(sample, path_imagedir, image_format=None, grayscale=False,
return img
# Throw Exception
else:
- raise ValueError("Parameter 3D & RGB: Expected 4D array " + \
+ raise ValueError("Parameter 3D & RGB: Expected 4D array " +
"including a single channel axis, but got:",
img.shape, len(img.shape))
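
A minimal sketch for `numpy_loader`, assuming (per the checks above) that the grayscale 2D branch appends the missing channel axis; paths are hypothetical:

```
import os
import numpy as np
from aucmedi.data_processing.io_loader import numpy_loader

# Create a hypothetical pre-encoded grayscale 2D sample on disk
os.makedirs("dataset/npy", exist_ok=True)
np.save("dataset/npy/sample_001.npy", np.zeros((224, 224)))

img = numpy_loader("sample_001", "dataset/npy", image_format="npy",
                   grayscale=True, two_dim=True)
```
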
diff --git a/aucmedi/data_processing/io_loader/sitk_loader.py b/aucmedi/data_processing/io_loader/sitk_loader.py
index 432ab262..a6154448 100644
--- a/aucmedi/data_processing/io_loader/sitk_loader.py
+++ b/aucmedi/data_processing/io_loader/sitk_loader.py
@@ -19,17 +19,21 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import os
+
+# Third Party Libraries
import numpy as np
import SimpleITK as sitk
+
#-----------------------------------------------------#
# SITK Loader for AUCMEDI IO #
#-----------------------------------------------------#
def sitk_loader(sample, path_imagedir, image_format=None, grayscale=True,
resampling=(1.0, 1.0, 1.0), outside_value=0, **kwargs):
- """ SimpleITK Loader for loading of CT/MRI scans in NIfTI (nii) or Metafile (mha) format within the AUCMEDI pipeline.
+ """ SimpleITK Loader for loading of CT/MRI scans in NIfTI (nii) or Metafile (mha) format within the AUCMEDI
+ pipeline.
The SimpleITK Loader is an IO_loader function, which have to be passed to the
[DataGenerator][aucmedi.data_processing.data_generator.DataGenerator].
@@ -73,8 +77,10 @@ def sitk_loader(sample, path_imagedir, image_format=None, grayscale=True,
**kwargs (dict): Additional parameters for the sample loader.
"""
# Get image path
- if image_format : img_file = sample + "." + image_format
- else : img_file = sample
+ if image_format:
+ img_file = sample + "." + image_format
+ else:
+ img_file = sample
path_img = os.path.join(path_imagedir, img_file)
# Load image via the SimpleITK package
sample_itk = sitk.ReadImage(path_img)
@@ -101,10 +107,12 @@ def sitk_loader(sample, path_imagedir, image_format=None, grayscale=True,
sample_itk.GetDirection(),
outside_value)
# Skip resampling if None
- else : sample_itk_resampled = sample_itk
+ else:
+ sample_itk_resampled = sample_itk
# Convert to NumPy
img = sitk.GetArrayFromImage(sample_itk_resampled)
# Add single channel axis
- if len(img.shape) == 3 : img = np.expand_dims(img, axis=-1)
+ if len(img.shape) == 3:
+ img = np.expand_dims(img, axis=-1)
# Return image
return img
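
A minimal sketch for `sitk_loader` with a hypothetical NIfTI volume; the isotropic spacing mirrors the default `resampling` parameter:

```
from aucmedi.data_processing.io_loader import sitk_loader

# Hypothetical CT volume: dataset/volumes/scan_07.nii
vol = sitk_loader("scan_07", "dataset/volumes", image_format="nii",
                  resampling=(1.0, 1.0, 1.0), outside_value=0)
# for 3D input, a single channel axis is appended, e.g. shape (z, y, x, 1)
```
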
diff --git a/aucmedi/data_processing/subfunctions/__init__.py b/aucmedi/data_processing/subfunctions/__init__.py
index 320a7cc0..d1fae995 100644
--- a/aucmedi/data_processing/subfunctions/__init__.py
+++ b/aucmedi/data_processing/subfunctions/__init__.py
@@ -24,9 +24,9 @@
A Subfunction is a preprocessing method, which is automatically applied on all samples
if provided to a [DataGenerator][aucmedi.data_processing.data_generator.DataGenerator].
- Image preprocessing is defined as a method or technique which modify the image before passing it to the neural network model.
- The aim of preprocessing methods is to extensively increase performance due to simplification of information.
- Common preprocessing methods range from intensity value normalization to image resizing.
+    Image preprocessing is defined as a method or technique which modifies the image before passing it to the neural
+    network model. The aim of preprocessing methods is to increase performance through simplification of
+    information. Common preprocessing methods range from intensity value normalization to image resizing.
???+ info
The DataGenerator applies the list of Subfunctions **sequentially** on the data set.
@@ -55,7 +55,8 @@
subfunctions=sf_list) # Pass desired Subfunctions
```
- Subfunctions are based on the abstract base class [Subfunction_Base][aucmedi.data_processing.subfunctions.sf_base.Subfunction_Base],
+ Subfunctions are based on the abstract base class
+ [Subfunction_Base][aucmedi.data_processing.subfunctions.sf_base.Subfunction_Base],
which allow simple integration of custom preprocessing methods.
"""
#-----------------------------------------------------#
@@ -68,3 +69,14 @@
from aucmedi.data_processing.subfunctions.color_constancy import ColorConstancy
from aucmedi.data_processing.subfunctions.clip import Clip
from aucmedi.data_processing.subfunctions.chromer import Chromer
+
+
+__all__ = [
+ "Standardize",
+ "Resize",
+ "Padding",
+ "Crop",
+ "ColorConstancy",
+ "Clip",
+ "Chromer"
+]
diff --git a/aucmedi/data_processing/subfunctions/chromer.py b/aucmedi/data_processing/subfunctions/chromer.py
index 38d9e69e..ad9fbbb6 100644
--- a/aucmedi/data_processing/subfunctions/chromer.py
+++ b/aucmedi/data_processing/subfunctions/chromer.py
@@ -19,9 +19,12 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+
+# Internal Libraries
from aucmedi.data_processing.subfunctions.sf_base import Subfunction_Base
#-----------------------------------------------------#
@@ -40,6 +43,8 @@
__init__ Object creation function.
transform: Apply chromer.
"""
+
+
class Chromer(Subfunction_Base):
""" A Subfunction class which which can be used for color format transforming.
@@ -77,15 +82,11 @@ def __init__(self, target="rgb"):
#---------------------------------------------#
def transform(self, image):
# Verify that image is in correct format
- if self.target == "rgb" and (image.shape[-1] != 1 or \
- np.max(image) > 255 or \
- np.min(image) < 0):
+ if self.target == "rgb" and (image.shape[-1] != 1 or np.max(image) > 255 or np.min(image) < 0):
raise ValueError("Subfunction Chromer: Image is not in grayscale format!",
"Ensure that it is grayscale normalized and has",
"a single channel.")
- elif self.target == "grayscale" and (image.shape[-1] != 3 or \
- np.max(image) > 255 or \
- np.min(image) < 0):
+ elif self.target == "grayscale" and (image.shape[-1] != 3 or np.max(image) > 255 or np.min(image) < 0):
raise ValueError("Subfunction Chromer: Image is not in RGB format!",
"Ensure that it is normalized [0,255] and has 3 channels.")
# Run grayscale -> RGB
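
A minimal sketch that satisfies the grayscale validation rewritten above (single channel axis, values within [0, 255]):

```
import numpy as np
from aucmedi.data_processing.subfunctions import Chromer

# Hypothetical grayscale image with a single channel axis
img_gray = np.random.randint(0, 256, size=(224, 224, 1))

sf = Chromer(target="rgb")
img_rgb = sf.transform(img_gray)   # grayscale -> RGB, shape (224, 224, 3)
```
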
diff --git a/aucmedi/data_processing/subfunctions/clip.py b/aucmedi/data_processing/subfunctions/clip.py
index 0ec98936..9637124c 100644
--- a/aucmedi/data_processing/subfunctions/clip.py
+++ b/aucmedi/data_processing/subfunctions/clip.py
@@ -19,11 +19,13 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+
+# Internal Libraries
from aucmedi.data_processing.subfunctions.sf_base import Subfunction_Base
+
#-----------------------------------------------------#
# Subfunction class: Clip #
#-----------------------------------------------------#
@@ -46,7 +48,8 @@ def __init__(self, min=None, max=None, per_channel=False):
Also possible to pass a list of minimum values if `per_channel=True`.
max (float or int or list): Desired maximum value for clipping (if `None`, no upper limit is applied).
Also possible to pass a list of maximum values if `per_channel=True`.
- per_channel (bool): Option if clipping should be applied per channel with different clipping ranges.
+ per_channel (bool): Option if clipping should be applied per channel with different clipping
+ ranges.
"""
self.min = min
self.max = max
@@ -64,18 +67,16 @@ def transform(self, image):
image_clipped = image.copy()
for c in range(0, image.shape[-1]):
# Identify minimum to clip
- if self.min is not None and \
- type(self.min) in [list, tuple, np.ndarray]:
+ if self.min is not None and type(self.min) in [list, tuple, np.ndarray]:
min = self.min[c]
- else : min = self.min
+ else:
+ min = self.min
# Identify maximum to clip
- if self.max is not None and \
- type(self.max) in [list, tuple, np.ndarray]:
+ if self.max is not None and type(self.max) in [list, tuple, np.ndarray]:
max = self.max[c]
- else : max = self.max
+ else:
+ max = self.max
# Perform clipping
- image_clipped[..., c] = np.clip(image[...,c],
- a_min=min,
- a_max=max)
+            image_clipped[..., c] = np.clip(image[..., c], a_min=min, a_max=max)
# Return clipped image
return image_clipped
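
A minimal sketch of the per-channel branch simplified above, with illustrative clipping ranges:

```
import numpy as np
from aucmedi.data_processing.subfunctions import Clip

# Per-channel clipping: channel c is clipped to [min[c], max[c]]
sf = Clip(min=[0, 10, 20], max=[255, 245, 235], per_channel=True)
img = np.random.randint(0, 256, size=(8, 8, 3))
img_clipped = sf.transform(img)
```
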
diff --git a/aucmedi/data_processing/subfunctions/color_constancy.py b/aucmedi/data_processing/subfunctions/color_constancy.py
index d4f44d4e..d8ba122f 100644
--- a/aucmedi/data_processing/subfunctions/color_constancy.py
+++ b/aucmedi/data_processing/subfunctions/color_constancy.py
@@ -19,11 +19,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+
+# Internal Libraries
from aucmedi.data_processing.subfunctions.sf_base import Subfunction_Base
+
#-----------------------------------------------------#
# Subfunction class: Color Constancy #
#-----------------------------------------------------#
diff --git a/aucmedi/data_processing/subfunctions/crop.py b/aucmedi/data_processing/subfunctions/crop.py
index 17032017..749499ba 100644
--- a/aucmedi/data_processing/subfunctions/crop.py
+++ b/aucmedi/data_processing/subfunctions/crop.py
@@ -19,12 +19,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
import albumentations
import volumentations
-# Internal libraries/scripts
+
+# Internal Libraries
from aucmedi.data_processing.subfunctions.sf_base import Subfunction_Base
+
#-----------------------------------------------------#
# Subfunction class: Cropping #
#-----------------------------------------------------#
@@ -45,7 +49,7 @@ class Crop(Subfunction_Base):
Cropping is done via volumentations CenterCrop and RandomCrop transform.
https://github.com/muellerdo/volumentations
- """
+ """ # noqa E501
#---------------------------------------------#
# Initialization #
#---------------------------------------------#
@@ -67,13 +71,15 @@ def __init__(self, shape=(224, 224), mode="center"):
elif len(shape) == 3:
params["shape"] = shape
mod = volumentations
- else : raise ValueError("Shape for cropping has to be 2D or 3D!", shape)
+ else:
+ raise ValueError("Shape for cropping has to be 2D or 3D!", shape)
# Initialize cropping transform
if mode == "center":
self.aug_transform = mod.Compose([mod.CenterCrop(**params)])
elif mode == "random":
self.aug_transform = mod.Compose([mod.RandomCrop(**params)])
- else : raise ValueError("Unknown mode for crop Subfunction", mode,
+ else:
+ raise ValueError("Unknown mode for crop Subfunction", mode,
"Possibles modes are: ['center', 'random']")
# Cache shape
self.shape = shape
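
A minimal sketch of both crop modes; per the constructor above, 2D shapes route to albumentations and 3D shapes to volumentations:

```
from aucmedi.data_processing.subfunctions import Crop

sf_2d = Crop(shape=(224, 224), mode="center")     # 2D -> albumentations CenterCrop
sf_3d = Crop(shape=(96, 96, 96), mode="random")   # 3D -> volumentations RandomCrop
```
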
diff --git a/aucmedi/data_processing/subfunctions/padding.py b/aucmedi/data_processing/subfunctions/padding.py
index c6c03d87..eaf412be 100644
--- a/aucmedi/data_processing/subfunctions/padding.py
+++ b/aucmedi/data_processing/subfunctions/padding.py
@@ -19,11 +19,13 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+
+# Internal Libraries
from aucmedi.data_processing.subfunctions.sf_base import Subfunction_Base
+
#-----------------------------------------------------#
# Subfunction class: Padding #
#-----------------------------------------------------#
@@ -63,18 +65,21 @@ def transform(self, image):
max_axis = max(image.shape[:-1])
new_shape = [max_axis for x in range(0, len(image.shape[:-1]))]
else:
- new_shape = [max(self.shape[i],image.shape[i]) \
+ new_shape = [max(self.shape[i],image.shape[i])
for i in range(0, len(image.shape[:-1]))]
# Compute padding width
- ## Code inspiration from: https://github.com/MIC-DKFZ/batchgenerators/blob/master/batchgenerators/augmentations/utils.py
- ## Leave a star for them if you are reading this. The MIC-DKFZ is doing some great work ;)
+ # Code inspiration from:
+ # https://github.com/MIC-DKFZ/batchgenerators/blob/master/batchgenerators/augmentations/utils.py
+ # Leave a star for them if you are reading this. The MIC-DKFZ is doing some great work ;)
difference = new_shape - np.asarray(image.shape[0:-1])
pad_below = difference // 2
pad_above = difference // 2 + difference % 2
pad_list = list([list(i) for i in zip(pad_below, pad_above)]) + [[0, 0]]
# Identify correct NumPy pad mode
- if self.mode == "square" : pad_mode = "edge"
- else : pad_mode = self.mode
+ if self.mode == "square":
+ pad_mode = "edge"
+ else:
+ pad_mode = self.mode
# Perform padding into desired shape
image_padded = np.pad(image, pad_list, mode=pad_mode)
# Return padded image
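
A worked example of the padding-width computation above, with values chosen for illustration:

```
import numpy as np
from aucmedi.data_processing.subfunctions import Padding

# For a (50, 61, 3) image and shape=(64, 64): difference = (14, 3),
# pad_below = (7, 1), pad_above = (7, 2) -> padded shape (64, 64, 3)
img = np.zeros((50, 61, 3))
img_padded = Padding(shape=(64, 64), mode="edge").transform(img)
```
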
diff --git a/aucmedi/data_processing/subfunctions/resize.py b/aucmedi/data_processing/subfunctions/resize.py
index e715b9f9..a0c198f1 100644
--- a/aucmedi/data_processing/subfunctions/resize.py
+++ b/aucmedi/data_processing/subfunctions/resize.py
@@ -19,12 +19,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
import albumentations
import volumentations
-# Internal libraries/scripts
+
+# Internal Libraries
from aucmedi.data_processing.subfunctions.sf_base import Subfunction_Base
+
#-----------------------------------------------------#
# Subfunction class: Resize #
#-----------------------------------------------------#
@@ -65,7 +69,8 @@ def __init__(self, shape=(224, 224), interpolation=1):
elif len(shape) == 3:
params["shape"] = shape
mod = volumentations
- else : raise ValueError("Shape for Resize has to be 2D or 3D!", shape)
+ else:
+ raise ValueError("Shape for Resize has to be 2D or 3D!", shape)
# Initialize resizing transform
self.aug_transform = mod.Compose([mod.Resize(**params)])
# Cache shape
diff --git a/aucmedi/data_processing/subfunctions/sf_base.py b/aucmedi/data_processing/subfunctions/sf_base.py
index fab80c9b..4f4e4f83 100644
--- a/aucmedi/data_processing/subfunctions/sf_base.py
+++ b/aucmedi/data_processing/subfunctions/sf_base.py
@@ -19,9 +19,10 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
from abc import ABC, abstractmethod
+
#-----------------------------------------------------#
# Abstract Base Class for Subfunctions #
#-----------------------------------------------------#
@@ -65,6 +66,7 @@ def __init__(self):
The are no mandatory required parameters for the initialization.
"""
pass
+
#---------------------------------------------#
# Transformation #
#---------------------------------------------#
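
A minimal sketch of a custom Subfunction built on the abstract base class above; `InvertIntensity` is a hypothetical example, not part of AUCMEDI:

```
from aucmedi.data_processing.subfunctions.sf_base import Subfunction_Base


class InvertIntensity(Subfunction_Base):
    """ Hypothetical custom Subfunction: invert intensities in [0, 255]. """
    def __init__(self):
        pass

    def transform(self, image):
        return 255 - image
```
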
diff --git a/aucmedi/data_processing/subfunctions/standardize.py b/aucmedi/data_processing/subfunctions/standardize.py
index ff70fa9e..c1a4eff7 100644
--- a/aucmedi/data_processing/subfunctions/standardize.py
+++ b/aucmedi/data_processing/subfunctions/standardize.py
@@ -19,12 +19,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
-from tensorflow.keras.applications import imagenet_utils
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+from tensorflow.keras.applications import imagenet_utils
+
+# Internal Libraries
from aucmedi.data_processing.subfunctions.sf_base import Subfunction_Base
+
#-----------------------------------------------------#
# Subfunction class: Standardize #
#-----------------------------------------------------#
@@ -53,7 +57,7 @@ class Standardize(Subfunction_Base):
Keras preprocess_input() for `"tf", "caffe", "torch"`
https://www.tensorflow.org/api_docs/python/tf/keras/applications/imagenet_utils/preprocess_input
- """
+ """ # noqa E501
#---------------------------------------------#
# Initialization #
#---------------------------------------------#
@@ -84,7 +88,8 @@ def transform(self, image):
for c in range(0, image.shape[-1]):
image_norm[..., c] = self.normalize(image[..., c])
# Apply normalization across complete image
- else : image_norm = self.normalize(image)
+ else:
+ image_norm = self.normalize(image)
# Return standardized image
return image_norm
@@ -98,7 +103,7 @@ def normalize(self, image):
mean = np.mean(image)
std = np.std(image)
# Scaling
- image_norm = (image - mean + self.e) / (std + self.e)
+ image_norm = (image - mean + self.e) / (std + self.e)
# Perform MinMax normalization between [0,1]
elif self.mode == "minmax":
# Identify minimum and maximum
@@ -125,4 +130,4 @@ def normalize(self, image):
# Perform architecture standardization
image_norm = imagenet_utils.preprocess_input(image, mode=self.mode)
# Return normalized image
- return image_norm
\ No newline at end of file
+ return image_norm
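
A minimal sketch of the scaling formula above; the "z-score" mode key is assumed from the surrounding code:

```
import numpy as np
from aucmedi.data_processing.subfunctions import Standardize

img = np.random.rand(32, 32, 3) * 255
sf = Standardize(mode="z-score")
img_norm = sf.transform(img)   # (image - mean + e) / (std + e), as shown above
```
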
diff --git a/aucmedi/ensemble/__init__.py b/aucmedi/ensemble/__init__.py
index e5740b57..2a12592a 100644
--- a/aucmedi/ensemble/__init__.py
+++ b/aucmedi/ensemble/__init__.py
@@ -43,7 +43,7 @@
An Analysis on Ensemble Learning optimized Medical Image Classification with Deep Convolutional Neural Networks.
arXiv e-print: https://arxiv.org/abs/2201.11440
-"""
+""" # noqa 501
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
@@ -51,3 +51,11 @@
from aucmedi.ensemble.bagging import Bagging
from aucmedi.ensemble.stacking import Stacking
from aucmedi.ensemble.composite import Composite
+
+
+__all__ = [
+ "predict_augmenting",
+ "Bagging",
+ "Stacking",
+ "Composite"
+]
diff --git a/aucmedi/ensemble/aggregate/__init__.py b/aucmedi/ensemble/aggregate/__init__.py
index e366c0e9..ad91dec3 100644
--- a/aucmedi/ensemble/aggregate/__init__.py
+++ b/aucmedi/ensemble/aggregate/__init__.py
@@ -47,7 +47,8 @@
preds = predict_augmenting(model, test_datagen, n_cycles=5, aggregate=my_agg)
```
-Aggregate functions are based on the abstract base class [Aggregate_Base][aucmedi.ensemble.aggregate.agg_base.Aggregate_Base],
+Aggregate functions are based on the abstract base class
+[Aggregate_Base][aucmedi.ensemble.aggregate.agg_base.Aggregate_Base],
which allows simple integration of custom aggregate methods for Ensemble.
"""
#-----------------------------------------------------#
@@ -70,3 +71,13 @@
"global_argmax": GlobalArgmax,
}
""" Dictionary of implemented Aggregate functions. """
+
+
+__all__ = [
+ "aggregate_dict",
+ "AveragingMean",
+ "AveragingMedian",
+ "MajorityVote",
+ "Softmax",
+ "GlobalArgmax"
+]
diff --git a/aucmedi/ensemble/aggregate/agg_base.py b/aucmedi/ensemble/aggregate/agg_base.py
index 8d70b4b7..e402e0d0 100644
--- a/aucmedi/ensemble/aggregate/agg_base.py
+++ b/aucmedi/ensemble/aggregate/agg_base.py
@@ -19,9 +19,10 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
from abc import ABC, abstractmethod
+
#-----------------------------------------------------#
# Abstract Base Class for Aggregation #
#-----------------------------------------------------#
@@ -70,6 +71,7 @@ def __init__(self):
There are no mandatory parameters for the initialization.
"""
pass
+
#---------------------------------------------#
# Aggregate #
#---------------------------------------------#
@@ -81,8 +83,9 @@ def aggregate(self, preds):
It is possible to pass configurations through the initialization function for this class.
Args:
- preds (numpy.ndarray): Assembled predictions encoded in a NumPy matrix with shape (N_models, N_classes).
+ preds (numpy.ndarray): Assembled predictions encoded in a NumPy matrix with shape
+ (N_models, N_classes).
Returns:
pred (numpy.ndarray): Merged prediction encoded in a NumPy matrix with shape (1, N_classes).
"""
- return pred
+ return pred # noqa F821
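
A minimal sketch of a custom Aggregate function extending the base class above; `TrimmedMean` is a hypothetical example, not part of AUCMEDI:

```
import numpy as np
from aucmedi.ensemble.aggregate.agg_base import Aggregate_Base


class TrimmedMean(Aggregate_Base):
    """ Hypothetical Aggregate: per-class mean without the extreme values. """
    def __init__(self):
        pass

    def aggregate(self, preds):
        # preds: shape (N_models, N_classes); requires N_models >= 3
        preds_sorted = np.sort(preds, axis=0)
        return np.mean(preds_sorted[1:-1, :], axis=0)
```
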
diff --git a/aucmedi/ensemble/aggregate/averaging_mean.py b/aucmedi/ensemble/aggregate/averaging_mean.py
index e9199a1a..dfb83327 100644
--- a/aucmedi/ensemble/aggregate/averaging_mean.py
+++ b/aucmedi/ensemble/aggregate/averaging_mean.py
@@ -19,11 +19,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+
+# Internal Libraries
from aucmedi.ensemble.aggregate.agg_base import Aggregate_Base
+
#-----------------------------------------------------#
# Aggregate: Averaging by Mean #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/aggregate/averaging_median.py b/aucmedi/ensemble/aggregate/averaging_median.py
index 3cde3542..82ecbbfa 100644
--- a/aucmedi/ensemble/aggregate/averaging_median.py
+++ b/aucmedi/ensemble/aggregate/averaging_median.py
@@ -19,11 +19,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+
+# Internal Libraries
from aucmedi.ensemble.aggregate.agg_base import Aggregate_Base
+
#-----------------------------------------------------#
# Aggregate: Averaging by Median #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/aggregate/global_argmax.py b/aucmedi/ensemble/aggregate/global_argmax.py
index 54c681dc..9b2dfb90 100644
--- a/aucmedi/ensemble/aggregate/global_argmax.py
+++ b/aucmedi/ensemble/aggregate/global_argmax.py
@@ -19,11 +19,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+
+# Internal Libraries
from aucmedi.ensemble.aggregate.agg_base import Aggregate_Base
+
#-----------------------------------------------------#
# Aggregate: Global Argmax #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/aggregate/majority_vote.py b/aucmedi/ensemble/aggregate/majority_vote.py
index 30af4445..184f940a 100644
--- a/aucmedi/ensemble/aggregate/majority_vote.py
+++ b/aucmedi/ensemble/aggregate/majority_vote.py
@@ -19,11 +19,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+
+# Internal Libraries
from aucmedi.ensemble.aggregate.agg_base import Aggregate_Base
+
#-----------------------------------------------------#
# Aggregate: Majority Vote #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/aggregate/softmax.py b/aucmedi/ensemble/aggregate/softmax.py
index 92213749..07e51283 100644
--- a/aucmedi/ensemble/aggregate/softmax.py
+++ b/aucmedi/ensemble/aggregate/softmax.py
@@ -19,11 +19,13 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+
+# Internal Libraries
from aucmedi.ensemble.aggregate.agg_base import Aggregate_Base
+
#-----------------------------------------------------#
# Aggregate: Softmax #
#-----------------------------------------------------#
@@ -50,6 +52,7 @@ def aggregate(self, preds):
# Return prediction
return pred
+
#-----------------------------------------------------#
# Subfunction: Softmax Formula #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/augmenting.py b/aucmedi/ensemble/augmenting.py
index 3b1c1ada..95a0ea9b 100644
--- a/aucmedi/ensemble/augmenting.py
+++ b/aucmedi/ensemble/augmenting.py
@@ -19,12 +19,13 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Third Party Libraries
import numpy as np
-# Internal libraries
-from aucmedi import ImageAugmentation, VolumeAugmentation, DataGenerator
+
+# Internal Libraries
+from aucmedi import DataGenerator, ImageAugmentation, VolumeAugmentation
from aucmedi.ensemble.aggregate import aggregate_dict
-from aucmedi.data_processing.io_loader import image_loader
+
#-----------------------------------------------------#
# Ensemble Learning: Inference Augmenting #
@@ -55,7 +56,8 @@ def predict_augmenting(model, prediction_generator, n_cycles=10, aggregate="mean
- self-initialization with an AUCMEDI Aggregate function,
- use a string key to call an AUCMEDI Aggregate function by name, or
- - implementing a custom Aggregate function by extending the [AUCMEDI base class for Aggregate functions][aucmedi.ensemble.aggregate.agg_base]
+ - implementing a custom Aggregate function by extending the
+ [AUCMEDI base class for Aggregate functions][aucmedi.ensemble.aggregate.agg_base]
!!! info
Description and list of implemented Aggregate functions can be found here:
@@ -78,12 +80,14 @@ def predict_augmenting(model, prediction_generator, n_cycles=10, aggregate="mean
model (NeuralNetwork): Instance of a AUCMEDI neural network class.
prediction_generator (DataGenerator): A data generator which will be used for Augmenting based inference.
n_cycles (int): Number of image augmentations, which should be created per sample.
- aggregate (str or aggregate Function): Aggregate function class instance or a string for an AUCMEDI Aggregate function.
+ aggregate (str or aggregate Function): Aggregate function class instance or a string for an AUCMEDI Aggregate
+ function.
"""
# Initialize aggregate function if required
if isinstance(aggregate, str) and aggregate in aggregate_dict:
agg_fun = aggregate_dict[aggregate]()
- else : agg_fun = aggregate
+ else:
+ agg_fun = aggregate
# Initialize image augmentation if none provided (only flip, rotate)
if prediction_generator.data_aug is None and len(model.input_shape) == 3:
@@ -102,7 +106,8 @@ def predict_augmenting(model, prediction_generator, n_cycles=10, aggregate="mean
gamma=False, gaussian_noise=False,
gaussian_blur=False, downscaling=False,
elastic_transform=False)
- else : data_aug = prediction_generator.data_aug
+ else:
+ data_aug = prediction_generator.data_aug
# Multiply sample list for prediction according to number of cycles
samples_aug = np.repeat(prediction_generator.samples, n_cycles)
diff --git a/aucmedi/ensemble/bagging.py b/aucmedi/ensemble/bagging.py
index c756c764..42a044b2 100644
--- a/aucmedi/ensemble/bagging.py
+++ b/aucmedi/ensemble/bagging.py
@@ -19,17 +19,21 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import os
+import shutil
import tempfile
-from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger
-from pathos.helpers import mp # instead of 'import multiprocessing as mp'
+
+# Third Party Libraries
import numpy as np
-import shutil
-# Internal libraries
+from pathos.helpers import mp # instead of 'import multiprocessing as mp'
+from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint
+
+# Internal Libraries
from aucmedi import DataGenerator, NeuralNetwork
-from aucmedi.sampling import sampling_kfold
from aucmedi.ensemble.aggregate import aggregate_dict
+from aucmedi.sampling import sampling_kfold
+
#-----------------------------------------------------#
# Ensemble Learning: Bagging #
@@ -85,18 +89,21 @@ class Bagging:
Attention: Metrics are not passed to the processes due to pickling issues.
??? info "Technical Details"
- For the training and inference process, each model will create an individual process via the Python multiprocessing package.
+ For the training and inference process, each model will create an individual process via the Python
+ multiprocessing package.
This is crucial as TensorFlow does not fully support the VRAM memory garbage collection in GPUs,
which is why more and more redundant data pile up with an increasing number of k-fold.
- Via separate processes, it is possible to clean up the TensorFlow environment and rebuild it again for the next fold model.
+        Via separate processes, it is possible to clean up the TensorFlow environment and rebuild it for the next
+ fold model.
??? reference "Reference for Ensemble Learning Techniques"
Dominik Müller, Iñaki Soto-Rey and Frank Kramer. (2022).
An Analysis on Ensemble Learning optimized Medical Image Classification with Deep Convolutional Neural Networks.
arXiv e-print: [https://arxiv.org/abs/2201.11440](https://arxiv.org/abs/2201.11440)
"""
+
def __init__(self, model, k_fold=3):
""" Initialization function for creating a Bagging object.
@@ -121,10 +128,12 @@ def train(self, training_generator, epochs=20, iterations=None,
It is also possible to pass custom Callback classes in order to obtain more information.
- For more information on the fitting process, check out [NeuralNetwork.train()][aucmedi.neural_network.model.NeuralNetwork.train].
+ For more information on the fitting process, check out
+ [NeuralNetwork.train()][aucmedi.neural_network.model.NeuralNetwork.train].
Args:
- training_generator (DataGenerator): A data generator which will be used for training (will be split according to k-fold sampling).
+ training_generator (DataGenerator): A data generator which will be used for training (will be split
+ according to k-fold sampling).
epochs (int): Number of epochs. A single epoch is defined as one iteration through
the complete data set.
iterations (int): Number of iterations (batches) in a single epoch.
@@ -133,7 +142,8 @@ def train(self, training_generator, epochs=20, iterations=None,
transfer_learning (bool): Option whether a transfer learning training should be performed.
Returns:
- history (dict): A history dictionary from a Keras history object which contains several logs.
+ history (dict): A history dictionary from a Keras history object which contains several
+ logs.
"""
temp_dg = training_generator # Template DataGenerator variable for faster access
history_bagging = {} # Final history dictionary
@@ -157,20 +167,22 @@ def train(self, training_generator, epochs=20, iterations=None,
if len(fold) == 4:
(train_x, train_y, test_x, test_y) = fold
data = (train_x, train_y, None, test_x, test_y, None)
- else : data = fold
+ else:
+ data = fold
# Create model specific callback list
callbacks_model = callbacks.copy()
# Extend Callback list
- cb_mc = ModelCheckpoint(os.path.join(self.cache_dir.name,
- "cv_" + str(i) + \
- ".model.keras"),
- monitor="val_loss", verbose=1,
- save_best_only=True, mode="min")
- cb_cl = CSVLogger(os.path.join(self.cache_dir.name,
- "cv_" + str(i) + \
- ".logs.csv"),
- separator=',', append=True)
+ cb_mc = ModelCheckpoint(
+ os.path.join(self.cache_dir.name, "cv_" + str(i) + ".model.keras"),
+ monitor="val_loss", verbose=1,
+ save_best_only=True, mode="min"
+ )
+ cb_cl = CSVLogger(
+                os.path.join(self.cache_dir.name, "cv_" + str(i) + ".logs.csv"),
+ separator=',',
+ append=True
+ )
callbacks_model.extend([cb_mc, cb_cl])
# Gather NeuralNetwork parameters
@@ -204,7 +216,7 @@ def train(self, training_generator, epochs=20, iterations=None,
"loader": temp_dg.sample_loader,
"workers": temp_dg.workers,
"kwargs": temp_dg.kwargs
- }
+ }
# Gather training parameters
parameters_training = {"epochs": epochs,
@@ -212,7 +224,7 @@ def train(self, training_generator, epochs=20, iterations=None,
"callbacks": callbacks_model,
"class_weights": class_weights,
"transfer_learning": transfer_learning
- }
+ }
# Start training process
process_queue = mp.Queue()
@@ -236,13 +248,15 @@ def predict(self, prediction_generator, aggregate="mean",
return_ensemble=False):
""" Prediction function for the Bagging models.
- The fitted models will predict classifications for the provided [DataGenerator][aucmedi.data_processing.data_generator.DataGenerator].
+ The fitted models will predict classifications for the provided
+ [DataGenerator][aucmedi.data_processing.data_generator.DataGenerator].
The inclusion of the Aggregate function can be achieved in multiple ways:
- self-initialization with an AUCMEDI Aggregate function,
- use a string key to call an AUCMEDI Aggregate function by name, or
- - implementing a custom Aggregate function by extending the [AUCMEDI base class for Aggregate functions][aucmedi.ensemble.aggregate.agg_base]
+ - implementing a custom Aggregate function by extending the
+ [AUCMEDI base class for Aggregate functions][aucmedi.ensemble.aggregate.agg_base]
!!! info
Description and list of implemented Aggregate functions can be found here:
@@ -250,27 +264,31 @@ def predict(self, prediction_generator, aggregate="mean",
Args:
prediction_generator (DataGenerator): A data generator which will be used for inference.
- aggregate (str or aggregate Function): Aggregate function class instance or a string for an AUCMEDI Aggregate function.
+ aggregate (str or aggregate Function): Aggregate function class instance or a string for an AUCMEDI
+ Aggregate function.
return_ensemble (bool): Option, whether gathered ensemble of predictions should be returned.
Returns:
- preds (numpy.ndarray): A NumPy array of predictions formatted with shape (n_samples, n_labels).
- ensemble (numpy.ndarray): Optional ensemble of predictions: Will be only passed if `return_ensemble=True`.
- Shape (n_models, n_samples, n_labels).
+ preds (numpy.ndarray): A NumPy array of predictions formatted with shape
+ (n_samples, n_labels).
+ ensemble (numpy.ndarray): Optional ensemble of predictions: Will be only passed if
+ `return_ensemble=True`. Shape (n_models, n_samples, n_labels).
"""
# Verify if there is a linked cache dictionary
- con_tmp = (isinstance(self.cache_dir, tempfile.TemporaryDirectory) and \
+ con_tmp = (isinstance(self.cache_dir, tempfile.TemporaryDirectory) and
os.path.exists(self.cache_dir.name))
- con_var = (self.cache_dir is not None and \
- not isinstance(self.cache_dir, tempfile.TemporaryDirectory) \
+ con_var = (self.cache_dir is not None and
+ not isinstance(self.cache_dir, tempfile.TemporaryDirectory)
and os.path.exists(self.cache_dir))
if not con_tmp and not con_var:
- raise FileNotFoundError("Bagging does not have a valid model cache directory!")
+ raise FileNotFoundError(
+ "Bagging does not have a valid model cache directory!")
# Initialize aggregate function if required
if isinstance(aggregate, str) and aggregate in aggregate_dict:
agg_fun = aggregate_dict[aggregate]()
- else : agg_fun = aggregate
+ else:
+ agg_fun = aggregate
# Initialize some variables
temp_dg = prediction_generator
@@ -295,12 +313,13 @@ def predict(self, prediction_generator, aggregate="mean",
"loader": temp_dg.sample_loader,
"workers": temp_dg.workers,
"kwargs": temp_dg.kwargs
- }
+ }
# Identify path to model directory
if isinstance(self.cache_dir, tempfile.TemporaryDirectory):
path_model_dir = self.cache_dir.name
- else : path_model_dir = self.cache_dir
+ else:
+ path_model_dir = self.cache_dir
# Sequentially iterate over all fold models
for i in range(self.k_fold):
@@ -340,15 +359,17 @@ def predict(self, prediction_generator, aggregate="mean",
# Aggregate predictions
preds_ensemble = np.array(preds_ensemble)
for i in range(0, len(temp_dg.samples)):
- pred_sample = agg_fun.aggregate(preds_ensemble[:,i,:])
+ pred_sample = agg_fun.aggregate(preds_ensemble[:, i, :])
preds_final.append(pred_sample)
# Convert prediction list to NumPy
preds_final = np.asarray(preds_final)
# Return ensembled predictions
- if return_ensemble : return preds_final, preds_ensemble
- else : return preds_final
+ if return_ensemble:
+ return preds_final, preds_ensemble
+ else:
+ return preds_final
# Dump model to file
def dump(self, directory_path):
@@ -361,7 +382,8 @@ def dump(self, directory_path):
directory_path (str): Path to store the model directory on disk.
"""
if self.cache_dir is None:
- raise FileNotFoundError("Bagging does not have a valid model cache directory!")
+ raise FileNotFoundError(
+ "Bagging does not have a valid model cache directory!")
elif isinstance(self.cache_dir, tempfile.TemporaryDirectory):
shutil.copytree(self.cache_dir.name, directory_path,
dirs_exist_ok=True)
@@ -387,11 +409,12 @@ def load(self, directory_path):
path_model = os.path.join(directory_path,
"cv_" + str(i) + ".model.keras")
if not os.path.exists(path_model):
- raise FileNotFoundError("Bagging model for fold " + str(i) + \
+ raise FileNotFoundError("Bagging model for fold " + str(i) +
" does not exist!", path_model)
# Update model directory
self.cache_dir = directory_path
+
#-----------------------------------------------------#
# Subroutines #
#-----------------------------------------------------#
@@ -443,6 +466,7 @@ def __training_process__(queue, model_paras, data, datagen_paras, train_paras):
# Store result in cache (which will be returned by the process queue)
queue.put(cv_history)
+
# Internal function for inference with a fitted NeuralNetwork model in a separate process
def __prediction_process__(queue, model_paras, path_model, datagen_paras):
# Create inference DataGenerator
diff --git a/aucmedi/ensemble/composite.py b/aucmedi/ensemble/composite.py
index e57d11af..00b7a681 100644
--- a/aucmedi/ensemble/composite.py
+++ b/aucmedi/ensemble/composite.py
@@ -19,20 +19,24 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import os
+import shutil
import tempfile
-from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger
-from pathos.helpers import mp # instead of 'import multiprocessing as mp'
+
+# Third Party Libraries
import numpy as np
-import shutil
-# Internal libraries
+from pathos.helpers import mp # instead of 'import multiprocessing as mp'
+from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint
+
+# Internal Libraries
from aucmedi import DataGenerator, NeuralNetwork
-from aucmedi.sampling import sampling_split, sampling_kfold
from aucmedi.ensemble.aggregate import aggregate_dict
+from aucmedi.ensemble.aggregate.agg_base import Aggregate_Base
from aucmedi.ensemble.metalearner import metalearner_dict
from aucmedi.ensemble.metalearner.ml_base import Metalearner_Base
-from aucmedi.ensemble.aggregate.agg_base import Aggregate_Base
+from aucmedi.sampling import sampling_kfold, sampling_split
+
#-----------------------------------------------------#
# Ensemble Learning: Composite #
@@ -77,7 +81,8 @@ class Composite:
```
!!! warning "Training Time Increase"
- Composite sequentially performs fitting processes for multiple models, which will drastically increase training time.
+ Composite sequentially performs fitting processes for multiple models, which will drastically increase training
+ time.
??? warning "DataGenerator re-initialization"
The passed DataGenerator for the train() and predict() function of the Composite class will be re-initialized!
@@ -88,7 +93,8 @@ class Composite:
NeuralNetwork model specific values (`model.meta_standardize` for `standardize_mode` and
`model.meta_input` for `input_shape`).
- If desired (but not recommended!), it is possible to modify the meta variables of the NeuralNetwork model as follows:
+ If desired (but not recommended!), it is possible to modify the meta variables of the NeuralNetwork model as
+ follows:
```python
# For input_shape
model_a = NeuralNetwork(n_labels=4, channels=3, architecture="2D.ResNet50",
@@ -104,27 +110,35 @@ class Composite:
Attention: Metrics are not passed to the processes due to pickling issues.
??? info "Technical Details"
- For the training and inference process, each model will create an individual process via the Python multiprocessing package.
+ For the training and inference process, each model will create an individual process via the Python
+ multiprocessing package.
This is crucial as TensorFlow does not fully support the VRAM memory garbage collection in GPUs,
which is why more and more redundant data pile up with an increasing number of models.
- Via separate processes, it is possible to clean up the TensorFlow environment and rebuild it again for the next model.
+ Via separate processes, it is possible to clean up the TensorFlow environment and rebuild it again for the next
+ model.
"""
+
def __init__(self, model_list, metalearner="logistic_regression",
k_fold=3, sampling=[0.85, 0.15], fixed_datagenerator=False):
""" Initialization function for creating a Composite object.
Args:
model_list (list of NeuralNetwork): List of instances of AUCMEDI neural network class.
- The number of models (`len(model_list)`) have to be equal to `k_fold`.
- metalearner (str, Metalearner or Aggregate):Metalearner class instance / a string for an AUCMEDI Metalearner,
- or Aggregate function / a string for an AUCMEDI Aggregate function.
- k_fold (int): Number of folds (k) for the Cross-Validation. Must be at least 2.
- sampling (list of float): List of percentage values with split sizes. Should be 2x percentage values
- for heterogenous metalearner (must sum up to 1.0).
- fixed_datagenerator (bool): Boolean, whether using fixed parameters of passed DataGenerator or
- using default architecture paramters for Resizing and Standardize.
+ The number of models (`len(model_list)`) has to be equal to
+ `k_fold`.
+ metalearner (str, Metalearner or Aggregate): Metalearner class instance / a string for an AUCMEDI
+ Metalearner, or Aggregate function / a string for an AUCMEDI
+ Aggregate function.
+ k_fold (int): Number of folds (k) for the Cross-Validation. Must be at least
+ 2.
+ sampling (list of float): List of percentage values with split sizes. Should be 2x
+ percentage values for a heterogeneous metalearner (must sum up to
+ 1.0).
+ fixed_datagenerator (bool): Boolean, whether to use the fixed parameters of the passed
+ DataGenerator or the default architecture parameters for Resizing
+ and Standardize.
"""
# Cache class variables
self.model_list = model_list
@@ -141,14 +155,16 @@ def __init__(self, model_list, metalearner="logistic_regression",
elif isinstance(metalearner, str) and metalearner in aggregate_dict:
self.ml_model = aggregate_dict[metalearner]()
elif isinstance(metalearner, Metalearner_Base) or \
- isinstance(metalearner, Aggregate_Base):
+ isinstance(metalearner, Aggregate_Base):
self.ml_model = metalearner
- else : raise TypeError("Unknown type of Metalearner (neither known " + \
- "ensembler nor Aggregate or Metalearner class)!")
+ else:
+ raise TypeError("Unknown type of Metalearner (neither known " +
+ "ensembler nor Aggregate or Metalearner class)!")
# Verify model list length
if k_fold != len(model_list):
- raise ValueError("Length of model_list and k_fold has to be equal!")
+ raise ValueError(
+ "Length of model_list and k_fold has to be equal!")
# Set multiprocessing method to spawn
mp.set_start_method("spawn", force=True)
@@ -165,22 +181,24 @@ def train(self, training_generator, epochs=20, iterations=None,
It is also possible to pass custom Callback classes in order to obtain more information.
- For more information on the fitting process, check out [NeuralNetwork.train()][aucmedi.neural_network.model.NeuralNetwork.train].
+ For more information on the fitting process, check out
+ [NeuralNetwork.train()][aucmedi.neural_network.model.NeuralNetwork.train].
Args:
- training_generator (DataGenerator): A data generator which will be used for training (will be split according
- to percentage split and k-fold cross-validation sampling).
+ training_generator (DataGenerator): A data generator which will be used for training (will be split
+ according to percentage split and k-fold cross-validation sampling).
epochs (int): Number of epochs. A single epoch is defined as one iteration through
the complete data set.
iterations (int): Number of iterations (batches) in a single epoch.
callbacks (list of Callback classes): A list of Callback classes for custom evaluation.
class_weights (dictionary or list): A list or dictionary of float values to handle class unbalance.
transfer_learning (bool): Option whether a transfer learning training should be performed.
- metalearner_fitting (bool): Option whether the Metalearner fitting process should be included in the
- Composite training process. The `train_metalearner()` function can also be
- run manually (or repeatedly).
+ metalearner_fitting (bool): Option whether the Metalearner fitting process should be included in
+ the Composite training process. The `train_metalearner()` function
+ can also be run manually (or repeatedly).
Returns:
- history (dict): A history dictionary from a Keras history object which contains several logs.
+ history (dict): A history dictionary from a Keras history object which contains
+ several logs.
"""
temp_dg = training_generator # Template DataGenerator variable for faster access
history_composite = {} # Final history dictionary
@@ -200,8 +218,10 @@ def train(self, training_generator, epochs=20, iterations=None,
stratified=True, iterative=True,
seed=self.sampling_seed)
# Pack data according to sampling
- if len(ps_sampling[0]) == 3 : x, y, m = ps_sampling[0]
- else : x, y = ps_sampling[0]
+ if len(ps_sampling[0]) == 3:
+ x, y, m = ps_sampling[0]
+ else:
+ x, y = ps_sampling[0]
+ m = None  # no metadata in split; keeps 'm' defined for sampling_kfold()
# Apply cross-validaton sampling
cv_sampling = sampling_kfold(x, y, m, n_splits=self.k_fold,
@@ -214,7 +234,8 @@ def train(self, training_generator, epochs=20, iterations=None,
if len(fold) == 4:
(train_x, train_y, test_x, test_y) = fold
data = (train_x, train_y, None, test_x, test_y, None)
- else : data = fold
+ else:
+ data = fold
# Create model specific callback list
callbacks_model = callbacks.copy()
@@ -225,8 +246,8 @@ def train(self, training_generator, epochs=20, iterations=None,
monitor="val_loss", verbose=1,
save_best_only=True, mode="min")
cb_cl = CSVLogger(os.path.join(self.cache_dir.name,
- "cv_" + str(i) + \
- ".logs.csv"),
+ "cv_" + str(i) +
+ ".logs.csv"),
separator=',', append=True)
callbacks_model.extend([cb_mc, cb_cl])
@@ -261,15 +282,15 @@ def train(self, training_generator, epochs=20, iterations=None,
"loader": temp_dg.sample_loader,
"workers": temp_dg.workers,
"kwargs": temp_dg.kwargs
- }
+ }
# Gather training parameters
parameters_training = {"epochs": epochs,
- "iterations": iterations,
- "callbacks": callbacks_model,
- "class_weights": class_weights,
- "transfer_learning": transfer_learning
- }
+ "iterations": iterations,
+ "callbacks": callbacks_model,
+ "class_weights": class_weights,
+ "transfer_learning": transfer_learning
+ }
# Start training process
process_queue = mp.Queue()
@@ -304,11 +325,12 @@ def train_metalearner(self, training_generator):
re-training of the [NeuralNetwork][aucmedi.neural_network.model] models.
Args:
- training_generator (DataGenerator): A data generator which will be used for training (will be split according
- to percentage split).
+ training_generator (DataGenerator): A data generator which will be used for training (will be split
+ according to percentage split).
"""
# Skipping metalearner training if aggregate function
- if isinstance(self.ml_model, Aggregate_Base) : return
+ if isinstance(self.ml_model, Aggregate_Base):
+ return
temp_dg = training_generator # Template DataGenerator variable for faster access
preds_ensemble = []
@@ -324,13 +346,16 @@ def train_metalearner(self, training_generator):
stratified=True, iterative=True,
seed=self.sampling_seed)
# Pack data according to sampling
- if len(ps_sampling[0]) == 3 : data_ensemble = ps_sampling[1]
- else : data_ensemble = (*ps_sampling[1], None)
+ if len(ps_sampling[0]) == 3:
+ data_ensemble = ps_sampling[1]
+ else:
+ data_ensemble = (*ps_sampling[1], None)
# Identify path to model directory
if isinstance(self.cache_dir, tempfile.TemporaryDirectory):
path_model_dir = self.cache_dir.name
- else : path_model_dir = self.cache_dir
+ else:
+ path_model_dir = self.cache_dir
# Sequentially iterate over model list
for i in range(len(self.model_list)):
@@ -369,7 +394,7 @@ def train_metalearner(self, training_generator):
"loader": temp_dg.sample_loader,
"workers": temp_dg.workers,
"kwargs": temp_dg.kwargs
- }
+ }
# Start inference process for model i
process_queue = mp.Queue()
@@ -417,18 +442,19 @@ def predict(self, prediction_generator, return_ensemble=False):
return_ensemble (bool): Option, whether gathered ensemble of predictions should be returned.
Returns:
- preds (numpy.ndarray): A NumPy array of predictions formatted with shape (n_samples, n_labels).
- ensemble (numpy.ndarray): Optional ensemble of predictions: Will be only passed if `return_ensemble=True`.
- Shape (n_models, n_samples, n_labels).
+ preds (numpy.ndarray): A NumPy array of predictions formatted with shape
+ (n_samples, n_labels).
+ ensemble (numpy.ndarray): Optional ensemble of predictions: will only be returned if
+ `return_ensemble=True`. Shape (n_models, n_samples, n_labels).
"""
# Verify if there is a linked cache dictionary
- con_tmp = (isinstance(self.cache_dir, tempfile.TemporaryDirectory) and \
+ con_tmp = (isinstance(self.cache_dir, tempfile.TemporaryDirectory) and
os.path.exists(self.cache_dir.name))
- con_var = (self.cache_dir is not None and \
- not isinstance(self.cache_dir, tempfile.TemporaryDirectory) \
+ con_var = (self.cache_dir is not None and
+ not isinstance(self.cache_dir, tempfile.TemporaryDirectory)
and os.path.exists(self.cache_dir))
if not con_tmp and not con_var:
- raise FileNotFoundError("Composite instance does not have a valid" \
+ raise FileNotFoundError("Composite instance does not have a valid"
+ "model cache directory!")
# Initialize some variables
@@ -442,7 +468,8 @@ def predict(self, prediction_generator, return_ensemble=False):
# Identify path to model directory
if isinstance(self.cache_dir, tempfile.TemporaryDirectory):
path_model_dir = self.cache_dir.name
- else : path_model_dir = self.cache_dir
+ else:
+ path_model_dir = self.cache_dir
# Sequentially iterate over model list
for i in range(len(self.model_list)):
@@ -480,7 +507,7 @@ def predict(self, prediction_generator, return_ensemble=False):
"loader": temp_dg.sample_loader,
"workers": temp_dg.workers,
"kwargs": temp_dg.kwargs
- }
+ }
# Start inference process for model i
process_queue = mp.Queue()
@@ -509,15 +536,17 @@ def predict(self, prediction_generator, return_ensemble=False):
# Apply homogeneous aggregate function
elif isinstance(self.ml_model, Aggregate_Base):
for i in range(preds_ensemble.shape[0]):
- pred_sample = self.ml_model.aggregate(preds_ensemble[i,:,:])
+ pred_sample = self.ml_model.aggregate(preds_ensemble[i, :, :])
preds_final.append(pred_sample)
# Convert prediction list to NumPy
preds_final = np.asarray(preds_final)
# Return ensembled predictions
- if return_ensemble : return preds_final, np.swapaxes(preds_ensemble,1,0)
- else : return preds_final
+ if return_ensemble:
+ return preds_final, np.swapaxes(preds_ensemble, 1, 0)
+ else:
+ return preds_final
# Dump model to file
def dump(self, directory_path):
@@ -530,7 +559,8 @@ def dump(self, directory_path):
directory_path (str): Path to store the model directory on disk.
"""
if self.cache_dir is None:
- raise FileNotFoundError("Composite does not have a valid model cache directory!")
+ raise FileNotFoundError(
+ "Composite does not have a valid model cache directory!")
elif isinstance(self.cache_dir, tempfile.TemporaryDirectory):
shutil.copytree(self.cache_dir.name, directory_path,
dirs_exist_ok=True)
@@ -556,7 +586,7 @@ def load(self, directory_path):
path_model = os.path.join(directory_path,
"cv_" + str(i) + ".model.keras")
if not os.path.exists(path_model):
- raise FileNotFoundError("Composite model " + str(i) + \
+ raise FileNotFoundError("Composite model " + str(i) +
" does not exist!", path_model)
# If heterogenous metalearner -> load metalearner model file
if isinstance(self.ml_model, Metalearner_Base):
@@ -570,9 +600,10 @@ def load(self, directory_path):
# Update model directory
self.cache_dir = directory_path
+
#-----------------------------------------------------#
# Subroutines #
#-----------------------------------------------------#
# Internal function for training a NeuralNetwork model in a separate process
def __training_process__(queue, data, model_paras, datagen_paras, train_paras):
# Extract data
@@ -622,6 +653,7 @@ def __training_process__(queue, data, model_paras, datagen_paras, train_paras):
# Store result in cache (which will be returned by the process queue)
queue.put(cv_history)
+
# Internal function for inference with a fitted NeuralNetwork model in a separate process
def __prediction_process__(queue, model_paras, path_model, data_test,
datagen_paras):
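To make the refactored Composite workflow concrete, a hedged sketch following the signatures visible in this file; `model_a`..`model_c`, `train_gen`, and `test_gen` are hypothetical, already configured objects:

```python
from aucmedi.ensemble.composite import Composite

# Three NeuralNetwork instances, one per cross-validation fold (len(model_list) == k_fold)
composite = Composite(model_list=[model_a, model_b, model_c],
                      metalearner="logistic_regression", k_fold=3)
history = composite.train(train_gen, epochs=10)
preds, ensemble = composite.predict(test_gen, return_ensemble=True)
composite.dump("models/composite")  # stores cv_<i>.model.keras + metalearner file
```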
diff --git a/aucmedi/ensemble/metalearner/averaging_mean_weighted.py b/aucmedi/ensemble/metalearner/averaging_mean_weighted.py
index 6c3bbc80..dbacdf2e 100644
--- a/aucmedi/ensemble/metalearner/averaging_mean_weighted.py
+++ b/aucmedi/ensemble/metalearner/averaging_mean_weighted.py
@@ -19,13 +19,17 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import pickle
-from sklearn.metrics import roc_auc_score
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+from sklearn.metrics import roc_auc_score
+
+# Internal Libraries
from aucmedi.ensemble.metalearner.ml_base import Metalearner_Base
+
#-----------------------------------------------------#
# Metalearner: Weighted Mean #
#-----------------------------------------------------#
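The same three-heading import layout recurs verbatim across all metalearner modules below; for clarity, this is the target layout these hunks converge on (shown with the Weighted Mean module's imports):

```python
# Python Standard Library
import pickle

# Third Party Libraries
import numpy as np
from sklearn.metrics import roc_auc_score

# Internal Libraries
from aucmedi.ensemble.metalearner.ml_base import Metalearner_Base
```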
diff --git a/aucmedi/ensemble/metalearner/best_model.py b/aucmedi/ensemble/metalearner/best_model.py
index 645f8357..ddc68db2 100644
--- a/aucmedi/ensemble/metalearner/best_model.py
+++ b/aucmedi/ensemble/metalearner/best_model.py
@@ -19,13 +19,17 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import pickle
-from sklearn.metrics import roc_auc_score
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+from sklearn.metrics import roc_auc_score
+
+# Internal Libraries
from aucmedi.ensemble.metalearner.ml_base import Metalearner_Base
+
#-----------------------------------------------------#
# Metalearner: Best Model #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/metalearner/decision_tree.py b/aucmedi/ensemble/metalearner/decision_tree.py
index 8a74b13c..617d4599 100644
--- a/aucmedi/ensemble/metalearner/decision_tree.py
+++ b/aucmedi/ensemble/metalearner/decision_tree.py
@@ -19,13 +19,17 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import pickle
-from sklearn.tree import DecisionTreeClassifier
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+from sklearn.tree import DecisionTreeClassifier
+
+# Internal Libraries
from aucmedi.ensemble.metalearner.ml_base import Metalearner_Base
+
#-----------------------------------------------------#
# Metalearner: Decision Tree #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/metalearner/gaussian_process.py b/aucmedi/ensemble/metalearner/gaussian_process.py
index e8726919..c68d5c92 100644
--- a/aucmedi/ensemble/metalearner/gaussian_process.py
+++ b/aucmedi/ensemble/metalearner/gaussian_process.py
@@ -19,13 +19,17 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import pickle
-from sklearn.gaussian_process import GaussianProcessClassifier
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+from sklearn.gaussian_process import GaussianProcessClassifier
+
+# Internal Libraries
from aucmedi.ensemble.metalearner.ml_base import Metalearner_Base
+
#-----------------------------------------------------#
# Metalearner: Gaussian Process #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/metalearner/k_neighbors.py b/aucmedi/ensemble/metalearner/k_neighbors.py
index 540c127d..54e6978b 100644
--- a/aucmedi/ensemble/metalearner/k_neighbors.py
+++ b/aucmedi/ensemble/metalearner/k_neighbors.py
@@ -19,13 +19,17 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import pickle
-from sklearn.neighbors import KNeighborsClassifier
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+from sklearn.neighbors import KNeighborsClassifier
+
+# Internal Libraries
from aucmedi.ensemble.metalearner.ml_base import Metalearner_Base
+
#-----------------------------------------------------#
# Metalearner: k-Nearest Neighbors #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/metalearner/logistic_regression.py b/aucmedi/ensemble/metalearner/logistic_regression.py
index f92847d9..2a082d17 100644
--- a/aucmedi/ensemble/metalearner/logistic_regression.py
+++ b/aucmedi/ensemble/metalearner/logistic_regression.py
@@ -19,13 +19,17 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import pickle
-from sklearn.linear_model import LogisticRegression as LRscikit
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+from sklearn.linear_model import LogisticRegression as LRscikit
+
+# Internal Libraries
from aucmedi.ensemble.metalearner.ml_base import Metalearner_Base
+
#-----------------------------------------------------#
# Metalearner: Logistic Regression #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/metalearner/ml_base.py b/aucmedi/ensemble/metalearner/ml_base.py
index adca0c1c..4de2694d 100644
--- a/aucmedi/ensemble/metalearner/ml_base.py
+++ b/aucmedi/ensemble/metalearner/ml_base.py
@@ -19,9 +19,10 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
from abc import ABC, abstractmethod
+
#-----------------------------------------------------#
# Abstract Base Class for Metalearner #
#-----------------------------------------------------#
@@ -77,7 +78,8 @@ def train(self, x, y):
""" Training function to fit the Metalearner model.
Args:
- x (numpy.ndarray): Assembled prediction dataset encoded in a NumPy matrix with shape (N_samples, N_classes*N_models).
+ x (numpy.ndarray): Assembled prediction dataset encoded in a NumPy matrix with shape
+ (N_samples, N_classes*N_models).
y (numpy.ndarray): Classification list with One-Hot Encoding. Provided by
[input_interface][aucmedi.data_processing.io_data.input_interface].
"""
@@ -94,7 +96,8 @@ def predict(self, data):
It is possible to pass configurations through the initialization function for this class.
Args:
- data (numpy.ndarray): Assembled predictions encoded in a NumPy matrix with shape (N_models, N_classes).
+ data (numpy.ndarray): Assembled predictions encoded in a NumPy matrix with shape
+ (N_models, N_classes).
Returns:
pred (numpy.ndarray): Merged prediction encoded in a NumPy matrix with shape (1, N_classes).
"""
diff --git a/aucmedi/ensemble/metalearner/mlp.py b/aucmedi/ensemble/metalearner/mlp.py
index 1df9fba7..eabe48c4 100644
--- a/aucmedi/ensemble/metalearner/mlp.py
+++ b/aucmedi/ensemble/metalearner/mlp.py
@@ -19,13 +19,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import pickle
+
+# Third Party Libraries
from sklearn.neural_network import MLPClassifier
-import numpy as np
-# Internal libraries/scripts
+
+# Internal Libraries
from aucmedi.ensemble.metalearner.ml_base import Metalearner_Base
+
#-----------------------------------------------------#
# Metalearner: MLP Neural Network #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/metalearner/naive_bayes.py b/aucmedi/ensemble/metalearner/naive_bayes.py
index 128f1cfb..698b21be 100644
--- a/aucmedi/ensemble/metalearner/naive_bayes.py
+++ b/aucmedi/ensemble/metalearner/naive_bayes.py
@@ -19,13 +19,17 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import pickle
-from sklearn.naive_bayes import ComplementNB
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+from sklearn.naive_bayes import ComplementNB
+
+# Internal Libraries
from aucmedi.ensemble.metalearner.ml_base import Metalearner_Base
+
#-----------------------------------------------------#
# Metalearner: Naive Bayes #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/metalearner/random_forest.py b/aucmedi/ensemble/metalearner/random_forest.py
index 32603361..4f313d4e 100644
--- a/aucmedi/ensemble/metalearner/random_forest.py
+++ b/aucmedi/ensemble/metalearner/random_forest.py
@@ -19,13 +19,17 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import pickle
-from sklearn.ensemble import RandomForestClassifier
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+from sklearn.ensemble import RandomForestClassifier
+
+# Internal Libraries
from aucmedi.ensemble.metalearner.ml_base import Metalearner_Base
+
#-----------------------------------------------------#
# Metalearner: Random Forest #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/metalearner/support_vector_machine.py b/aucmedi/ensemble/metalearner/support_vector_machine.py
index 3fe81580..5483bf4b 100644
--- a/aucmedi/ensemble/metalearner/support_vector_machine.py
+++ b/aucmedi/ensemble/metalearner/support_vector_machine.py
@@ -19,13 +19,17 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import pickle
-from sklearn.svm import SVC
+
+# Third Party Libraries
import numpy as np
-# Internal libraries/scripts
+from sklearn.svm import SVC
+
+# Internal Libraries
from aucmedi.ensemble.metalearner.ml_base import Metalearner_Base
+
#-----------------------------------------------------#
# Metalearner: Support Vector Machine #
#-----------------------------------------------------#
diff --git a/aucmedi/ensemble/stacking.py b/aucmedi/ensemble/stacking.py
index 46225d9a..050765f7 100644
--- a/aucmedi/ensemble/stacking.py
+++ b/aucmedi/ensemble/stacking.py
@@ -19,20 +19,24 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
import os
+import shutil
import tempfile
-from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger
-from pathos.helpers import mp # instead of 'import multiprocessing as mp'
+
+# Third Party Libraries
import numpy as np
-import shutil
-# Internal libraries
+from pathos.helpers import mp # instead of 'import multiprocessing as mp'
+from tensorflow.keras.callbacks import CSVLogger, ModelCheckpoint
+
+# Internal Libraries
from aucmedi import DataGenerator, NeuralNetwork
-from aucmedi.sampling import sampling_split
from aucmedi.ensemble.aggregate import aggregate_dict
+from aucmedi.ensemble.aggregate.agg_base import Aggregate_Base
from aucmedi.ensemble.metalearner import metalearner_dict
from aucmedi.ensemble.metalearner.ml_base import Metalearner_Base
-from aucmedi.ensemble.aggregate.agg_base import Aggregate_Base
+from aucmedi.sampling import sampling_split
+
#-----------------------------------------------------#
# Ensemble Learning: Stacking #
@@ -40,10 +44,10 @@
class Stacking:
""" A Stacking class providing functionality for metalearner based ensemble learning.
- In contrast to single algorithm approaches, the ensemble of different deep convolutional neural network architectures
- (also called heterogeneous ensemble learning) showed strong benefits for overall performance in several studies.
- The idea of the Stacking technique is to utilize diverse and independent models by stacking another machine learning
- algorithm on top of these predictions.
+ In contrast to single algorithm approaches, the ensemble of different deep convolutional neural network
+ architectures (also called heterogeneous ensemble learning) showed strong benefits for overall performance in
+ several studies. The idea of the Stacking technique is to utilize diverse and independent models by stacking another
+ machine learning algorithm on top of these predictions.
In AUCMEDI, a percentage split is applied on the dataset into the subsets: train, validation and ensemble.
@@ -77,7 +81,8 @@ class Stacking:
```
!!! warning "Training Time Increase"
- Stacking sequentially performs fitting processes for multiple models, which will drastically increase training time.
+ Stacking sequentially performs fitting processes for multiple models, which will drastically increase training
+ time.
??? warning "DataGenerator re-initialization"
The passed DataGenerator for the train() and predict() function of the Stacking class will be re-initialized!
@@ -88,7 +93,8 @@ class Stacking:
NeuralNetwork model specific values (`model.meta_standardize` for `standardize_mode` and
`model.meta_input` for `input_shape`).
- If desired (but not recommended!), it is possible to modify the meta variables of the NeuralNetwork model as follows:
+ If desired (but not recommended!), it is possible to modify the meta variables of the NeuralNetwork model as
+ follows:
```python
# For input_shape
model_a = NeuralNetwork(n_labels=4, channels=3, architecture="2D.ResNet50",
@@ -104,29 +110,33 @@ class Stacking:
Attention: Metrics are not passed to the processes due to pickling issues.
??? info "Technical Details"
- For the training and inference process, each model will create an individual process via the Python multiprocessing package.
+ For the training and inference process, each model will create an individual process via the Python
+ multiprocessing package.
This is crucial as TensorFlow does not fully support the VRAM memory garbage collection in GPUs,
which is why more and more redundant data pile up with an increasing number of models.
- Via separate processes, it is possible to clean up the TensorFlow environment and rebuild it again for the next model.
+ Via separate processes, it is possible to clean up the TensorFlow environment and rebuild it again for the next
+ model.
??? reference "Reference for Ensemble Learning Techniques"
Dominik Müller, Iñaki Soto-Rey and Frank Kramer. (2022).
An Analysis on Ensemble Learning optimized Medical Image Classification with Deep Convolutional Neural Networks.
arXiv e-print: [https://arxiv.org/abs/2201.11440](https://arxiv.org/abs/2201.11440)
"""
+
def __init__(self, model_list, metalearner="logistic_regression",
sampling=[0.7, 0.1, 0.2]):
""" Initialization function for creating a Stacking object.
Args:
model_list (list of NeuralNetwork): List of instances of AUCMEDI neural network class.
- metalearner (str, Metalearner or Aggregate):Metalearner class instance / a string for an AUCMEDI Metalearner,
- or Aggregate function / a string for an AUCMEDI Aggregate function.
- sampling (list of float): List of percentage values with split sizes. Should be 3x percentage values
- for heterogenous metalearner and 2x percentage values for homogeneous
- Aggregate functions (must sum up to 1.0).
+ metalearner (str, Metalearner or Aggregate): Metalearner class instance / a string for an AUCMEDI
+ Metalearner, or Aggregate function / a string for an AUCMEDI
+ Aggregate function.
+ sampling (list of float): List of percentage values with split sizes. Should be 3x
+ percentage values for a heterogeneous metalearner and 2x percentage
+ values for homogeneous Aggregate functions (must sum up to 1.0).
"""
# Cache class variables
self.model_list = model_list
@@ -141,10 +151,11 @@ def __init__(self, model_list, metalearner="logistic_regression",
elif isinstance(metalearner, str) and metalearner in aggregate_dict:
self.ml_model = aggregate_dict[metalearner]()
elif isinstance(metalearner, Metalearner_Base) or \
- isinstance(metalearner, Aggregate_Base):
+ isinstance(metalearner, Aggregate_Base):
self.ml_model = metalearner
- else : raise TypeError("Unknown type of Metalearner (neither known " + \
- "ensembler nor Aggregate or Metalearner class)!")
+ else:
+ raise TypeError("Unknown type of Metalearner (neither known " +
+ "ensembler nor Aggregate or Metalearner class)!")
# Set multiprocessing method to spawn
mp.set_start_method("spawn", force=True)
@@ -160,22 +171,24 @@ def train(self, training_generator, epochs=20, iterations=None,
It is also possible to pass custom Callback classes in order to obtain more information.
- For more information on the fitting process, check out [NeuralNetwork.train()][aucmedi.neural_network.model.NeuralNetwork.train].
+ For more information on the fitting process, check out
+ [NeuralNetwork.train()][aucmedi.neural_network.model.NeuralNetwork.train].
Args:
- training_generator (DataGenerator): A data generator which will be used for training (will be split according
- to percentage split sampling).
+ training_generator (DataGenerator): A data generator which will be used for training (will be split
+ according to percentage split sampling).
epochs (int): Number of epochs. A single epoch is defined as one iteration through
the complete data set.
iterations (int): Number of iterations (batches) in a single epoch.
callbacks (list of Callback classes): A list of Callback classes for custom evaluation.
class_weights (dictionary or list): A list or dictionary of float values to handle class unbalance.
transfer_learning (bool): Option whether a transfer learning training should be performed.
- metalearner_fitting (bool): Option whether the Metalearner fitting process should be included in the
- Stacking training process. The `train_metalearner()` function can also be
- run manually (or repeatedly).
+ metalearner_fitting (bool): Option whether the Metalearner fitting process should be included in
+ the Stacking training process. The `train_metalearner()` function
+ can also be run manually (or repeatedly).
Returns:
- history (dict): A history dictionary from a Keras history object which contains several logs.
+ history (dict): A history dictionary from a Keras history object which contains several
+ logs.
"""
temp_dg = training_generator # Template DataGenerator variable for faster access
history_stacking = {} # Final history dictionary
@@ -213,8 +226,8 @@ def train(self, training_generator, epochs=20, iterations=None,
monitor="val_loss", verbose=1,
save_best_only=True, mode="min")
cb_cl = CSVLogger(os.path.join(self.cache_dir.name,
- "nn_" + str(i) + \
- ".logs.csv"),
+ "nn_" + str(i) +
+ ".logs.csv"),
separator=',', append=True)
callbacks_model.extend([cb_mc, cb_cl])
@@ -249,15 +262,15 @@ def train(self, training_generator, epochs=20, iterations=None,
"loader": temp_dg.sample_loader,
"workers": temp_dg.workers,
"kwargs": temp_dg.kwargs
- }
+ }
# Gather training parameters
parameters_training = {"epochs": epochs,
- "iterations": iterations,
- "callbacks": callbacks_model,
- "class_weights": class_weights,
- "transfer_learning": transfer_learning
- }
+ "iterations": iterations,
+ "callbacks": callbacks_model,
+ "class_weights": class_weights,
+ "transfer_learning": transfer_learning
+ }
# Start training process
process_queue = mp.Queue()
@@ -292,11 +305,12 @@ def train_metalearner(self, training_generator):
re-training of the [NeuralNetwork][aucmedi.neural_network.model] models.
Args:
- training_generator (DataGenerator): A data generator which will be used for training (will be split according
- to percentage split sampling).
+ training_generator (DataGenerator): A data generator which will be used for training (will be split
+ according to percentage split sampling).
"""
# Skipping metalearner training if aggregate function
- if isinstance(self.ml_model, Aggregate_Base) : return
+ if isinstance(self.ml_model, Aggregate_Base):
+ return
temp_dg = training_generator # Template DataGenerator variable for faster access
preds_ensemble = []
@@ -312,13 +326,16 @@ def train_metalearner(self, training_generator):
seed=self.sampling_seed)
# Pack data according to sampling
- if len(ps_sampling[0]) == 3 : data_ensemble = ps_sampling[2]
- else : data_ensemble = (*ps_sampling[2], None)
+ if len(ps_sampling[0]) == 3:
+ data_ensemble = ps_sampling[2]
+ else:
+ data_ensemble = (*ps_sampling[2], None)
# Identify path to model directory
if isinstance(self.cache_dir, tempfile.TemporaryDirectory):
path_model_dir = self.cache_dir.name
- else : path_model_dir = self.cache_dir
+ else:
+ path_model_dir = self.cache_dir
# Sequentially iterate over model list
for i in range(len(self.model_list)):
@@ -357,7 +374,7 @@ def train_metalearner(self, training_generator):
"loader": temp_dg.sample_loader,
"workers": temp_dg.workers,
"kwargs": temp_dg.kwargs
- }
+ }
# Start inference process for model i
process_queue = mp.Queue()
@@ -405,18 +422,20 @@ def predict(self, prediction_generator, return_ensemble=False):
return_ensemble (bool): Option, whether gathered ensemble of predictions should be returned.
Returns:
- preds (numpy.ndarray): A NumPy array of predictions formatted with shape (n_samples, n_labels).
- ensemble (numpy.ndarray): Optional ensemble of predictions: Will be only passed if `return_ensemble=True`.
- Shape (n_models, n_samples, n_labels).
+ preds (numpy.ndarray): A NumPy array of predictions formatted with shape
+ (n_samples, n_labels).
+ ensemble (numpy.ndarray): Optional ensemble of predictions: will only be returned if
+ `return_ensemble=True`. Shape (n_models, n_samples, n_labels).
"""
# Verify if there is a linked cache dictionary
- con_tmp = (isinstance(self.cache_dir, tempfile.TemporaryDirectory) and \
+ con_tmp = (isinstance(self.cache_dir, tempfile.TemporaryDirectory) and
os.path.exists(self.cache_dir.name))
- con_var = (self.cache_dir is not None and \
- not isinstance(self.cache_dir, tempfile.TemporaryDirectory) \
+ con_var = (self.cache_dir is not None and
+ not isinstance(self.cache_dir, tempfile.TemporaryDirectory)
and os.path.exists(self.cache_dir))
if not con_tmp and not con_var:
- raise FileNotFoundError("Stacking does not have a valid model cache directory!")
+ raise FileNotFoundError(
+ "Stacking does not have a valid model cache directory!")
# Initialize some variables
temp_dg = prediction_generator
@@ -429,7 +448,8 @@ def predict(self, prediction_generator, return_ensemble=False):
# Identify path to model directory
if isinstance(self.cache_dir, tempfile.TemporaryDirectory):
path_model_dir = self.cache_dir.name
- else : path_model_dir = self.cache_dir
+ else:
+ path_model_dir = self.cache_dir
# Sequentially iterate over model list
for i in range(len(self.model_list)):
@@ -467,7 +487,7 @@ def predict(self, prediction_generator, return_ensemble=False):
"loader": temp_dg.sample_loader,
"workers": temp_dg.workers,
"kwargs": temp_dg.kwargs
- }
+ }
# Start inference process for model i
process_queue = mp.Queue()
@@ -496,15 +516,17 @@ def predict(self, prediction_generator, return_ensemble=False):
# Apply homogeneous aggregate function
elif isinstance(self.ml_model, Aggregate_Base):
for i in range(preds_ensemble.shape[0]):
- pred_sample = self.ml_model.aggregate(preds_ensemble[i,:,:])
+ pred_sample = self.ml_model.aggregate(preds_ensemble[i, :, :])
preds_final.append(pred_sample)
# Convert prediction list to NumPy
preds_final = np.asarray(preds_final)
# Return ensembled predictions
- if return_ensemble : return preds_final, np.swapaxes(preds_ensemble,1,0)
- else : return preds_final
+ if return_ensemble:
+ return preds_final, np.swapaxes(preds_ensemble, 1, 0)
+ else:
+ return preds_final
# Dump model to file
def dump(self, directory_path):
@@ -517,7 +539,8 @@ def dump(self, directory_path):
directory_path (str): Path to store the model directory on disk.
"""
if self.cache_dir is None:
- raise FileNotFoundError("Stacking does not have a valid model cache directory!")
+ raise FileNotFoundError(
+ "Stacking does not have a valid model cache directory!")
elif isinstance(self.cache_dir, tempfile.TemporaryDirectory):
shutil.copytree(self.cache_dir.name, directory_path,
dirs_exist_ok=True)
@@ -543,7 +566,7 @@ def load(self, directory_path):
path_model = os.path.join(directory_path,
"nn_" + str(i) + ".model.keras")
if not os.path.exists(path_model):
- raise FileNotFoundError("Stacking model " + str(i) + \
+ raise FileNotFoundError("Stacking model " + str(i) +
" does not exist!", path_model)
# If heterogenous metalearner -> load metalearner model file
if isinstance(self.ml_model, Metalearner_Base):
@@ -557,6 +580,7 @@ def load(self, directory_path):
# Update model directory
self.cache_dir = directory_path
+
#-----------------------------------------------------#
# Subroutines #
#-----------------------------------------------------#
@@ -611,6 +635,7 @@ def __training_process__(queue, model_paras, data_train, data_val,
# Store result in cache (which will be returned by the process queue)
queue.put(nn_history)
+
# Internal function for inference with a fitted NeuralNetwork model in a separate process
def __prediction_process__(queue, model_paras, path_model, data_test,
datagen_paras):
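Analogous to Composite, a hedged usage sketch of the refactored Stacking class, following the constructor and method signatures visible in this file; `model_a`..`model_c`, `train_gen`, and `test_gen` are hypothetical:

```python
from aucmedi.ensemble.stacking import Stacking

# Diverse NeuralNetwork architectures for heterogeneous ensemble learning
stacking = Stacking(model_list=[model_a, model_b, model_c],
                    metalearner="logistic_regression",
                    sampling=[0.7, 0.1, 0.2])  # train / validation / ensemble
history = stacking.train(train_gen, epochs=10)
preds = stacking.predict(test_gen)
stacking.dump("models/stacking")  # stores nn_<i>.model.keras + metalearner file
```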
diff --git a/aucmedi/evaluation/__init__.py b/aucmedi/evaluation/__init__.py
index 9fc77adc..47431d09 100644
--- a/aucmedi/evaluation/__init__.py
+++ b/aucmedi/evaluation/__init__.py
@@ -33,7 +33,7 @@
| [Performance Evaluation][aucmedi.evaluation.performance] | Evaluate the performance of a single model / prediction list through various metrics. |
| [Performance Comparison][aucmedi.evaluation.comparison] | Compare the performance of predictions from multiple models. |
-"""
+""" # noqa 501
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
@@ -41,3 +41,11 @@
from aucmedi.evaluation.performance import evaluate_performance
from aucmedi.evaluation.comparison import evaluate_comparison
from aucmedi.evaluation.dataset import evaluate_dataset
+
+
+__all__ = [
+ "evaluate_fitting",
+ "evaluate_performance",
+ "evaluate_comparison",
+ "evaluate_dataset"
+]
diff --git a/aucmedi/evaluation/comparison.py b/aucmedi/evaluation/comparison.py
index 12e28037..aa4e0e79 100644
--- a/aucmedi/evaluation/comparison.py
+++ b/aucmedi/evaluation/comparison.py
@@ -19,13 +19,13 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
-import numpy as np
+# Third Party Libraries
import pandas as pd
-import os
-from plotnine import *
-# Internal libraries/scripts
-from aucmedi.evaluation.metrics import *
+import plotnine as p9
+
+# Internal Libraries
+from aucmedi.evaluation.metrics import compute_metrics
+
#-----------------------------------------------------#
# Evaluation - Compare Performance #
@@ -101,20 +101,22 @@ def evaluate_comparison(pred_list,
Predictions based on [ISIC 2019 Challenge](https://challenge.isic-archive.com/landing/2019/).
Args:
- pred_list (list of numpy.ndarray): A list of NumPy arrays containing predictions from multiple models formatted with shape
- (n_models, n_samples, n_labels). Provided by [NeuralNetwork][aucmedi.neural_network.model].
+ pred_list (list of numpy.ndarray): A list of NumPy arrays containing predictions from multiple models formatted
+ with shape (n_models, n_samples, n_labels). Provided by
+ [NeuralNetwork][aucmedi.neural_network.model].
labels (numpy.ndarray): Classification list with One-Hot Encoding. Provided by
[input_interface][aucmedi.data_processing.io_data.input_interface].
out_path (str): Path to directory in which plotted figures are stored.
- model_names (list of str): List of names for corresponding models which are for visualization. If not provided (`None`
- provided), model index of `pred_list` will be used.
+ model_names (list of str): List of names for the corresponding models, used for visualization. If not
+ provided (`None` provided), model index of `pred_list` will be used.
class_names (list of str): List of names for corresponding classes. Used for evaluation. Provided by
[input_interface][aucmedi.data_processing.io_data.input_interface].
If not provided (`None` provided), class indices will be used.
multi_label (bool): Option, whether task is multi-label based (has impact on evaluation).
- metrics_threshold (float): Only required if 'multi_label==True`. Threshold value if prediction is positive.
- Used in metric computation for CSV and bar plot.
- macro_average_classes (bool): Option, whether classes should be macro-averaged in order to increase visualization overview.
+ metrics_threshold (float): Only required if `multi_label==True`. Threshold value if prediction is
+ positive. Used in metric computation for CSV and bar plot.
+ macro_average_classes (bool): Option, whether classes should be macro-averaged in order to provide
+ a clearer visualization overview.
suffix (str): Special suffix to add in the created figure filename.
Returns:
@@ -124,8 +126,10 @@ def evaluate_comparison(pred_list,
# Identify number of labels
n_labels = labels.shape[-1]
# Identify prediction threshold
- if multi_label : threshold = metrics_threshold
- else : threshold = None
+ if multi_label:
+ threshold = metrics_threshold
+ else:
+ threshold = None
# Compute metric dataframe for each mode
df_list = []
@@ -142,12 +146,15 @@ def evaluate_comparison(pred_list,
metrics["class"] = pd.Categorical(metrics["class"])
# Assign model name to dataframe
- if model_names is not None : metrics["model"] = model_names[m]
- else : metrics["model"] = "model_" + str(m)
+ if model_names is not None:
+ metrics["model"] = model_names[m]
+ else:
+ metrics["model"] = "model_" + str(m)
# Optional: Macro average classes
if macro_average_classes:
- metrics_avg = metrics.groupby(["metric", "model"])[["score"]].mean()
+ metrics_avg = (metrics.groupby(["metric", "model"])[["score"]]
+ .mean())
metrics = metrics_avg.reset_index()
# Append to dataframe list
@@ -164,6 +171,7 @@ def evaluate_comparison(pred_list,
# Return combined and gain dataframe
return df_merged, df_gain
+
#-----------------------------------------------------#
# Evaluation Comparison - Beside #
#-----------------------------------------------------#
@@ -173,37 +181,39 @@ def evalby_beside(df, out_path, suffix=None):
# Plot metric results class-wise
if "class" in df.columns:
- fig = (ggplot(df, aes("model", "score", fill="model"))
- + geom_col(stat='identity', width=0.6, color="black",
- position = position_dodge(width=0.6))
- + ggtitle("Performance Comparison: Metric Overview")
- + facet_grid("metric ~ class")
- + coord_flip()
- + xlab("")
- + ylab("Score")
- + scale_y_continuous(limits=[0, 1])
- + theme_bw()
- + theme(legend_position="none"))
+ fig = (p9.ggplot(df, p9.aes("model", "score", fill="model"))
+ + p9.geom_col(stat='identity', width=0.6, color="black",
+ position=p9.position_dodge(width=0.6))
+ + p9.ggtitle("Performance Comparison: Metric Overview")
+ + p9.facet_grid("metric ~ class")
+ + p9.coord_flip()
+ + p9.xlab("")
+ + p9.ylab("Score")
+ + p9.scale_y_continuous(limits=[0, 1])
+ + p9.theme_bw()
+ + p9.theme(legend_position="none"))
# Plot metric results class macro-averaged
else:
- fig = (ggplot(df, aes("model", "score", fill="model"))
- + geom_col(stat='identity', width=0.6, color="black",
- position = position_dodge(width=0.6))
- + ggtitle("Performance Comparison: Metric Overview")
- + facet_wrap("metric")
- + coord_flip()
- + xlab("")
- + ylab("Score")
- + scale_y_continuous(limits=[0, 1])
- + theme_bw()
- + theme(legend_position="none"))
+ fig = (p9.ggplot(df, p9.aes("model", "score", fill="model"))
+ + p9.geom_col(stat='identity', width=0.6, color="black",
+ position=p9.position_dodge(width=0.6))
+ + p9.ggtitle("Performance Comparison: Metric Overview")
+ + p9.facet_wrap("metric")
+ + p9.coord_flip()
+ + p9.xlab("")
+ + p9.ylab("Score")
+ + p9.scale_y_continuous(limits=[0, 1])
+ + p9.theme_bw()
+ + p9.theme(legend_position="none"))
# Store figure to disk
filename = "plot.comparison.beside"
- if suffix is not None : filename += "." + str(suffix)
+ if suffix is not None:
+ filename += "." + str(suffix)
filename += ".png"
fig.save(filename=filename, path=out_path, width=18, height=9, dpi=300)
+
#-----------------------------------------------------#
# Evaluation Comparison - Gain #
#-----------------------------------------------------#
@@ -218,7 +228,7 @@ def compute_gain(row, template):
# Obtain class-wise divisor
if "class" in row.index:
c = row["class"]
- div = template.loc[(template["metric"] == m) & \
+ div = template.loc[(template["metric"] == m) &
(template["class"] == c)]["score"].values[0]
# Obtain macro-averaged divisor
else:
@@ -234,32 +244,35 @@ def compute_gain(row, template):
# Plot gain results class-wise
if "class" in df.columns:
- fig = (ggplot(df, aes("model", "score", fill="model"))
- + geom_col(stat='identity', width=0.6, color="black",
- position = position_dodge(width=0.2))
- + ggtitle("Performance Gain compared to Model: " + str(first_model))
- + facet_grid("metric ~ class")
- + coord_flip()
- + xlab("")
- + ylab("Performance Gain in Percent (%)")
- + theme_bw()
- + theme(legend_position="none"))
+ fig = (p9.ggplot(df, p9.aes("model", "score", fill="model"))
+ + p9.geom_col(stat='identity', width=0.6, color="black",
+ position=p9.position_dodge(width=0.2))
+ + p9.ggtitle("Performance Gain compared to Model: " +
+ str(first_model))
+ + p9.facet_grid("metric ~ class")
+ + p9.coord_flip()
+ + p9.xlab("")
+ + p9.ylab("Performance Gain in Percent (%)")
+ + p9.theme_bw()
+ + p9.theme(legend_position="none"))
# Plot gain results class macro-averaged
else:
- fig = (ggplot(df, aes("model", "score", fill="model"))
- + geom_col(stat='identity', width=0.6, color="black",
- position = position_dodge(width=0.2))
- + ggtitle("Performance Gain compared to Model: " + str(first_model))
- + facet_wrap("metric")
- + coord_flip()
- + xlab("")
- + ylab("Performance Gain in Percent (%)")
- + theme_bw()
- + theme(legend_position="none"))
+ fig = (p9.ggplot(df, p9.aes("model", "score", fill="model"))
+ + p9.geom_col(stat='identity', width=0.6, color="black",
+ position=p9.position_dodge(width=0.2))
+ + p9.ggtitle("Performance Gain compared to Model: " +
+ str(first_model))
+ + p9.facet_wrap("metric")
+ + p9.coord_flip()
+ + p9.xlab("")
+ + p9.ylab("Performance Gain in Percent (%)")
+ + p9.theme_bw()
+ + p9.theme(legend_position="none"))
# Store figure to disk
filename = "plot.comparison.gain"
- if suffix is not None : filename += "." + str(suffix)
+ if suffix is not None:
+ filename += "." + str(suffix)
filename += ".png"
fig.save(filename=filename, path=out_path, width=18, height=9, dpi=300)
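A hedged call sketch for the evaluate_comparison() API documented above; the prediction arrays and the one-hot label matrix are hypothetical:

```python
from aucmedi.evaluation import evaluate_comparison

# Predictions of two fitted models with shape (n_samples, n_labels) each,
# plus one-hot encoded labels from input_interface()
df_merged, df_gain = evaluate_comparison(
    pred_list=[preds_model_a, preds_model_b],
    labels=labels_ohe,
    out_path="evaluation/",
    model_names=["ResNet50", "DenseNet121"],
    macro_average_classes=True)
# -> writes plot.comparison.beside.png and plot.comparison.gain.png
```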
diff --git a/aucmedi/evaluation/dataset.py b/aucmedi/evaluation/dataset.py
index 7a5056da..bd0f5890 100644
--- a/aucmedi/evaluation/dataset.py
+++ b/aucmedi/evaluation/dataset.py
@@ -19,12 +19,11 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
+# Third Party Libraries
import numpy as np
import pandas as pd
-import os
-from plotnine import *
-# Internal libraries/scripts
+import plotnine as p9
+
#-----------------------------------------------------#
# Evaluation - Dataset Analysis #
@@ -82,7 +81,8 @@ def evaluate_dataset(samples,
If not provided (`None` provided), class indices will be used.
show (bool): Option, whether to also display the generated charts.
plot_barplot (bool): Option, whether to generate a bar plot of class distribution.
- plot_heatmap (bool): Option, whether to generate a heatmap of class overview. Only recommended for subsets of ~50 samples.
+ plot_heatmap (bool): Option, whether to generate a heatmap of the class overview. Only
+ recommended for small subsets (~50 samples).
suffix (str): Special suffix to add in the created figure filename.
Returns:
@@ -100,6 +100,7 @@ def evaluate_dataset(samples,
# Return table with class distribution
return df_cf
+
#-----------------------------------------------------#
# Dataset Analysis - Barplot #
#-----------------------------------------------------#
@@ -110,8 +111,10 @@ def evalby_barplot(labels, out_path, class_names, plot_barplot, show=False,
for c in range(0, labels.shape[1]):
n_samples = labels.shape[0]
class_freq = np.sum(labels[:, c])
- if class_names is None : curr_class = str(c)
- else : curr_class = class_names[c]
+ if class_names is None:
+ curr_class = str(c)
+ else:
+ curr_class = class_names[c]
class_percentage = np.round(class_freq / n_samples, 2) * 100
cf_list.append([curr_class, class_freq, class_percentage])
@@ -122,38 +125,43 @@ def evalby_barplot(labels, out_path, class_names, plot_barplot, show=False,
if plot_barplot:
# Plot class frequency results
- fig = (ggplot(df_cf, aes("class", "class_perc", fill="class"))
- + geom_bar(stat="identity", color="black")
- + geom_text(aes(label="class_freq"), nudge_y=5)
- + coord_flip()
- + ggtitle("Dataset Analysis: Class Distribution")
- + xlab("Classes")
- + ylab("Class Frequency (in %)")
- + scale_y_continuous(limits=[0, 100],
- breaks=np.arange(0,110,10))
- + theme_bw()
- + theme(legend_position="none"))
+ fig = (p9.ggplot(df_cf, p9.aes("class", "class_perc", fill="class"))
+ + p9.geom_bar(stat="identity", color="black")
+ + p9.geom_text(p9.aes(label="class_freq"), nudge_y=5)
+ + p9.coord_flip()
+ + p9.ggtitle("Dataset Analysis: Class Distribution")
+ + p9.xlab("Classes")
+ + p9.ylab("Class Frequency (in %)")
+ + p9.scale_y_continuous(limits=[0, 100],
+ breaks=np.arange(0, 110, 10))
+ + p9.theme_bw()
+ + p9.theme(legend_position="none"))
# Store figure to disk
filename = "plot.dataset.barplot"
- if suffix is not None : filename += "." + str(suffix)
+ if suffix is not None:
+ filename += "." + str(suffix)
filename += ".png"
fig.save(filename=filename, path=out_path, width=10, height=9, dpi=200)
# Plot figure
- if show : print(fig)
+ if show:
+ print(fig)
# Return class table
return df_cf
+
#-----------------------------------------------------#
# Dataset Analysis - Heatmap #
#-----------------------------------------------------#
def evalby_heatmap(samples, labels, out_path, class_names, show=False,
suffix=None):
# Create dataframe
- if class_names is None : df = pd.DataFrame(labels, index=samples)
- else : df = pd.DataFrame(labels, index=samples, columns=class_names)
+ if class_names is None:
+ df = pd.DataFrame(labels, index=samples)
+ else:
+ df = pd.DataFrame(labels, index=samples, columns=class_names)
# Preprocess dataframe
df = df.reset_index()
@@ -161,21 +169,23 @@ def evalby_heatmap(samples, labels, out_path, class_names, show=False,
value_name="presence")
# Plot heatmap
- fig = (ggplot(df_melted, aes("index", "class", fill="presence"))
- + geom_tile()
- + coord_flip()
- + ggtitle("Dataset Analysis: Overview")
- + xlab("Samples")
- + ylab("Classes")
- + scale_fill_gradient(low="white", high="#3399FF")
- + theme_classic()
- + theme(legend_position="none"))
+ fig = (p9.ggplot(df_melted, p9.aes("index", "class", fill="presence"))
+ + p9.geom_tile()
+ + p9.coord_flip()
+ + p9.ggtitle("Dataset Analysis: Overview")
+ + p9.xlab("Samples")
+ + p9.ylab("Classes")
+ + p9.scale_fill_gradient(low="white", high="#3399FF")
+ + p9.theme_classic()
+ + p9.theme(legend_position="none"))
# Store figure to disk
filename = "plot.dataset.heatmap"
- if suffix is not None : filename += "." + str(suffix)
+ if suffix is not None:
+ filename += "." + str(suffix)
filename += ".png"
fig.save(filename=filename, path=out_path, width=10, height=9, dpi=200)
# Plot figure
- if show : print(fig)
+ if show:
+ print(fig)
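A hedged call sketch for evaluate_dataset() as documented above; the argument order beyond `samples` is assumed, and the sample list and one-hot labels are hypothetical outputs of input_interface():

```python
from aucmedi.evaluation import evaluate_dataset

df_cf = evaluate_dataset(samples, labels_ohe,
                         out_path="evaluation/",
                         class_names=["benign", "malignant"],
                         plot_barplot=True,
                         plot_heatmap=False)  # heatmap only for small subsets
print(df_cf)  # class frequency / percentage table
```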
diff --git a/aucmedi/evaluation/fitting.py b/aucmedi/evaluation/fitting.py
index 81a1786f..f18c6c42 100644
--- a/aucmedi/evaluation/fitting.py
+++ b/aucmedi/evaluation/fitting.py
@@ -19,15 +19,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
+# Third Party Libraries
import numpy as np
import pandas as pd
-import os
-from plotnine import *
+import plotnine as p9
+
#-----------------------------------------------------#
# Evaluation - Plot Fitting #
#-----------------------------------------------------#
def evaluate_fitting(train_history,
out_path,
monitor=["loss"],
@@ -66,7 +66,7 @@ def evaluate_fitting(train_history,
show (bool): Option, whether to also display the generated chart.
"""
# Convert to pandas dataframe
- hist_prepared = dict([ (k,pd.Series(v)) for k,v in train_history.items() ])
+ hist_prepared = {k: pd.Series(v) for k, v in train_history.items()}
dt = pd.DataFrame.from_dict(hist_prepared, orient="columns")
# Identify all selected columns
@@ -93,8 +93,7 @@ def evaluate_fitting(train_history,
valid_split = False
break
if valid_split:
- dt_melted[["prefix", "metric"]] = dt_melted["metric"].str.split(".",
- expand=True)
+ dt_melted[["prefix", "metric"]] = dt_melted["metric"].str.split(".", expand=True)
# Remove NaN tags
dt_melted = dt_melted.dropna(axis=0)
@@ -111,8 +110,10 @@ def evaluate_fitting(train_history,
tl_epochs = filter_tl.groupby(["prefix"])["epoch"].max()
# compute fine tune epoch update
group_repeats = filter.groupby(["prefix"]).size()
- if group_repeats.empty : ft_update = 0
- else : ft_update = tl_epochs.repeat(group_repeats).to_numpy()
+ if group_repeats.empty:
+ ft_update = 0
+ else:
+ ft_update = tl_epochs.repeat(group_repeats).to_numpy()
# if no prefix available -> add epochs to all ft phases
else:
# identify number of epochs global
@@ -125,7 +126,8 @@ def evaluate_fitting(train_history,
# update number of epochs
dt_melted.loc[dt_melted["metric"].str.startswith("ft_"), "epoch"] =\
filter["epoch"].to_numpy() + ft_update
- else : tl_epochs = None
+ else:
+ tl_epochs = None
# Remove preprocessed transfer learning tag from metric column
dt_melted["metric"] = dt_melted["metric"].apply(remove_tag, tag="tl_")
@@ -137,37 +139,43 @@ def evaluate_fitting(train_history,
dt_melted["metric"] = dt_melted["metric"].apply(remove_tag, tag="val_")
# Plot results via plotnine
- fig = (ggplot(dt_melted, aes("epoch", "score", color="subset"))
- + geom_line(size=1)
- + ggtitle("Fitting Curve during Training Process")
- + xlab("Epoch")
- + ylab("Score")
- + scale_colour_discrete(name="Subset")
- + theme_bw()
- + theme(subplots_adjust={'wspace':0.2}))
+ fig = (p9.ggplot(dt_melted, p9.aes("epoch", "score", color="subset"))
+ + p9.geom_line(size=1)
+ + p9.ggtitle("Fitting Curve during Training Process")
+ + p9.xlab("Epoch")
+ + p9.ylab("Score")
+ + p9.scale_color_discrete(name="Subset")
+ + p9.theme_bw()
+ + p9.theme(subplots_adjust={'wspace': 0.2}))
if prefix_split is not None and valid_split:
- fig += facet_grid("prefix ~ metric")
- else : fig += facet_wrap("metric", scales="free_y")
+ fig += p9.facet_grid("prefix ~ metric")
+ else:
+ fig += p9.facet_wrap("metric", scales="free_y")
if tl_epochs is not None and valid_split:
tle_df = tl_epochs.to_frame().reset_index()
- fig += geom_vline(tle_df, aes(xintercept="epoch"))
+ fig += p9.geom_vline(tle_df, p9.aes(xintercept="epoch"))
elif tl_epochs is not None and not valid_split:
- fig += geom_vline(xintercept=tl_epochs)
+ fig += p9.geom_vline(xintercept=tl_epochs)
# Store figure to disk
filename = "plot.fitting_course"
- if suffix is not None : filename += "." + str(suffix)
+ if suffix is not None:
+ filename += "." + str(suffix)
filename += ".png"
fig.save(filename=filename,
path=out_path, dpi=200, limitsize=False)
- if show : print(fig)
+ if show:
+ print(fig)
+
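+# Minimal usage sketch (assumed Keras-style history dict of metric lists):
+#   history = {"loss": [0.9, 0.6, 0.4], "val_loss": [1.0, 0.7, 0.5]}
+#   evaluate_fitting(history, out_path="evaluation/", monitor=["loss"])
+# -> writes "plot.fitting_course.png" into out_path.
+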
#-----------------------------------------------------#
# Subroutines #
#-----------------------------------------------------#
def remove_tag(x, tag):
- if x.startswith(tag) : return x[len(tag):]
- else : return x
+ if x.startswith(tag):
+ return x[len(tag):]
+ else:
+ return x
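+
+
+# Behavior sketch (illustrative values): the tag is stripped only when present.
+#   remove_tag("tl_loss", "tl_")  # -> "loss"
+#   remove_tag("loss", "tl_")     # -> "loss" (unchanged)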
diff --git a/aucmedi/evaluation/metrics.py b/aucmedi/evaluation/metrics.py
index 570bd897..d1f3486b 100644
--- a/aucmedi/evaluation/metrics.py
+++ b/aucmedi/evaluation/metrics.py
@@ -19,10 +19,11 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
+# Third Party Libraries
import numpy as np
import pandas as pd
-from sklearn.metrics import roc_curve, roc_auc_score
+from sklearn.metrics import roc_auc_score, roc_curve
+
#-----------------------------------------------------#
# Computation: Classification Metrics #
@@ -35,11 +36,12 @@ def compute_metrics(preds, labels, n_labels, threshold=None):
FDR, TruePositives, TrueNegatives, FalsePositives, FalseNegatives
Args:
- preds (numpy.ndarray): A NumPy array of predictions formatted with shape (n_samples, n_labels). Provided by
- [NeuralNetwork][aucmedi.neural_network.model].
+ preds (numpy.ndarray): A NumPy array of predictions formatted with shape (n_samples, n_labels).
+ Provided by [NeuralNetwork][aucmedi.neural_network.model].
labels (numpy.ndarray): Classification list with One-Hot Encoding. Provided by
[input_interface][aucmedi.data_processing.io_data.input_interface].
- n_labels (int): Number of classes. Provided by [input_interface][aucmedi.data_processing.io_data.input_interface].
+ n_labels (int): Number of classes. Provided by
+ [input_interface][aucmedi.data_processing.io_data.input_interface].
threshold (float): Only required for multi_label data. Threshold value if prediction is positive.
Returns:
@@ -80,7 +82,7 @@ def compute_metrics(preds, labels, n_labels, threshold=None):
# Compute area under the ROC curve
try:
data_dict["AUC"] = roc_auc_score(truth, pred_prob)
- except:
+ except Exception:
print("ROC AUC score is not defined.")
# Parse metrics to dataframe
@@ -98,6 +100,7 @@ def compute_metrics(preds, labels, n_labels, threshold=None):
# Return final dataframe
return df_final
+
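+# Usage sketch (toy one-hot data; threshold is only required for multi-label):
+#   preds  = np.array([[0.9, 0.1], [0.2, 0.8]])
+#   labels = np.array([[1, 0], [0, 1]])
+#   df_metrics = compute_metrics(preds, labels, n_labels=2)
+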
#-----------------------------------------------------#
# Computation: Confusion Matrix #
#-----------------------------------------------------#
@@ -105,11 +108,12 @@ def compute_confusion_matrix(preds, labels, n_labels):
""" Function for computing a confusion matrix.
Args:
- preds (numpy.ndarray): A NumPy array of predictions formatted with shape (n_samples, n_labels). Provided by
- [NeuralNetwork][aucmedi.neural_network.model].
+ preds (numpy.ndarray): A NumPy array of predictions formatted with shape (n_samples, n_labels).
+ Provided by [NeuralNetwork][aucmedi.neural_network.model].
labels (numpy.ndarray): Classification list with One-Hot Encoding. Provided by
[input_interface][aucmedi.data_processing.io_data.input_interface].
- n_labels (int): Number of classes. Provided by [input_interface][aucmedi.data_processing.io_data.input_interface].
+ n_labels (int): Number of classes. Provided by
+ [input_interface][aucmedi.data_processing.io_data.input_interface].
Returns:
rawcm (numpy.ndarray): NumPy matrix with shape (n_labels, n_labels).
@@ -121,6 +125,7 @@ def compute_confusion_matrix(preds, labels, n_labels):
rawcm[labels_argmax[i]][preds_argmax[i]] += 1
return rawcm
+
#-----------------------------------------------------#
# Computation: ROC Coordinates #
#-----------------------------------------------------#
@@ -128,14 +133,17 @@ def compute_roc(preds, labels, n_labels):
""" Function for computing the data data of a ROC curve (FPR and TPR).
Args:
- preds (numpy.ndarray): A NumPy array of predictions formatted with shape (n_samples, n_labels). Provided by
- [NeuralNetwork][aucmedi.neural_network.model].
+ preds (numpy.ndarray): A NumPy array of predictions formatted with shape (n_samples, n_labels).
+ Provided by [NeuralNetwork][aucmedi.neural_network.model].
labels (numpy.ndarray): Classification list with One-Hot Encoding. Provided by
[input_interface][aucmedi.data_processing.io_data.input_interface].
- n_labels (int): Number of classes. Provided by [input_interface][aucmedi.data_processing.io_data.input_interface].
+ n_labels (int): Number of classes. Provided by
+ [input_interface][aucmedi.data_processing.io_data.input_interface].
Returns:
- fpr_list (list of list): List containing a list of false positive rate points for each class. Shape: (n_labels, tpr_coords).
- tpr_list (list of list): List containing a list of true positive rate points for each class. Shape: (n_labels, fpr_coords).
+        fpr_list (list of list): List containing a list of false positive rate points for each class. Shape:
+                                 (n_labels, fpr_coords).
+        tpr_list (list of list): List containing a list of true positive rate points for each class. Shape:
+                                 (n_labels, tpr_coords).
"""
fpr_list = []
tpr_list = []
@@ -147,6 +155,7 @@ def compute_roc(preds, labels, n_labels):
tpr_list.append(tpr)
return fpr_list, tpr_list
+
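+# Shape sketch: for n_labels classes, both returned lists have length n_labels;
+# fpr_list[c] and tpr_list[c] hold the ROC curve coordinates of class c.
+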
#-----------------------------------------------------#
# Subroutines #
#-----------------------------------------------------#
@@ -157,9 +166,14 @@ def compute_CM(gt, pd):
fp = 0
fn = 0
for i in range(0, len(gt)):
- if gt[i] == 1 and pd[i] == 1 : tp += 1
- elif gt[i] == 1 and pd[i] == 0 : fn += 1
- elif gt[i] == 0 and pd[i] == 0 : tn += 1
- elif gt[i] == 0 and pd[i] == 1 : fp += 1
- else : print("ERROR at confusion matrix", i)
+ if gt[i] == 1 and pd[i] == 1:
+ tp += 1
+ elif gt[i] == 1 and pd[i] == 0:
+ fn += 1
+ elif gt[i] == 0 and pd[i] == 0:
+ tn += 1
+ elif gt[i] == 0 and pd[i] == 1:
+ fp += 1
+ else:
+ print("ERROR at confusion matrix", i)
return tp, tn, fp, fn
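+
+
+# Counting sketch with toy binary vectors (note that the parameter `pd`
+# shadows the pandas alias within this helper):
+#   compute_CM([1, 1, 0, 0], [1, 0, 0, 1])  # -> (1, 1, 1, 1) as (tp, tn, fp, fn)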
diff --git a/aucmedi/evaluation/performance.py b/aucmedi/evaluation/performance.py
index 77b6ef36..c3bdce02 100644
--- a/aucmedi/evaluation/performance.py
+++ b/aucmedi/evaluation/performance.py
@@ -19,13 +19,17 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
+# Python Standard Library
+import os
+
+# Third Party Libraries
import numpy as np
import pandas as pd
-import os
-from plotnine import *
-# Internal libraries/scripts
-from aucmedi.evaluation.metrics import *
+import plotnine as p9
+
+# Internal Libraries
+from aucmedi.evaluation.metrics import compute_confusion_matrix, compute_metrics, compute_roc
+
#-----------------------------------------------------#
# Evaluation - Plot Performance #
@@ -42,7 +46,8 @@ def evaluate_performance(preds,
plot_barplot=True,
plot_confusion_matrix=True,
plot_roc_curve=True):
- """ Function for automatic performance evaluation based on model predictions.
+ """ Function for automatic performance evaluation based on model
+ predictions.
???+ example
```python
@@ -51,7 +56,7 @@ def evaluate_performance(preds,
from aucmedi.evaluation import *
# Load data
- ds = input_interface(interface="csv", # Interface type
+ ds = input_interface(interface="csv", # Interface type
path_imagedir="dataset/images/",
path_data="dataset/annotations.csv",
ohe=False, col_sample="ID", col_class="diagnosis")
@@ -95,8 +100,8 @@ def evaluate_performance(preds,
utilizing a DenseNet121.
Args:
- preds (numpy.ndarray): A NumPy array of predictions formatted with shape (n_samples, n_labels). Provided by
- [NeuralNetwork][aucmedi.neural_network.model].
+ preds (numpy.ndarray): A NumPy array of predictions formatted with shape (n_samples, n_labels).
+ Provided by [NeuralNetwork][aucmedi.neural_network.model].
labels (numpy.ndarray): Classification list with One-Hot Encoding. Provided by
[input_interface][aucmedi.data_processing.io_data.input_interface].
out_path (str): Path to directory in which plotted figures are stored.
@@ -119,8 +124,10 @@ def evaluate_performance(preds,
# Identify number of labels
n_labels = labels.shape[-1]
# Identify prediction threshold
- if multi_label : threshold = metrics_threshold
- else : threshold = None
+ if multi_label:
+ threshold = metrics_threshold
+ else:
+ threshold = None
# Compute metrics
metrics = compute_metrics(preds, labels, n_labels, threshold)
@@ -133,7 +140,8 @@ def evaluate_performance(preds,
for c in range(len(class_names)):
class_mapping[c] = class_names[c]
metrics["class"].replace(class_mapping, inplace=True)
- if class_names is None : metrics["class"] = pd.Categorical(metrics["class"])
+ if class_names is None:
+ metrics["class"] = pd.Categorical(metrics["class"])
# Store metrics to CSV
if store_csv:
@@ -141,19 +149,23 @@ def evaluate_performance(preds,
# Generate bar plot
if plot_barplot:
- evalby_barplot(metrics, out_path, class_names, show=show, suffix=suffix)
+ evalby_barplot(metrics, out_path, class_names,
+ show=show, suffix=suffix)
# Generate confusion matrix plot
if plot_confusion_matrix and not multi_label:
- evalby_confusion_matrix(cm, out_path, class_names, show=show, suffix=suffix)
+ evalby_confusion_matrix(
+ cm, out_path, class_names, show=show, suffix=suffix)
# Generate ROC curve
if plot_roc_curve:
- evalby_rocplot(fpr_list, tpr_list, out_path, class_names, show=show, suffix=suffix)
+ evalby_rocplot(fpr_list, tpr_list, out_path,
+ class_names, show=show, suffix=suffix)
# Return metrics
return metrics
+
#-----------------------------------------------------#
# Evaluation Performance - Confusion Matrix #
#-----------------------------------------------------#
@@ -177,29 +189,33 @@ def evalby_confusion_matrix(confusion_matrix, out_path, class_names,
dt.rename(columns={"index": "gt"}, inplace=True)
# Generate confusion matrix
- fig = (ggplot(dt, aes("pd", "gt", fill="score"))
- + geom_tile(color="white", size=1.5)
- + geom_text(aes("pd", "gt", label="score"), color="black")
- + ggtitle("Performance Evaluation: Confusion Matrix")
- + xlab("Prediction")
- + ylab("Ground Truth")
- + scale_fill_gradient(low="white", high="royalblue",
- limits=[0, 100])
- + guides(fill = guide_colourbar(title="%",
- barwidth=10,
- barheight=50))
- + theme_bw()
- + theme(axis_text_x = element_text(angle = 45, vjust = 1,
- hjust = 1)))
+ fig = (p9.ggplot(dt, p9.aes("pd", "gt", fill="score"))
+ + p9.geom_tile(color="white", size=1.5)
+ + p9.geom_text(p9.aes("pd", "gt",
+ label="score"), color="black")
+ + p9.ggtitle("Performance Evaluation: Confusion Matrix")
+ + p9.xlab("Prediction")
+ + p9.ylab("Ground Truth")
+ + p9.scale_fill_gradient(low="white", high="royalblue",
+ limits=[0, 100])
+ + p9.guides(fill=p9.guide_colourbar(title="%",
+ barwidth=10,
+ barheight=50))
+ + p9.theme_bw()
+ + p9.theme(axis_text_x=p9.element_text(angle=45, vjust=1,
+ hjust=1)))
# Store figure to disk
filename = "plot.performance.confusion_matrix"
- if suffix is not None : filename += "." + str(suffix)
+ if suffix is not None:
+ filename += "." + str(suffix)
filename += ".png"
fig.save(filename=filename, path=out_path, width=10, height=9, dpi=200)
# Plot figure
- if show : print(fig)
+ if show:
+ print(fig)
+
#-----------------------------------------------------#
# Evaluation Performance - Barplots #
@@ -210,26 +226,30 @@ def evalby_barplot(metrics, out_path, class_names, show=False, suffix=None):
df_metrics["class"] = pd.Categorical(df_metrics["class"])
# Generate metric results
- fig = (ggplot(df_metrics, aes("class", "score", fill="class"))
- + geom_col(stat='identity', width=0.6, color="black",
- position = position_dodge(width=0.6))
- + ggtitle("Performance Evaluation: Metric Overview")
- + facet_wrap("metric")
- + coord_flip()
- + xlab("")
- + ylab("Score")
- + scale_y_continuous(limits=[0, 1], breaks=np.arange(0, 1.1, 0.1))
- + scale_fill_discrete(name="Classes")
- + theme_bw())
+ fig = (p9.ggplot(df_metrics, p9.aes("class", "score", fill="class"))
+ + p9.geom_col(stat='identity', width=0.6, color="black",
+ position=p9.position_dodge(width=0.6))
+ + p9.ggtitle("Performance Evaluation: Metric Overview")
+ + p9.facet_wrap("metric")
+ + p9.coord_flip()
+ + p9.xlab("")
+ + p9.ylab("Score")
+ + p9.scale_y_continuous(limits=[0, 1],
+ breaks=np.arange(0, 1.1, 0.1))
+ + p9.scale_fill_discrete(name="Classes")
+ + p9.theme_bw())
# Store figure to disk
filename = "plot.performance.barplot"
- if suffix is not None : filename += "." + str(suffix)
+ if suffix is not None:
+ filename += "." + str(suffix)
filename += ".png"
fig.save(filename=filename, path=out_path, width=12, height=9, dpi=200)
# Plot figure
- if show : print(fig)
+ if show:
+ print(fig)
+
#-----------------------------------------------------#
# Evaluation Performance - ROC plot #
@@ -254,26 +274,31 @@ def evalby_rocplot(fpr_list, tpr_list, out_path, class_names, show=False, suffix
df_roc["TPR"] = df_roc["TPR"].astype(float)
# Generate roc results
- fig = (ggplot(df_roc, aes("FPR", "TPR", color="class"))
- + geom_line(size=1.0)
- + geom_abline(intercept=0, slope=1, color="black",
- linetype="dashed")
- + ggtitle("Performance Evaluation: ROC Curves")
- + xlab("False Positive Rate")
- + ylab("True Positive Rate")
- + scale_x_continuous(limits=[0, 1], breaks=np.arange(0,1.1,0.1))
- + scale_y_continuous(limits=[0, 1], breaks=np.arange(0,1.1,0.1))
- + scale_color_discrete(name="Classes")
- + theme_bw())
+ fig = (p9.ggplot(df_roc, p9.aes("FPR", "TPR", color="class"))
+ + p9.geom_line(size=1.0)
+ + p9.geom_abline(intercept=0, slope=1, color="black",
+ linetype="dashed")
+ + p9.ggtitle("Performance Evaluation: ROC Curves")
+ + p9.xlab("False Positive Rate")
+ + p9.ylab("True Positive Rate")
+ + p9.scale_x_continuous(limits=[0, 1],
+ breaks=np.arange(0, 1.1, 0.1))
+ + p9.scale_y_continuous(limits=[0, 1],
+ breaks=np.arange(0, 1.1, 0.1))
+ + p9.scale_color_discrete(name="Classes")
+ + p9.theme_bw())
# Store figure to disk
filename = "plot.performance.roc"
- if suffix is not None : filename += "." + str(suffix)
+ if suffix is not None:
+ filename += "." + str(suffix)
filename += ".png"
fig.save(filename=filename, path=out_path, width=10, height=9, dpi=200)
# Plot figure
- if show : print(fig)
+ if show:
+ print(fig)
+
#-----------------------------------------------------#
# Evaluation Performance - CSV file #
@@ -281,7 +306,8 @@ def evalby_rocplot(fpr_list, tpr_list, out_path, class_names, show=False, suffix
def evalby_csv(metrics, out_path, class_names, suffix=None):
    # Obtain filename for the CSV file
filename = "metrics.performance"
- if suffix is not None : filename += "." + str(suffix)
+ if suffix is not None:
+ filename += "." + str(suffix)
filename += ".csv"
path_csv = os.path.join(out_path, filename)
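+    # Naming sketch (illustrative): with suffix="test", path_csv resolves to
+    #   <out_path>/metrics.performance.test.csv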
diff --git a/aucmedi/neural_network/architectures/__init__.py b/aucmedi/neural_network/architectures/__init__.py
index ed5f5055..3d36d9ff 100644
--- a/aucmedi/neural_network/architectures/__init__.py
+++ b/aucmedi/neural_network/architectures/__init__.py
@@ -19,17 +19,20 @@
#-----------------------------------------------------#
# Documentation #
#-----------------------------------------------------#
-""" Models are represented with the open-source neural network library [TensorFlow.Keras](https://www.tensorflow.org/api_docs/python/tf/keras)
+""" Models are represented with the open-source neural network library
+ [TensorFlow.Keras](https://www.tensorflow.org/api_docs/python/tf/keras)
which provides a user-friendly API for commonly used neural-network building blocks.
-The already implemented architectures are configurable by custom input sizes, optional dropouts, transfer learning via pretrained weights,
+The already implemented architectures are configurable by custom input sizes, optional dropouts, transfer learning via
+pretrained weights,
metadata inclusion, or activation output depending on the classification type.
Additionally, AUCMEDI offers architectures for 2D image and 3D volume classification.
???+ example "Example: How to select an Architecture"
For architecture selection, just create a key (str) by adding "2D." or "3D." to the architecture name,
- and pass the key to the `architecture` parameter of the [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork] class.
+ and pass the key to the `architecture` parameter of the [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork]
+ class.
```python
# 2D architecture
@@ -52,7 +55,8 @@
the [abstract base class interface][aucmedi.neural_network.architectures.arch_base.Architecture_Base]
for architectures offers the possibility for custom architecture integration into the AUCMEDI pipeline.
-Furthermore, AUCMEDI offers the powerful classification head interface [Classifier][aucmedi.neural_network.architectures.classifier],
+Furthermore, AUCMEDI offers the powerful classification head interface
+[Classifier][aucmedi.neural_network.architectures.classifier],
which can be used for all types of image classifications and will be automatically created in the
[NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork] class.
"""
@@ -71,12 +75,12 @@
architecture_dict = {}
# Add image architectures to architecture_dict
-from aucmedi.neural_network.architectures.image import architecture_dict as arch_image
+from aucmedi.neural_network.architectures.image import architecture_dict as arch_image # noqa E402
for arch in arch_image:
architecture_dict["2D." + arch] = arch_image[arch]
# Add volume architectures to architecture_dict
-from aucmedi.neural_network.architectures.volume import architecture_dict as arch_volume
+from aucmedi.neural_network.architectures.volume import architecture_dict as arch_volume # noqa E402
for arch in arch_volume:
architecture_dict["3D." + arch] = arch_volume[arch]
@@ -87,11 +91,19 @@
supported_standardize_mode = {}
# Add image architectures to supported_standardize_mode
-from aucmedi.neural_network.architectures.image import supported_standardize_mode as modes_image
+from aucmedi.neural_network.architectures.image import supported_standardize_mode as modes_image # noqa E402
for m in modes_image:
supported_standardize_mode["2D." + m] = modes_image[m]
# Add volume architectures to supported_standardize_mode
-from aucmedi.neural_network.architectures.volume import supported_standardize_mode as modes_volume
+from aucmedi.neural_network.architectures.volume import supported_standardize_mode as modes_volume # noqa E402
for m in modes_volume:
supported_standardize_mode["3D." + m] = modes_volume[m]
+
+
+__all__ = [
+ "Architecture_Base",
+ "Classifier",
+ "architecture_dict",
+ "supported_standardize_mode"
+]
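+
+
+# Lookup sketch: keys join the dimensionality prefix with the architecture name,
+# e.g. architecture_dict["2D.ResNet50"] or supported_standardize_mode["2D.ResNet101"].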
diff --git a/aucmedi/neural_network/architectures/arch_base.py b/aucmedi/neural_network/architectures/arch_base.py
index f24bfe62..5eb7030f 100644
--- a/aucmedi/neural_network/architectures/arch_base.py
+++ b/aucmedi/neural_network/architectures/arch_base.py
@@ -19,9 +19,10 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
from abc import ABC, abstractmethod
+
#-----------------------------------------------------#
# Abstract Interface for an Architecture class #
#-----------------------------------------------------#
@@ -83,7 +84,8 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
Args:
classification_head (Classifier): Classifier object for building the classification head of the model.
channels (int): Number of channels. For example: Grayscale->1 or RGB->3.
- input_shape (tuple): Input shape of the image data for the first model layer (excluding channel axis).
+ input_shape (tuple): Input shape of the image data for the first model layer
+ (excluding channel axis).
pretrained_weights (bool): Option whether to utilize pretrained weights e.g. for ImageNet.
"""
self.classifier = classification_head
diff --git a/aucmedi/neural_network/architectures/classifier.py b/aucmedi/neural_network/architectures/classifier.py
index 8c65de38..ecc6f0a0 100644
--- a/aucmedi/neural_network/architectures/classifier.py
+++ b/aucmedi/neural_network/architectures/classifier.py
@@ -19,9 +19,12 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
-from tensorflow.keras.models import Model
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras import Input, layers
+from tensorflow.keras.models import Model
+
#-----------------------------------------------------#
# Classification Head #
@@ -29,7 +32,8 @@
class Classifier:
""" A powerful interface for all types of image classifications.
- This class will be created automatically inside the [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork] class.
+ This class will be created automatically inside the
+ [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork] class.
!!! info "Supported Features"
- Binary classification
@@ -104,7 +108,7 @@ class Classifier:
resize=my_model.meta_input, # (224,224)
standardize_mode=my_model.meta_standardize) # "torch"
```
- """
+ """ # noqa E501
#---------------------------------------------#
# Initialization #
#---------------------------------------------#
@@ -114,14 +118,19 @@ def __init__(self, n_labels, activation_output="softmax",
The fully connected layer and dropout option (`fcl_dropout`) utilizes a 512 unit Dense layer with 30% Dropout.
- Modi for activation_output: Check out [TensorFlow.Keras doc on activation functions](https://www.tensorflow.org/api_docs/python/tf/keras/activations).
+        Modes for activation_output: Check out
+ [TensorFlow.Keras doc on activation functions](https://www.tensorflow.org/api_docs/python/tf/keras/activations).
Args:
- n_labels (int): Number of classes/labels (important for the last layer of classification head).
+ n_labels (int): Number of classes/labels (important for the last layer of classification
+ head).
activation_output (str): Activation function which is used in the last classification layer.
- meta_variables (int): Number of metadata variables, which should be included in the classification head.
- If `None`is provided, no metadata integration block will be added to the classification head.
- fcl_dropout (bool): Option whether to utilize a Dense & Dropout layer before the last classification layer.
+ meta_variables (int): Number of metadata variables, which should be included in the classification
+ head.
+                                    If `None` is provided, no metadata integration block will be added to the
+                                    classification head.
+ fcl_dropout (bool): Option whether to utilize a Dense & Dropout layer before the last
+ classification layer.
"""
self.n_labels = n_labels
self.activation_output = activation_output
@@ -134,7 +143,8 @@ def __init__(self, n_labels, activation_output="softmax",
def build(self, model_input, model_output):
""" Internal function which appends the classification head.
- This function will be called from inside an [Architecture][aucmedi.neural_network.architectures] `create_model()` function
+ This function will be called from inside an
+ [Architecture][aucmedi.neural_network.architectures] `create_model()` function
and must return a functional Keras model.
The `build()` function will append a classification head to the provided Keras model.
@@ -151,7 +161,8 @@ def build(self, model_input, model_output):
elif len(model_output.shape) == 5: # for 3D architectures
model_head = layers.GlobalAveragePooling3D(name="avg_pool")(model_output)
        # if the model output is neither 4 nor 5 dims -> it is already GlobalAveragePooled to 2 dims
- else : model_head = model_output
+ else:
+ model_head = model_output
# Apply optional dense & dropout layer
if self.fcl_dropout:
@@ -180,7 +191,8 @@ def build(self, model_input, model_output):
# Obtain input layer
if self.meta_variables is not None:
input_layer = [model_input, model_meta]
- else : input_layer = model_input
+ else:
+ input_layer = model_input
# Create tf.keras model
model = Model(inputs=input_layer, outputs=model_head)
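+
+        # Contract sketch (hypothetical create_model of an Architecture subclass):
+        #   def create_model(self):
+        #       inputs = Input(shape=(224, 224, 3))
+        #       x = layers.Conv2D(32, 3, activation="relu")(inputs)  # stand-in backbone
+        #       return self.classifier.build(model_input=inputs, model_output=x)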
diff --git a/aucmedi/neural_network/architectures/image/__init__.py b/aucmedi/neural_network/architectures/image/__init__.py
index 873a31d0..47f2d0a0 100644
--- a/aucmedi/neural_network/architectures/image/__init__.py
+++ b/aucmedi/neural_network/architectures/image/__init__.py
@@ -114,7 +114,8 @@
}
""" Dictionary of implemented 2D Architectures Methods in AUCMEDI.
- The base key (str) or an initialized Architecture can be passed to the [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork] class as `architecture` parameter.
+ The base key (str) or an initialized Architecture can be passed to the
+ [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork] class as `architecture` parameter.
???+ example "Example"
```python title="Recommended via NeuralNetwork class"
@@ -152,7 +153,8 @@
architecture="2D.ResNet101"
```
- Architectures are based on the abstract base class [aucmedi.neural_network.architectures.arch_base.Architecture_Base][].
+ Architectures are based on the abstract base class
+ [aucmedi.neural_network.architectures.arch_base.Architecture_Base][].
"""
# List of implemented architectures
@@ -199,9 +201,11 @@
"ConvNeXtSmall": None,
"ConvNeXtLarge": None,
}
-""" Dictionary of recommended [Standardize][aucmedi.data_processing.subfunctions.standardize] techniques for 2D Architectures Methods in AUCMEDI.
+""" Dictionary of recommended [Standardize][aucmedi.data_processing.subfunctions.standardize] techniques for 2D
+Architectures Methods in AUCMEDI.
- The base key (str) can be passed to the [DataGenerator][aucmedi.data_processing.data_generator.DataGenerator] as `standardize_mode` parameter.
+ The base key (str) can be passed to the [DataGenerator][aucmedi.data_processing.data_generator.DataGenerator] as
+ `standardize_mode` parameter.
???+ info
If training a new model from scratch, any Standardize technique can be used at will.
@@ -226,7 +230,8 @@
```
???+ warning
- If using an architecture key for the supported_standardize_mode dictionary, be aware that you have to add "2D." in front of it.
+ If using an architecture key for the supported_standardize_mode dictionary, be aware that you have to add "2D."
+ in front of it.
For example:
```python
@@ -235,3 +240,42 @@
sf_norm = supported_standardize_mode["2D.ResNet101"]
```
"""
+
+
+__all__ = [
+ "Architecture_Base",
+ "architecture_dict",
+ "architectures",
+ "supported_standardize_mode",
+ "Vanilla",
+ "ResNet50",
+ "ResNet101",
+ "ResNet152",
+ "ResNet50V2",
+ "ResNet101V2",
+ "ResNet152V2",
+ "DenseNet121",
+ "DenseNet169",
+ "DenseNet201",
+ "EfficientNetB0",
+ "EfficientNetB1",
+ "EfficientNetB2",
+ "EfficientNetB3",
+ "EfficientNetB4",
+ "EfficientNetB5",
+ "EfficientNetB6",
+ "EfficientNetB7",
+ "InceptionResNetV2",
+ "InceptionV3",
+ "MobileNet",
+ "MobileNetV2",
+ "NASNetMobile",
+ "NASNetLarge",
+ "VGG16",
+ "VGG19",
+ "Xception",
+ "ConvNeXtBase",
+ "ConvNeXtTiny",
+ "ConvNeXtSmall",
+ "ConvNeXtLarge"
+]
diff --git a/aucmedi/neural_network/architectures/image/convnext_base.py b/aucmedi/neural_network/architectures/image/convnext_base.py
index 83788e5d..dda3b88a 100644
--- a/aucmedi/neural_network/architectures/image/convnext_base.py
+++ b/aucmedi/neural_network/architectures/image/convnext_base.py
@@ -41,15 +41,19 @@
10 Jan 2022. A ConvNet for the 2020s.
[https://arxiv.org/abs/2201.03545](https://arxiv.org/abs/2201.03545)
-"""
+""" # noqa E501
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications.convnext import ConvNeXtBase as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ConvNeXtBase #
#-----------------------------------------------------#
@@ -68,8 +72,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
        # Obtain ConvNeXtBase as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/convnext_large.py b/aucmedi/neural_network/architectures/image/convnext_large.py
index 3b50eff6..90d087cb 100644
--- a/aucmedi/neural_network/architectures/image/convnext_large.py
+++ b/aucmedi/neural_network/architectures/image/convnext_large.py
@@ -41,15 +41,19 @@
10 Jan 2022. A ConvNet for the 2020s.
[https://arxiv.org/abs/2201.03545](https://arxiv.org/abs/2201.03545)
-"""
+""" # noqa E501
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications.convnext import ConvNeXtLarge as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ConvNeXtLarge #
#-----------------------------------------------------#
@@ -68,8 +72,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
        # Obtain ConvNeXtLarge as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/convnext_small.py b/aucmedi/neural_network/architectures/image/convnext_small.py
index a12f893a..d36f0045 100644
--- a/aucmedi/neural_network/architectures/image/convnext_small.py
+++ b/aucmedi/neural_network/architectures/image/convnext_small.py
@@ -39,15 +39,19 @@
10 Jan 2022. A ConvNet for the 2020s.
[https://arxiv.org/abs/2201.03545](https://arxiv.org/abs/2201.03545)
-"""
+""" # noqa E501
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications.convnext import ConvNeXtSmall as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ConvNeXtSmall #
#-----------------------------------------------------#
@@ -66,8 +70,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
        # Obtain ConvNeXtSmall as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/convnext_tiny.py b/aucmedi/neural_network/architectures/image/convnext_tiny.py
index 493373bf..0794b6bb 100644
--- a/aucmedi/neural_network/architectures/image/convnext_tiny.py
+++ b/aucmedi/neural_network/architectures/image/convnext_tiny.py
@@ -39,15 +39,19 @@
10 Jan 2022. A ConvNet for the 2020s.
[https://arxiv.org/abs/2201.03545](https://arxiv.org/abs/2201.03545)
-"""
+""" # noqa E501
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications.convnext import ConvNeXtTiny as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ConvNeXtTiny #
#-----------------------------------------------------#
@@ -66,8 +70,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
        # Obtain ConvNeXtTiny as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/densenet121.py b/aucmedi/neural_network/architectures/image/densenet121.py
index 550d84ec..a98beec8 100644
--- a/aucmedi/neural_network/architectures/image/densenet121.py
+++ b/aucmedi/neural_network/architectures/image/densenet121.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import DenseNet121 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: DenseNet121 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain DenseNet121 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/densenet169.py b/aucmedi/neural_network/architectures/image/densenet169.py
index 1a66d825..50161b55 100644
--- a/aucmedi/neural_network/architectures/image/densenet169.py
+++ b/aucmedi/neural_network/architectures/image/densenet169.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import DenseNet169 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: DenseNet169 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain DenseNet169 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/densenet201.py b/aucmedi/neural_network/architectures/image/densenet201.py
index d5273007..af5c0df9 100644
--- a/aucmedi/neural_network/architectures/image/densenet201.py
+++ b/aucmedi/neural_network/architectures/image/densenet201.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import DenseNet201 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: DenseNet201 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain DenseNet201 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/efficientnetb0.py b/aucmedi/neural_network/architectures/image/efficientnetb0.py
index b418187f..89dc3db9 100644
--- a/aucmedi/neural_network/architectures/image/efficientnetb0.py
+++ b/aucmedi/neural_network/architectures/image/efficientnetb0.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import EfficientNetB0 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: EfficientNetB0 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain EfficientNet as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/efficientnetb1.py b/aucmedi/neural_network/architectures/image/efficientnetb1.py
index a168fae8..598aa2f7 100644
--- a/aucmedi/neural_network/architectures/image/efficientnetb1.py
+++ b/aucmedi/neural_network/architectures/image/efficientnetb1.py
@@ -1,4 +1,4 @@
-#==============================================================================#
+# ==============================================================================#
# Author: Dominik Müller #
# Copyright: 2024 IT-Infrastructure for Translational Medical Research, #
# University of Augsburg #
@@ -15,10 +15,10 @@
# #
# You should have received a copy of the GNU General Public License #
#    along with this program. If not, see <http://www.gnu.org/licenses/>.     #
-#==============================================================================#
-#-----------------------------------------------------#
+# ==============================================================================#
+# -----------------------------------------------------#
# Documentation #
-#-----------------------------------------------------#
+# -----------------------------------------------------#
""" The classification variant of the EfficientNetB1 architecture.
| Architecture Variable | Value |
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import EfficientNetB1 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: EfficientNetB1 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(240, 240),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain EfficientNet as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/efficientnetb2.py b/aucmedi/neural_network/architectures/image/efficientnetb2.py
index 5f2b5001..b6c87682 100644
--- a/aucmedi/neural_network/architectures/image/efficientnetb2.py
+++ b/aucmedi/neural_network/architectures/image/efficientnetb2.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import EfficientNetB2 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: EfficientNetB2 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(260, 260),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain EfficientNet as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/efficientnetb3.py b/aucmedi/neural_network/architectures/image/efficientnetb3.py
index 6a325797..e00f7827 100644
--- a/aucmedi/neural_network/architectures/image/efficientnetb3.py
+++ b/aucmedi/neural_network/architectures/image/efficientnetb3.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import EfficientNetB3 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: EfficientNetB3 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(300, 300),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain EfficientNet as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/efficientnetb4.py b/aucmedi/neural_network/architectures/image/efficientnetb4.py
index 2311a3eb..ace11610 100644
--- a/aucmedi/neural_network/architectures/image/efficientnetb4.py
+++ b/aucmedi/neural_network/architectures/image/efficientnetb4.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import EfficientNetB4 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: EfficientNetB4 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(380, 380),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain EfficientNet as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/efficientnetb5.py b/aucmedi/neural_network/architectures/image/efficientnetb5.py
index cdd17751..5bfb3ec8 100644
--- a/aucmedi/neural_network/architectures/image/efficientnetb5.py
+++ b/aucmedi/neural_network/architectures/image/efficientnetb5.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import EfficientNetB5 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: EfficientNetB5 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(456, 456),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain EfficientNet as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/efficientnetb6.py b/aucmedi/neural_network/architectures/image/efficientnetb6.py
index 4b7d5f37..7d6d3ac2 100644
--- a/aucmedi/neural_network/architectures/image/efficientnetb6.py
+++ b/aucmedi/neural_network/architectures/image/efficientnetb6.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import EfficientNetB6 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: EfficientNetB6 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(528, 528),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain EfficientNet as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/efficientnetb7.py b/aucmedi/neural_network/architectures/image/efficientnetb7.py
index 88828ffd..7678d0e2 100644
--- a/aucmedi/neural_network/architectures/image/efficientnetb7.py
+++ b/aucmedi/neural_network/architectures/image/efficientnetb7.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import EfficientNetB7 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: EfficientNetB7 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(600, 600),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain EfficientNet as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/inceptionresnetv2.py b/aucmedi/neural_network/architectures/image/inceptionresnetv2.py
index c98f93ad..bc1bb020 100644
--- a/aucmedi/neural_network/architectures/image/inceptionresnetv2.py
+++ b/aucmedi/neural_network/architectures/image/inceptionresnetv2.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import InceptionResNetV2 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: InceptionResNetV2 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(299, 299),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain InceptionResNetV2 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/inceptionv3.py b/aucmedi/neural_network/architectures/image/inceptionv3.py
index 2977db3f..2005bf68 100644
--- a/aucmedi/neural_network/architectures/image/inceptionv3.py
+++ b/aucmedi/neural_network/architectures/image/inceptionv3.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import InceptionV3 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: InceptionV3 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(299, 299),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain InceptionV3 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/mobilenet.py b/aucmedi/neural_network/architectures/image/mobilenet.py
index ca7d9a45..6ecf45b2 100644
--- a/aucmedi/neural_network/architectures/image/mobilenet.py
+++ b/aucmedi/neural_network/architectures/image/mobilenet.py
@@ -40,11 +40,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import MobileNet as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: MobileNet #
#-----------------------------------------------------#
@@ -63,8 +67,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain MobileNet as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/mobilenetv2.py b/aucmedi/neural_network/architectures/image/mobilenetv2.py
index 66ad4452..ed4bba7e 100644
--- a/aucmedi/neural_network/architectures/image/mobilenetv2.py
+++ b/aucmedi/neural_network/architectures/image/mobilenetv2.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import MobileNetV2 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: MobileNetV2 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain MobileNetV2 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/nasnetlarge.py b/aucmedi/neural_network/architectures/image/nasnetlarge.py
index 19802596..341c1cdc 100644
--- a/aucmedi/neural_network/architectures/image/nasnetlarge.py
+++ b/aucmedi/neural_network/architectures/image/nasnetlarge.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import NASNetLarge as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: NASNetLarge #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(331, 331),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain NASNetLarge as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/nasnetmobile.py b/aucmedi/neural_network/architectures/image/nasnetmobile.py
index 51b70756..c7aa9c1f 100644
--- a/aucmedi/neural_network/architectures/image/nasnetmobile.py
+++ b/aucmedi/neural_network/architectures/image/nasnetmobile.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import NASNetMobile as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: NASNetMobile #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain NASNetMobile as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/resnet101.py b/aucmedi/neural_network/architectures/image/resnet101.py
index c815e645..15399e05 100644
--- a/aucmedi/neural_network/architectures/image/resnet101.py
+++ b/aucmedi/neural_network/architectures/image/resnet101.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications.resnet import ResNet101 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNet101 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNet101 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/resnet101v2.py b/aucmedi/neural_network/architectures/image/resnet101v2.py
index 968369aa..139af613 100644
--- a/aucmedi/neural_network/architectures/image/resnet101v2.py
+++ b/aucmedi/neural_network/architectures/image/resnet101v2.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import ResNet101V2 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNet101V2 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNet101V2 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/resnet152.py b/aucmedi/neural_network/architectures/image/resnet152.py
index ad2843c5..adf53546 100644
--- a/aucmedi/neural_network/architectures/image/resnet152.py
+++ b/aucmedi/neural_network/architectures/image/resnet152.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications.resnet import ResNet152 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNet152 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNet152 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/resnet152v2.py b/aucmedi/neural_network/architectures/image/resnet152v2.py
index 81436466..639e7ad6 100644
--- a/aucmedi/neural_network/architectures/image/resnet152v2.py
+++ b/aucmedi/neural_network/architectures/image/resnet152v2.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import ResNet152V2 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNet152V2 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNet152V2 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/resnet50.py b/aucmedi/neural_network/architectures/image/resnet50.py
index 06d0ba71..6e00d460 100644
--- a/aucmedi/neural_network/architectures/image/resnet50.py
+++ b/aucmedi/neural_network/architectures/image/resnet50.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications.resnet import ResNet50 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNet50 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNet50 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/resnet50v2.py b/aucmedi/neural_network/architectures/image/resnet50v2.py
index 1fe2b090..92bbae44 100644
--- a/aucmedi/neural_network/architectures/image/resnet50v2.py
+++ b/aucmedi/neural_network/architectures/image/resnet50v2.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications import ResNet50V2 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNet50V2 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNet50V2 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/resnext101.py b/aucmedi/neural_network/architectures/image/resnext101.py
index c200c7be..dfdd30f9 100644
--- a/aucmedi/neural_network/architectures/image/resnext101.py
+++ b/aucmedi/neural_network/architectures/image/resnext101.py
@@ -39,12 +39,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
-from tensorflow import keras
+# Python Standard Library
+
+# Third Party Libraries
from keras_applications.resnext import ResNeXt101 as BaseModel
-# Internal libraries
+from tensorflow import keras
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNeXt101 #
#-----------------------------------------------------#
@@ -63,8 +67,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNeXt101 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/resnext50.py b/aucmedi/neural_network/architectures/image/resnext50.py
index 4c3a52a9..ad6a9d07 100644
--- a/aucmedi/neural_network/architectures/image/resnext50.py
+++ b/aucmedi/neural_network/architectures/image/resnext50.py
@@ -39,12 +39,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
-from tensorflow import keras
+# Python Standard Library
+
+# Third Party Libraries
from keras_applications.resnext import ResNeXt50 as BaseModel
-# Internal libraries
+from tensorflow import keras
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNeXt50 #
#-----------------------------------------------------#
@@ -63,8 +67,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNeXt50 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/vanilla.py b/aucmedi/neural_network/architectures/image/vanilla.py
index 2d85e293..78012364 100644
--- a/aucmedi/neural_network/architectures/image/vanilla.py
+++ b/aucmedi/neural_network/architectures/image/vanilla.py
@@ -35,12 +35,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras import Input
from tensorflow.keras.layers import Conv2D, MaxPooling2D
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Vanilla Architecture #
#-----------------------------------------------------#
diff --git a/aucmedi/neural_network/architectures/image/vgg16.py b/aucmedi/neural_network/architectures/image/vgg16.py
index 9a5ad8dc..741b63b0 100644
--- a/aucmedi/neural_network/architectures/image/vgg16.py
+++ b/aucmedi/neural_network/architectures/image/vgg16.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications.vgg16 import VGG16 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: VGG16 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain VGG16 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/vgg19.py b/aucmedi/neural_network/architectures/image/vgg19.py
index 2787af6e..d176d659 100644
--- a/aucmedi/neural_network/architectures/image/vgg19.py
+++ b/aucmedi/neural_network/architectures/image/vgg19.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications.vgg19 import VGG19 as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: VGG19 #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain VGG19 as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/image/vit_b16.py b/aucmedi/neural_network/architectures/image/vit_b16.py
index 646a4c81..a179cff6 100644
--- a/aucmedi/neural_network/architectures/image/vit_b16.py
+++ b/aucmedi/neural_network/architectures/image/vit_b16.py
@@ -44,21 +44,26 @@
```
@article{dosovitskiy2020vit,
title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
- author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil},
+ author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and
+ Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and
+ Uszkoreit, Jakob and Houlsby, Neil},
journal={ICLR},
year={2021}
}
@article{tolstikhin2021mixer,
title={MLP-Mixer: An all-MLP Architecture for Vision},
- author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner, Thomas and Yung, Jessica and Steiner, Andreas and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey},
+ author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and
+ Unterthiner, Thomas and Yung, Jessica and Steiner, Andreas and Keysers, Daniel and Uszkoreit, Jakob and
+ Lucic, Mario and Dosovitskiy, Alexey},
journal={arXiv preprint arXiv:2105.01601},
year={2021}
}
@article{steiner2021augreg,
title={How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers},
- author={Steiner, Andreas and Kolesnikov, Alexander and and Zhai, Xiaohua and Wightman, Ross and Uszkoreit, Jakob and Beyer, Lucas},
+ author={Steiner, Andreas and Kolesnikov, Alexander and Zhai, Xiaohua and Wightman, Ross and Uszkoreit, Jakob
+ and Beyer, Lucas},
journal={arXiv preprint arXiv:2106.10270},
year={2021}
}
@@ -72,7 +77,8 @@
@article{zhai2022lit,
title={LiT: Zero-Shot Transfer with Locked-image Text Tuning},
- author={Zhai, Xiaohua and Wang, Xiao and Mustafa, Basil and Steiner, Andreas and Keysers, Daniel and Kolesnikov, Alexander and Beyer, Lucas},
+ author={Zhai, Xiaohua and Wang, Xiao and Mustafa, Basil and Steiner, Andreas and Keysers, Daniel and
+ Kolesnikov, Alexander and Beyer, Lucas},
journal={CVPR},
year={2022}
}
@@ -81,11 +87,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from vit_keras import vit
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: Vision Transformer (ViT) #
#-----------------------------------------------------#
@@ -104,8 +114,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : pretrained = True
- else : pretrained = False
+ if self.pretrained_weights:
+ pretrained = True
+ else:
+ pretrained = False
# Obtain ViT B16 as base model
base_model = vit.vit_b16(image_size=self.input[:-1],
diff --git a/aucmedi/neural_network/architectures/image/vit_b32.py b/aucmedi/neural_network/architectures/image/vit_b32.py
index c3b97c24..e2cef9f7 100644
--- a/aucmedi/neural_network/architectures/image/vit_b32.py
+++ b/aucmedi/neural_network/architectures/image/vit_b32.py
@@ -44,21 +44,26 @@
```
@article{dosovitskiy2020vit,
title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
- author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil},
+ author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and
+ Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and
+ Uszkoreit, Jakob and Houlsby, Neil},
journal={ICLR},
year={2021}
}
@article{tolstikhin2021mixer,
title={MLP-Mixer: An all-MLP Architecture for Vision},
- author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner, Thomas and Yung, Jessica and Steiner, Andreas and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey},
+ author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and
+ Unterthiner, Thomas and Yung, Jessica and Steiner, Andreas and Keysers, Daniel and Uszkoreit, Jakob and
+ Lucic, Mario and Dosovitskiy, Alexey},
journal={arXiv preprint arXiv:2105.01601},
year={2021}
}
@article{steiner2021augreg,
title={How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers},
- author={Steiner, Andreas and Kolesnikov, Alexander and and Zhai, Xiaohua and Wightman, Ross and Uszkoreit, Jakob and Beyer, Lucas},
+ author={Steiner, Andreas and Kolesnikov, Alexander and Zhai, Xiaohua and Wightman, Ross and Uszkoreit, Jakob
+ and Beyer, Lucas},
journal={arXiv preprint arXiv:2106.10270},
year={2021}
}
@@ -72,20 +77,26 @@
@article{zhai2022lit,
title={LiT: Zero-Shot Transfer with Locked-image Text Tuning},
- author={Zhai, Xiaohua and Wang, Xiao and Mustafa, Basil and Steiner, Andreas and Keysers, Daniel and Kolesnikov, Alexander and Beyer, Lucas},
+ author={Zhai, Xiaohua and Wang, Xiao and Mustafa, Basil and Steiner, Andreas and Keysers, Daniel and
+ Kolesnikov, Alexander and Beyer, Lucas},
journal={CVPR},
year={2022}
}
```
"""
+
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from vit_keras import vit
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: Vision Transformer (ViT) #
#-----------------------------------------------------#
@@ -104,8 +115,10 @@ def __init__(self, classification_head, channels, input_shape=(224, 224),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : pretrained = True
- else : pretrained = False
+ if self.pretrained_weights:
+ pretrained = True
+ else:
+ pretrained = False
# Obtain ViT B32 as base model
base_model = vit.vit_b32(image_size=self.input[:-1],
diff --git a/aucmedi/neural_network/architectures/image/vit_l16.py b/aucmedi/neural_network/architectures/image/vit_l16.py
index f5264d8f..506ed622 100644
--- a/aucmedi/neural_network/architectures/image/vit_l16.py
+++ b/aucmedi/neural_network/architectures/image/vit_l16.py
@@ -44,21 +44,26 @@
```
@article{dosovitskiy2020vit,
title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
- author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil},
+ author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and
+ Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and
+ Uszkoreit, Jakob and Houlsby, Neil},
journal={ICLR},
year={2021}
}
@article{tolstikhin2021mixer,
title={MLP-Mixer: An all-MLP Architecture for Vision},
- author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner, Thomas and Yung, Jessica and Steiner, Andreas and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey},
+ author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and
+ Unterthiner, Thomas and Yung, Jessica and Steiner, Andreas and Keysers, Daniel and Uszkoreit, Jakob and
+ Lucic, Mario and Dosovitskiy, Alexey},
journal={arXiv preprint arXiv:2105.01601},
year={2021}
}
@article{steiner2021augreg,
title={How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers},
- author={Steiner, Andreas and Kolesnikov, Alexander and and Zhai, Xiaohua and Wightman, Ross and Uszkoreit, Jakob and Beyer, Lucas},
+ author={Steiner, Andreas and Kolesnikov, Alexander and Zhai, Xiaohua and Wightman, Ross and Uszkoreit, Jakob
+ and Beyer, Lucas},
journal={arXiv preprint arXiv:2106.10270},
year={2021}
}
@@ -72,7 +77,8 @@
@article{zhai2022lit,
title={LiT: Zero-Shot Transfer with Locked-image Text Tuning},
- author={Zhai, Xiaohua and Wang, Xiao and Mustafa, Basil and Steiner, Andreas and Keysers, Daniel and Kolesnikov, Alexander and Beyer, Lucas},
+ author={Zhai, Xiaohua and Wang, Xiao and Mustafa, Basil and Steiner, Andreas and Keysers, Daniel and Kolesnikov,
+ Alexander and Beyer, Lucas},
journal={CVPR},
year={2022}
}
@@ -81,11 +87,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from vit_keras import vit
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: Vision Transformer (ViT) #
#-----------------------------------------------------#
@@ -104,8 +114,10 @@ def __init__(self, classification_head, channels, input_shape=(384, 384),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : pretrained = True
- else : pretrained = False
+ if self.pretrained_weights:
+ pretrained = True
+ else:
+ pretrained = False
# Obtain ViT L16 as base model
base_model = vit.vit_l16(image_size=self.input[:-1],
diff --git a/aucmedi/neural_network/architectures/image/vit_l32.py b/aucmedi/neural_network/architectures/image/vit_l32.py
index 8d2d812b..0397fdbf 100644
--- a/aucmedi/neural_network/architectures/image/vit_l32.py
+++ b/aucmedi/neural_network/architectures/image/vit_l32.py
@@ -44,21 +44,26 @@
```
@article{dosovitskiy2020vit,
title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
- author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil},
+ author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and
+ Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and
+ Uszkoreit, Jakob and Houlsby, Neil},
journal={ICLR},
year={2021}
}
@article{tolstikhin2021mixer,
title={MLP-Mixer: An all-MLP Architecture for Vision},
- author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and Unterthiner, Thomas and Yung, Jessica and Steiner, Andreas and Keysers, Daniel and Uszkoreit, Jakob and Lucic, Mario and Dosovitskiy, Alexey},
+ author={Tolstikhin, Ilya and Houlsby, Neil and Kolesnikov, Alexander and Beyer, Lucas and Zhai, Xiaohua and
+ Unterthiner, Thomas and Yung, Jessica and Steiner, Andreas and Keysers, Daniel and Uszkoreit, Jakob and Lucic,
+ Mario and Dosovitskiy, Alexey},
journal={arXiv preprint arXiv:2105.01601},
year={2021}
}
@article{steiner2021augreg,
title={How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers},
- author={Steiner, Andreas and Kolesnikov, Alexander and and Zhai, Xiaohua and Wightman, Ross and Uszkoreit, Jakob and Beyer, Lucas},
+ author={Steiner, Andreas and Kolesnikov, Alexander and Zhai, Xiaohua and Wightman, Ross and Uszkoreit, Jakob
+ and Beyer, Lucas},
journal={arXiv preprint arXiv:2106.10270},
year={2021}
}
@@ -72,7 +77,8 @@
@article{zhai2022lit,
title={LiT: Zero-Shot Transfer with Locked-image Text Tuning},
- author={Zhai, Xiaohua and Wang, Xiao and Mustafa, Basil and Steiner, Andreas and Keysers, Daniel and Kolesnikov, Alexander and Beyer, Lucas},
+ author={Zhai, Xiaohua and Wang, Xiao and Mustafa, Basil and Steiner, Andreas and Keysers, Daniel and Kolesnikov,
+ Alexander and Beyer, Lucas},
journal={CVPR},
year={2022}
}
@@ -81,11 +87,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from vit_keras import vit
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: Vision Transformer (ViT) #
#-----------------------------------------------------#
@@ -104,8 +114,10 @@ def __init__(self, classification_head, channels, input_shape=(384, 384),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : pretrained = True
- else : pretrained = False
+ if self.pretrained_weights:
+ pretrained = True
+ else:
+ pretrained = False
# Obtain ViT L32 as base model
base_model = vit.vit_l32(image_size=self.input[:-1],
diff --git a/aucmedi/neural_network/architectures/image/xception.py b/aucmedi/neural_network/architectures/image/xception.py
index fe35ce33..442b759d 100644
--- a/aucmedi/neural_network/architectures/image/xception.py
+++ b/aucmedi/neural_network/architectures/image/xception.py
@@ -39,11 +39,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.applications.xception import Xception as BaseModel
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: Xception #
#-----------------------------------------------------#
@@ -62,8 +66,10 @@ def __init__(self, classification_head, channels, input_shape=(299, 299),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain Xception as base model
base_model = BaseModel(include_top=False, weights=model_weights,
diff --git a/aucmedi/neural_network/architectures/volume/__init__.py b/aucmedi/neural_network/architectures/volume/__init__.py
index 69e77f75..41dbb346 100644
--- a/aucmedi/neural_network/architectures/volume/__init__.py
+++ b/aucmedi/neural_network/architectures/volume/__init__.py
@@ -49,6 +49,7 @@
from aucmedi.neural_network.architectures.volume.convnext_base import ConvNeXtBase
from aucmedi.neural_network.architectures.volume.convnext_large import ConvNeXtLarge
+
#-----------------------------------------------------#
# Access Functions to Architecture Classes #
#-----------------------------------------------------#
@@ -76,7 +77,8 @@
}
""" Dictionary of implemented 3D Architectures Methods in AUCMEDI.
- The base key (str) or an initialized Architecture can be passed to the [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork] class as `architecture` parameter.
+ The base key (str) or an initialized Architecture can be passed to the
+ [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork] class as the `architecture` parameter.
???+ example "Example"
```python title="Recommended via NeuralNetwork class"
@@ -113,7 +115,8 @@
architecture="3D.ResNeXt101"
```
- Architectures are based on the abstract base class [aucmedi.neural_network.architectures.arch_base.Architecture_Base][].
+ Architectures are based on the abstract base class
+ [aucmedi.neural_network.architectures.arch_base.Architecture_Base][].
"""
# List of implemented architectures
@@ -144,9 +147,11 @@
"ConvNeXtBase": None,
"ConvNeXtLarge": None,
}
-""" Dictionary of recommended [Standardize][aucmedi.data_processing.subfunctions.standardize] techniques for 3D Architectures Methods in AUCMEDI.
+""" Dictionary of recommended [Standardize][aucmedi.data_processing.subfunctions.standardize] techniques for 3D
+ Architectures Methods in AUCMEDI.
- The base key (str) can be passed to the [DataGenerator][aucmedi.data_processing.data_generator.DataGenerator] as `standardize_mode` parameter.
+ The base key (str) can be passed to the [DataGenerator][aucmedi.data_processing.data_generator.DataGenerator] as
+ the `standardize_mode` parameter.
???+ info
If training a new model from scratch, any Standardize technique can be used at will.
@@ -171,7 +176,8 @@
```
???+ warning
- If using an architecture key for the supported_standardize_mode dictionary, be aware that you have to add "3D." in front of it.
+ If using an architecture key for the supported_standardize_mode dictionary, be aware that you have to add "3D."
+ in front of it.
For example:
```python
@@ -180,3 +186,29 @@
sf_norm = supported_standardize_mode["3D.ResNeXt101"]
```
"""
+
+__all__ = [
+ "Architecture_Base",
+ "Vanilla",
+ "DenseNet121",
+ "DenseNet169",
+ "DenseNet201",
+ "ResNet18",
+ "ResNet34",
+ "ResNet50",
+ "ResNet101",
+ "ResNet152",
+ "ResNeXt50",
+ "ResNeXt101",
+ "MobileNet",
+ "MobileNetV2",
+ "VGG16",
+ "VGG19",
+ "ConvNeXtTiny",
+ "ConvNeXtSmall",
+ "ConvNeXtBase",
+ "ConvNeXtLarge",
+ "architecture_dict",
+ "architectures",
+ "supported_standardize_mode",
+]
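The `__all__` list added here makes the volume package's public interface explicit for wildcard imports and documentation tooling. A minimal usage sketch tying the documented pieces together; the `n_labels`/`channels` values are illustrative, and the import locations are assumed from the package layout shown in this diff:

```python
from aucmedi import NeuralNetwork
from aucmedi.neural_network.architectures import supported_standardize_mode

# Volume architectures are addressed with the "3D." prefix, as the warning
# above notes (illustrative n_labels/channels values).
model = NeuralNetwork(n_labels=4, channels=1, architecture="3D.ResNeXt101")
sf_norm = supported_standardize_mode["3D.ResNeXt101"]
```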
diff --git a/aucmedi/neural_network/architectures/volume/convnext_base.py b/aucmedi/neural_network/architectures/volume/convnext_base.py
index f52dd639..08704e51 100644
--- a/aucmedi/neural_network/architectures/volume/convnext_base.py
+++ b/aucmedi/neural_network/architectures/volume/convnext_base.py
@@ -45,11 +45,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ConvNeXt Base #
#-----------------------------------------------------#
@@ -69,14 +73,16 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ConvNeXtBase as base model
BaseModel, preprocess_input = Classifiers.get("convnext_base")
base_model = BaseModel(include_top=False, weights=model_weights,
input_tensor=None, input_shape=self.input,
- pooling=None,
+ pooling=None,
include_preprocessing=self.preprocessing)
top_model = base_model.output
diff --git a/aucmedi/neural_network/architectures/volume/convnext_large.py b/aucmedi/neural_network/architectures/volume/convnext_large.py
index 751ab19d..e189838c 100644
--- a/aucmedi/neural_network/architectures/volume/convnext_large.py
+++ b/aucmedi/neural_network/architectures/volume/convnext_large.py
@@ -45,11 +45,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ConvNeXt Large #
#-----------------------------------------------------#
@@ -69,14 +73,16 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ConvNeXtLarge as base model
BaseModel, preprocess_input = Classifiers.get("convnext_large")
base_model = BaseModel(include_top=False, weights=model_weights,
input_tensor=None, input_shape=self.input,
- pooling=None,
+ pooling=None,
include_preprocessing=self.preprocessing)
top_model = base_model.output
diff --git a/aucmedi/neural_network/architectures/volume/convnext_small.py b/aucmedi/neural_network/architectures/volume/convnext_small.py
index 70b72297..ed045188 100644
--- a/aucmedi/neural_network/architectures/volume/convnext_small.py
+++ b/aucmedi/neural_network/architectures/volume/convnext_small.py
@@ -45,11 +45,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ConvNeXt Small #
#-----------------------------------------------------#
@@ -69,14 +73,16 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ConvNeXtSmall as base model
BaseModel, preprocess_input = Classifiers.get("convnext_small")
base_model = BaseModel(include_top=False, weights=model_weights,
input_tensor=None, input_shape=self.input,
- pooling=None,
+ pooling=None,
include_preprocessing=self.preprocessing)
top_model = base_model.output
diff --git a/aucmedi/neural_network/architectures/volume/convnext_tiny.py b/aucmedi/neural_network/architectures/volume/convnext_tiny.py
index 7be6510c..0aeebc9f 100644
--- a/aucmedi/neural_network/architectures/volume/convnext_tiny.py
+++ b/aucmedi/neural_network/architectures/volume/convnext_tiny.py
@@ -45,11 +45,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ConvNeXt Tiny #
#-----------------------------------------------------#
@@ -69,14 +73,16 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ConvNeXtTiny as base model
BaseModel, preprocess_input = Classifiers.get("convnext_tiny")
base_model = BaseModel(include_top=False, weights=model_weights,
input_tensor=None, input_shape=self.input,
- pooling=None,
+ pooling=None,
include_preprocessing=self.preprocessing)
top_model = base_model.output
diff --git a/aucmedi/neural_network/architectures/volume/densenet121.py b/aucmedi/neural_network/architectures/volume/densenet121.py
index f4c8eaa3..597f1fff 100644
--- a/aucmedi/neural_network/architectures/volume/densenet121.py
+++ b/aucmedi/neural_network/architectures/volume/densenet121.py
@@ -41,11 +41,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: DenseNet121 #
#-----------------------------------------------------#
@@ -64,8 +68,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain DenseNet121 as base model
BaseModel, preprocess_input = Classifiers.get("densenet121")
diff --git a/aucmedi/neural_network/architectures/volume/densenet169.py b/aucmedi/neural_network/architectures/volume/densenet169.py
index 3fb25675..b4ba72f9 100644
--- a/aucmedi/neural_network/architectures/volume/densenet169.py
+++ b/aucmedi/neural_network/architectures/volume/densenet169.py
@@ -41,11 +41,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: DenseNet169 #
#-----------------------------------------------------#
@@ -64,8 +68,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain DenseNet169 as base model
BaseModel, preprocess_input = Classifiers.get("densenet169")
diff --git a/aucmedi/neural_network/architectures/volume/densenet201.py b/aucmedi/neural_network/architectures/volume/densenet201.py
index 7711cf7f..6ebb00b7 100644
--- a/aucmedi/neural_network/architectures/volume/densenet201.py
+++ b/aucmedi/neural_network/architectures/volume/densenet201.py
@@ -41,11 +41,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: DenseNet201 #
#-----------------------------------------------------#
@@ -64,8 +68,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain DenseNet201 as base model
BaseModel, preprocess_input = Classifiers.get("densenet201")
diff --git a/aucmedi/neural_network/architectures/volume/mobilenet.py b/aucmedi/neural_network/architectures/volume/mobilenet.py
index ae8b262c..ffa9f644 100644
--- a/aucmedi/neural_network/architectures/volume/mobilenet.py
+++ b/aucmedi/neural_network/architectures/volume/mobilenet.py
@@ -42,21 +42,25 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: MobileNet #
#-----------------------------------------------------#
-""" The classification variant of the MobileNet architecture.
-
-Methods:
- __init__ Object creation function
- create_model: Creating the MobileNet model for classification
-"""
class MobileNet(Architecture_Base):
+ """ The classification variant of the MobileNet architecture.
+
+ Methods:
+ __init__: Object creation function
+ create_model: Creating the MobileNet model for classification
+ """
#---------------------------------------------#
# Initialization #
#---------------------------------------------#
@@ -71,8 +75,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain MobileNet as base model
BaseModel, preprocess_input = Classifiers.get("mobilenet")
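This hunk is more than a reformat: a triple-quoted string placed before a `class` statement is a free-standing expression that Python evaluates and discards, so `MobileNet.__doc__` was previously `None`; as the first statement of the class body, the same text becomes the real class docstring. A quick check, assuming the export added to the volume `__all__` above:

```python
from aucmedi.neural_network.architectures.volume import MobileNet

# With the docstring inside the class body, it is attached to the class
# and visible to help() and documentation generators.
assert MobileNet.__doc__ is not None
```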
diff --git a/aucmedi/neural_network/architectures/volume/mobilenetv2.py b/aucmedi/neural_network/architectures/volume/mobilenetv2.py
index a392e0c6..6c876a1f 100644
--- a/aucmedi/neural_network/architectures/volume/mobilenetv2.py
+++ b/aucmedi/neural_network/architectures/volume/mobilenetv2.py
@@ -41,11 +41,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: MobileNetV2 #
#-----------------------------------------------------#
@@ -64,8 +68,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain MobileNetV2 as base model
BaseModel, preprocess_input = Classifiers.get("mobilenetv2")
diff --git a/aucmedi/neural_network/architectures/volume/resnet101.py b/aucmedi/neural_network/architectures/volume/resnet101.py
index 37fa4cf2..5921e928 100644
--- a/aucmedi/neural_network/architectures/volume/resnet101.py
+++ b/aucmedi/neural_network/architectures/volume/resnet101.py
@@ -41,11 +41,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNet101 #
#-----------------------------------------------------#
@@ -64,8 +68,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNet101 as base model
BaseModel, preprocess_input = Classifiers.get("resnet101")
diff --git a/aucmedi/neural_network/architectures/volume/resnet152.py b/aucmedi/neural_network/architectures/volume/resnet152.py
index dd16eb80..6da782c0 100644
--- a/aucmedi/neural_network/architectures/volume/resnet152.py
+++ b/aucmedi/neural_network/architectures/volume/resnet152.py
@@ -41,11 +41,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNet152 #
#-----------------------------------------------------#
@@ -64,8 +68,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNet152 as base model
BaseModel, preprocess_input = Classifiers.get("resnet152")
diff --git a/aucmedi/neural_network/architectures/volume/resnet18.py b/aucmedi/neural_network/architectures/volume/resnet18.py
index ba97c558..ce8b5dc2 100644
--- a/aucmedi/neural_network/architectures/volume/resnet18.py
+++ b/aucmedi/neural_network/architectures/volume/resnet18.py
@@ -41,11 +41,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNet18 #
#-----------------------------------------------------#
@@ -64,8 +68,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNet18 as base model
BaseModel, preprocess_input = Classifiers.get("resnet18")
diff --git a/aucmedi/neural_network/architectures/volume/resnet34.py b/aucmedi/neural_network/architectures/volume/resnet34.py
index a8f66a71..139a6ebe 100644
--- a/aucmedi/neural_network/architectures/volume/resnet34.py
+++ b/aucmedi/neural_network/architectures/volume/resnet34.py
@@ -41,11 +41,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNet34 #
#-----------------------------------------------------#
@@ -64,8 +68,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNet34 as base model
BaseModel, preprocess_input = Classifiers.get("resnet34")
diff --git a/aucmedi/neural_network/architectures/volume/resnet50.py b/aucmedi/neural_network/architectures/volume/resnet50.py
index 771f69d8..9d7a54b8 100644
--- a/aucmedi/neural_network/architectures/volume/resnet50.py
+++ b/aucmedi/neural_network/architectures/volume/resnet50.py
@@ -41,11 +41,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNet50 #
#-----------------------------------------------------#
@@ -64,8 +68,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNet50 as base model
BaseModel, preprocess_input = Classifiers.get("resnet50")
diff --git a/aucmedi/neural_network/architectures/volume/resnext101.py b/aucmedi/neural_network/architectures/volume/resnext101.py
index 91a8a139..ecf1f796 100644
--- a/aucmedi/neural_network/architectures/volume/resnext101.py
+++ b/aucmedi/neural_network/architectures/volume/resnext101.py
@@ -41,11 +41,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNeXt101 #
#-----------------------------------------------------#
@@ -64,8 +68,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNeXt101 as base model
BaseModel, preprocess_input = Classifiers.get("resnext101")
diff --git a/aucmedi/neural_network/architectures/volume/resnext50.py b/aucmedi/neural_network/architectures/volume/resnext50.py
index 1538b26b..24decf86 100644
--- a/aucmedi/neural_network/architectures/volume/resnext50.py
+++ b/aucmedi/neural_network/architectures/volume/resnext50.py
@@ -41,11 +41,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: ResNeXt50 #
#-----------------------------------------------------#
@@ -64,8 +68,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain ResNeXt50 as base model
BaseModel, preprocess_input = Classifiers.get("resnext50")
diff --git a/aucmedi/neural_network/architectures/volume/vanilla.py b/aucmedi/neural_network/architectures/volume/vanilla.py
index 516bf7f5..8569dab5 100644
--- a/aucmedi/neural_network/architectures/volume/vanilla.py
+++ b/aucmedi/neural_network/architectures/volume/vanilla.py
@@ -35,12 +35,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras import Input
from tensorflow.keras.layers import Conv3D, MaxPooling3D
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Vanilla Architecture #
#-----------------------------------------------------#
diff --git a/aucmedi/neural_network/architectures/volume/vgg16.py b/aucmedi/neural_network/architectures/volume/vgg16.py
index d7b8303c..b70d9180 100644
--- a/aucmedi/neural_network/architectures/volume/vgg16.py
+++ b/aucmedi/neural_network/architectures/volume/vgg16.py
@@ -41,11 +41,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: VGG16 #
#-----------------------------------------------------#
@@ -64,8 +68,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain VGG16 as base model
BaseModel, preprocess_input = Classifiers.get("vgg16")
diff --git a/aucmedi/neural_network/architectures/volume/vgg19.py b/aucmedi/neural_network/architectures/volume/vgg19.py
index b2544680..c0c6e463 100644
--- a/aucmedi/neural_network/architectures/volume/vgg19.py
+++ b/aucmedi/neural_network/architectures/volume/vgg19.py
@@ -41,11 +41,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from classification_models_3D.tfkeras import Classifiers
-# Internal libraries
+
+# Internal Libraries
from aucmedi.neural_network.architectures import Architecture_Base
+
#-----------------------------------------------------#
# Architecture class: VGG19 #
#-----------------------------------------------------#
@@ -64,8 +68,10 @@ def __init__(self, classification_head, channels, input_shape=(64, 64, 64),
#---------------------------------------------#
def create_model(self):
# Get pretrained image weights from imagenet if desired
- if self.pretrained_weights : model_weights = "imagenet"
- else : model_weights = None
+ if self.pretrained_weights:
+ model_weights = "imagenet"
+ else:
+ model_weights = None
# Obtain VGG19 as base model
BaseModel, preprocess_input = Classifiers.get("vgg19")
diff --git a/aucmedi/neural_network/loss_functions.py b/aucmedi/neural_network/loss_functions.py
index f7f2f876..82b44c42 100644
--- a/aucmedi/neural_network/loss_functions.py
+++ b/aucmedi/neural_network/loss_functions.py
@@ -19,10 +19,13 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
-from tensorflow.keras import backend as K
import tensorflow as tf
+from tensorflow.keras import backend as K
+
#-----------------------------------------------------#
# Focal Loss - Binary #
@@ -44,7 +47,8 @@ def binary_focal_loss(alpha=0.25, gamma=2.0):
??? abstract "Reference - Implementation"
Author: Umberto Griffo
GitHub: [https://github.com/umbertogriffo](https://github.com/umbertogriffo)
- Source: [https://github.com/umbertogriffo/focal-loss-keras](https://github.com/umbertogriffo/focal-loss-keras)
+ Source: [https://github.com/umbertogriffo/focal-loss-keras](https://github.com/umbertogriffo/focal-loss-keras)
+
??? abstract "Reference - Publication"
Focal Loss for Dense Object Detection (Aug 2017)
@@ -57,7 +61,8 @@ def binary_focal_loss(alpha=0.25, gamma=2.0):
Returns:
loss (Loss Function): A TensorFlow compatible loss function. This object can be
- passed to the [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork] `loss` parameter.
+ passed to the [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork]
+ `loss` parameter.
"""
def binary_focal_loss_fixed(y_true, y_pred):
y_true = tf.cast(y_true, tf.float32)
@@ -83,6 +88,7 @@ def binary_focal_loss_fixed(y_true, y_pred):
return binary_focal_loss_fixed
+
#-----------------------------------------------------#
# Focal Loss - Categorical #
#-----------------------------------------------------#
@@ -118,7 +124,8 @@ def categorical_focal_loss(alpha, gamma=2.0):
??? abstract "Reference - Implementation"
Author: Umberto Griffo
GitHub: [https://github.com/umbertogriffo](https://github.com/umbertogriffo)
- Source: [https://github.com/umbertogriffo/focal-loss-keras](https://github.com/umbertogriffo/focal-loss-keras)
+ Source: [https://github.com/umbertogriffo/focal-loss-keras](https://github.com/umbertogriffo/focal-loss-keras)
+
??? abstract "Reference - Publication"
Focal Loss for Dense Object Detection (Aug 2017)
@@ -133,7 +140,8 @@ def categorical_focal_loss(alpha, gamma=2.0):
Returns:
loss (Loss Function): A TensorFlow compatible loss function. This object can be
- passed to the [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork] `loss` parameter.
+ passed to the [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork]
+ `loss` parameter.
"""
alpha = np.array(alpha, dtype=np.float32)
@@ -154,6 +162,7 @@ def categorical_focal_loss_fixed(y_true, y_pred):
return categorical_focal_loss_fixed
+
#-----------------------------------------------------#
# Focal Loss - Multilabel #
#-----------------------------------------------------#
@@ -192,8 +201,9 @@ def multilabel_focal_loss(class_weights, gamma=2.0,
if True labels are sparse. Default value (1.0).
Returns:
loss (Loss Function): A TensorFlow compatible loss function. This object can be
- passed to the [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork] `loss` parameter.
- """
+ passed to the [NeuralNetwork][aucmedi.neural_network.model.NeuralNetwork]
+ `loss` parameter.
+ """ # noqa E501
class_weights = K.constant(class_weights, tf.float32)
gamma = K.constant(gamma, tf.float32)
class_sparsity_coefficient = K.constant(class_sparsity_coefficient,
@@ -207,7 +217,7 @@ def focal_loss_function(y_true, y_pred):
cross_entropy_0 = (1.0 - y_true) * (-K.log(K.clip(1.0 - predictions_0,
K.epsilon(), 1.0 - K.epsilon())))
- cross_entropy_1 = y_true *(class_sparsity_coefficient * -K.log(K.clip(
+ cross_entropy_1 = y_true * (class_sparsity_coefficient * -K.log(K.clip(
predictions_1, K.epsilon(), 1.0 - K.epsilon())))
cross_entropy = cross_entropy_1 + cross_entropy_0
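
For reference, a minimal usage sketch of the focal loss API rewrapped above: the returned closure plugs directly into the `loss` parameter of `NeuralNetwork`. The four-class alpha weighting below is illustrative and not part of this patch.

```python
# Internal Libraries
from aucmedi import NeuralNetwork
from aucmedi.neural_network.loss_functions import categorical_focal_loss

# Illustrative per-class weighting for a 4-class task
loss_fn = categorical_focal_loss(alpha=[0.25, 0.25, 0.25, 0.25], gamma=2.0)

# Pass the returned loss function to the NeuralNetwork `loss` parameter
model = NeuralNetwork(n_labels=4, channels=3, loss=loss_fn)
```
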
diff --git a/aucmedi/neural_network/model.py b/aucmedi/neural_network/model.py
index 435890c7..be4e9029 100644
--- a/aucmedi/neural_network/model.py
+++ b/aucmedi/neural_network/model.py
@@ -19,14 +19,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam
-import numpy as np
-# Internal libraries/scripts
-from aucmedi.neural_network.architectures import architecture_dict, \
- supported_standardize_mode, \
- Classifier
+
+# Internal Libraries
+from aucmedi.neural_network.architectures import Classifier, architecture_dict, supported_standardize_mode
+
#-----------------------------------------------------#
# Neural Network (model) class #
@@ -145,26 +146,30 @@ def __init__(self, n_labels, channels, input_shape=None, architecture=None,
n_labels (int): Number of classes/labels (important for the last layer).
channels (int): Number of channels. Grayscale:1 or RGB:3.
input_shape (tuple): Input shape of the batch imaging data (including channel axis).
- If None is provided, the default input_shape for the architecture is selected
- from the architecture dictionary.
- architecture (str or Architecture): Key (str) or instance of a neural network model Architecture class instance.
- If a string is provided, the corresponding architecture is selected from the architecture dictionary.
- A string has to begin with either '3D.' or '2D' depending on the classification task.
- By default, a 2D Vanilla Model is used as architecture.
+ If None is provided, the default input_shape for the architecture is
+ selected from the architecture dictionary.
+ architecture (str or Architecture): Key (str) or instance of a neural network model Architecture class
+ instance. If a string is provided, the corresponding architecture is
+ selected from the architecture dictionary. A string has to begin
+                                           with either '3D.' or '2D.' depending on the classification task. By
+ default, a 2D Vanilla Model is used as architecture.
pretrained_weights (bool): Option whether to utilize pretrained weights e.g. from ImageNet.
loss (Metric Function): The metric function which is used as loss for training.
- Any Metric Function defined in Keras, in aucmedi.neural_network.loss_functions or any custom
- metric function, which follows the Keras metric guidelines, can be used.
- metrics (list of Metric Functions): List of one or multiple Metric Functions, which will be shown during training.
- Any Metric Function defined in Keras or any custom metric function, which follows the Keras
- metric guidelines, can be used.
+ Any Metric Function defined in Keras, in
+ aucmedi.neural_network.loss_functions or any custom metric function,
+ which follows the Keras metric guidelines, can be used.
+ metrics (list of Metric Functions): List of one or multiple Metric Functions, which will be shown during
+ training. Any Metric Function defined in Keras or any custom metric
+ function, which follows the Keras metric guidelines, can be used.
activation_output (str): Activation function which should be used in the classification head
([Classifier][aucmedi.neural_network.architectures.classifier]).
Based on [https://www.tensorflow.org/api_docs/python/tf/keras/activations](https://www.tensorflow.org/api_docs/python/tf/keras/activations).
- fcl_dropout (bool): Option whether to utilize an additional Dense & Dropout layer in the classification head
+ fcl_dropout (bool): Option whether to utilize an additional Dense & Dropout layer in the
+ classification head
([Classifier][aucmedi.neural_network.architectures.classifier]).
- meta_variables (int): Number of metadata variables, which should be included in the classification head.
- If `None`is provided, no metadata integration block will be added to the classification head
+ meta_variables (int): Number of metadata variables, which should be included in the
+                                  classification head. If `None` is provided, no metadata integration
+ block will be added to the classification head
([Classifier][aucmedi.neural_network.architectures.classifier]).
learning_rate (float): Learning rate in which weights of the neural network will be updated.
verbose (int): Option (0/1) how much information should be written to stdout.
@@ -174,13 +179,17 @@ def __init__(self, n_labels, channels, input_shape=None, architecture=None,
However, be aware of unexpected adverse effects (experimental)!
Attributes:
- tf_epochs (int, default=5): Transfer Learning configuration: Number of epochs with frozen layers except classification head.
- tf_lr_start (float, default=1e-4): Transfer Learning configuration: Starting learning rate for frozen layer fitting.
- tf_lr_end (float, default=1e-5): Transfer Learning configuration: Starting learning rate after layer unfreezing.
- meta_input (tuple of int): Meta variable: Input shape of architecture which can be passed to a DataGenerator. For example: (224, 224).
- meta_standardize (str): Meta variable: Recommended standardize_mode of architecture which can be passed to a DataGenerator.
- For example: "torch".
- """
+ tf_epochs (int, default=5): Transfer Learning configuration: Number of epochs with frozen layers
+ except classification head.
+ tf_lr_start (float, default=1e-4): Transfer Learning configuration: Starting learning rate for frozen
+ layer fitting.
+ tf_lr_end (float, default=1e-5): Transfer Learning configuration: Starting learning rate after layer
+ unfreezing.
+ meta_input (tuple of int): Meta variable: Input shape of architecture which can be passed to a
+ DataGenerator. For example: (224, 224).
+ meta_standardize (str): Meta variable: Recommended standardize_mode of architecture which
+ can be passed to a DataGenerator. For example: "torch".
+ """ # noqa E501
# Cache parameters
self.n_labels = n_labels
self.channels = channels
@@ -196,7 +205,7 @@ def __init__(self, n_labels, channels, input_shape=None, architecture=None,
# Assemble architecture parameters
arch_paras = {"channels":channels,
"pretrained_weights": pretrained_weights}
- if input_shape is not None : arch_paras["input_shape"] = input_shape
+ if input_shape is not None: arch_paras["input_shape"] = input_shape
# Assemble classifier parameters
classifier_paras = {"n_labels": n_labels, "fcl_dropout": fcl_dropout,
"activation_output": activation_output}
@@ -276,13 +285,15 @@ def train(self, training_generator, validation_generator=None, epochs=20,
iterations (int): Number of iterations (batches) in a single epoch.
callbacks (list of Callback classes): A list of Callback classes for custom evaluation.
            class_weights (dictionary or list): A list or dictionary of float values to handle class imbalance.
- transfer_learning (bool): Option whether a transfer learning training should be performed. If true, a minimum of 5 epochs will be trained.
+ transfer_learning (bool): Option whether a transfer learning training should be performed. If
+ true, a minimum of 5 epochs will be trained.
Returns:
- history (dict): A history dictionary from a Keras history object which contains several logs.
+ history (dict): A history dictionary from a Keras history object which contains several
+ logs.
"""
        # Adjust number of iterations in training DataGenerator to allow repetition
- if iterations is not None : training_generator.set_length(iterations)
+ if iterations is not None: training_generator.set_length(iterations)
# Running a standard training process
if not transfer_learning:
# Run training process with the Keras fit function
@@ -299,8 +310,10 @@ def train(self, training_generator, validation_generator=None, epochs=20,
# Freeze all base model layers (all layers after "avg_pool")
lever = False
for layer in reversed(self.model.layers):
- if not lever and layer.name == "avg_pool" : lever = True
- elif lever : layer.trainable = False
+ if not lever and layer.name == "avg_pool":
+ lever = True
+ elif lever:
+ layer.trainable = False
# Compile model with high learning rate
self.model.compile(optimizer=Adam(learning_rate=self.tf_lr_start),
loss=self.loss, metrics=self.metrics)
@@ -333,7 +346,7 @@ def train(self, training_generator, validation_generator=None, epochs=20,
# Return combined history objects
history_out = history
# Reset number of iterations of the training DataGenerator
- if iterations is not None : training_generator.reset_length()
+ if iterations is not None: training_generator.reset_length()
# Return fitting history
return history_out
@@ -343,13 +356,16 @@ def train(self, training_generator, validation_generator=None, epochs=20,
def predict(self, prediction_generator):
""" Prediction function for the Neural Network model.
- The fitted model will predict classifications for the provided [DataGenerator][aucmedi.data_processing.data_generator.DataGenerator].
+ The fitted model will predict classifications for the provided
+ [DataGenerator][aucmedi.data_processing.data_generator.DataGenerator].
Args:
- prediction_generator (DataGenerator): A data generator which will be used for inference.
+ prediction_generator (DataGenerator): A data generator which will
+ be used for inference.
Returns:
- preds (numpy.ndarray): A NumPy array of predictions formatted with shape (n_samples, n_labels).
+ preds (numpy.ndarray): A NumPy array of predictions formatted with shape
+ (n_samples, n_labels).
"""
# Run inference process with the Keras predict function
preds = self.model.predict(prediction_generator,
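
A minimal end-to-end sketch of the `NeuralNetwork` workflow whose docstrings are rewrapped above; `samples_train`, `samples_test`, `labels_train`, and the `images/` directory are placeholders, and `"2D.ResNet50"` is assumed to be a valid key in the architecture dictionary.

```python
# Internal Libraries
from aucmedi import DataGenerator, NeuralNetwork

model = NeuralNetwork(n_labels=2, channels=3, architecture="2D.ResNet50",
                      pretrained_weights=True)

# The meta attributes feed straight into DataGenerator instances
train_gen = DataGenerator(samples_train, "images/", labels=labels_train,
                          resize=model.meta_input,
                          standardize_mode=model.meta_standardize)
test_gen = DataGenerator(samples_test, "images/",
                         resize=model.meta_input,
                         standardize_mode=model.meta_standardize)

# Transfer learning fitting (frozen base model first), then inference
history = model.train(train_gen, epochs=20, transfer_learning=True)
preds = model.predict(test_gen)
```
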
diff --git a/aucmedi/sampling/__init__.py b/aucmedi/sampling/__init__.py
index bf13153b..e3b46ec5 100644
--- a/aucmedi/sampling/__init__.py
+++ b/aucmedi/sampling/__init__.py
@@ -41,3 +41,9 @@
#-----------------------------------------------------#
from aucmedi.sampling.split import sampling_split
from aucmedi.sampling.kfold import sampling_kfold
+
+
+__all__ = [
+ "sampling_split",
+ "sampling_kfold"
+]
diff --git a/aucmedi/sampling/iterative.py b/aucmedi/sampling/iterative.py
index 7c2cc509..cf235df0 100644
--- a/aucmedi/sampling/iterative.py
+++ b/aucmedi/sampling/iterative.py
@@ -44,17 +44,19 @@
Aristotle University of Thessaloniki.
[https://link.springer.com/chapter/10.1007/978-3-642-23808-6_10](https://link.springer.com/chapter/10.1007/978-3-642-23808-6_10)
-"""
+""" # noqa E501
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
+from sklearn.model_selection._split import BaseShuffleSplit, _BaseKFold, _validate_shuffle_split
from sklearn.utils import check_random_state
-from sklearn.utils.validation import _num_samples, check_array
from sklearn.utils.multiclass import type_of_target
-from sklearn.model_selection._split import _BaseKFold, _RepeatedSplits, \
- BaseShuffleSplit, _validate_shuffle_split
+from sklearn.utils.validation import _num_samples, check_array
+
#-----------------------------------------------------#
# Subfunction for Iterative Stratification #
@@ -139,6 +141,7 @@ def IterativeStratification(labels, r, random_state):
return test_folds
+
#-----------------------------------------------------#
# KFold Sampling via Iterative Stratification #
#-----------------------------------------------------#
@@ -234,9 +237,9 @@ def split(self, X, y, groups=None):
Args:
X (array-like, shape (n_samples, n_features) ): Training data, where n_samples is the number of samples
and n_features is the number of features.
- Note that providing ``y`` is sufficient to generate the splits and
- hence ``np.zeros(n_samples)`` may be used as a placeholder for
- ``X`` instead of actual training data.
+ Note that providing ``y`` is sufficient to generate the
+ splits and hence ``np.zeros(n_samples)`` may be used as a
+ placeholder for ``X`` instead of actual training data.
y (array-like, shape (n_samples, n_labels) ): The target variable for supervised learning problems.
Multilabel stratification is done based on the y labels.
groups (object, optional): Always ignored, exists for compatibility.
@@ -248,6 +251,7 @@ def split(self, X, y, groups=None):
y = check_array(y, ensure_2d=False, dtype=None)
return super(MultilabelStratifiedKFold, self).split(X, y, groups)
+
#-----------------------------------------------------#
# Split Sampling via Iterative Stratification #
#-----------------------------------------------------#
@@ -301,17 +305,17 @@ def __init__(self, n_splits=10, test_size="default", train_size=None,
The default will change in version 0.21. It will remain 0.1 only
if ``train_size`` is unspecified, otherwise it will complement
the specified ``train_size``.
- train_size (float, int, or None, default is None): If float, should be between 0.0 and 1.0 and represent the
- proportion of the dataset to include in the train split. If
+ train_size (float, int, or None, default is None): If float, should be between 0.0 and 1.0 and represent
+ the proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
- random_state (int, RandomState instance or None, optional): If int, random_state is the seed used by the random number generator;
- If RandomState instance, random_state is the random number generator;
- If None, the random number generator is the RandomState instance used
- by `np.random`. Unlike StratifiedShuffleSplit that only uses
- random_state when ``shuffle`` == True, this multilabel implementation
- always uses the random_state since the iterative stratification
- algorithm breaks ties randomly.
+ random_state (int, RandomState instance or None, optional): If int, random_state is the seed used by the
+ random number generator; If RandomState instance, random_state is
+ the random number generator; If None, the random number generator is
+ the RandomState instance used by `np.random`. Unlike
+ StratifiedShuffleSplit that only uses random_state when ``shuffle``
+ == True, this multilabel implementation always uses the random_state
+ since the iterative stratification algorithm breaks ties randomly.
"""
super(MultilabelStratifiedShuffleSplit, self).__init__(
n_splits=n_splits, test_size=test_size, train_size=train_size, random_state=random_state)
@@ -360,9 +364,9 @@ def split(self, X, y, groups=None):
Args:
X (array-like, shape (n_samples, n_features) ): Training data, where n_samples is the number of samples
and n_features is the number of features.
- Note that providing ``y`` is sufficient to generate the splits and
- hence ``np.zeros(n_samples)`` may be used as a placeholder for
- ``X`` instead of actual training data.
+ Note that providing ``y`` is sufficient to generate the
+ splits and hence ``np.zeros(n_samples)`` may be used as a
+ placeholder for ``X`` instead of actual training data.
y (array-like, shape (n_samples, n_labels) ): The target variable for supervised learning problems.
Multilabel stratification is done based on the y labels.
groups (object, optional): Always ignored, exists for compatibility.
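
Because the docstring above states that providing `y` is sufficient to generate the splits, a toy sketch of `MultilabelStratifiedKFold` can pass a zero placeholder for `X`:

```python
# Third Party Libraries
import numpy as np

# Internal Libraries
from aucmedi.sampling.iterative import MultilabelStratifiedKFold

# Toy multi-label matrix: 8 samples, 3 labels
y = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0], [0, 0, 1],
              [1, 0, 0], [0, 1, 1], [1, 1, 1], [0, 0, 1]])
X = np.zeros(len(y))  # placeholder, y alone drives the stratification

mskf = MultilabelStratifiedKFold(n_splits=2, shuffle=True, random_state=0)
for train_idx, test_idx in mskf.split(X, y):
    print(train_idx, test_idx)
```
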
diff --git a/aucmedi/sampling/kfold.py b/aucmedi/sampling/kfold.py
index e62b5e06..a4175cea 100644
--- a/aucmedi/sampling/kfold.py
+++ b/aucmedi/sampling/kfold.py
@@ -19,12 +19,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
-from sklearn.model_selection import StratifiedKFold, KFold
-# Internal libraries
+from sklearn.model_selection import KFold, StratifiedKFold
+
+# Internal Libraries
from aucmedi.sampling.iterative import MultilabelStratifiedKFold
+
#-----------------------------------------------------#
# Function: Sampling via k-fold cross-validation #
#-----------------------------------------------------#
@@ -93,7 +97,7 @@ def sampling_kfold(samples, labels, metadata=None, n_splits=3,
# Preprocess data
x = np.asarray(samples)
y = np.asarray(labels)
- if metadata is not None : m = np.asarray(metadata)
+ if metadata is not None: m = np.asarray(metadata)
# Apply sampling and generate folds
for train, test in sampler.split(X=samples, y=wk_labels):
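
A short sketch of `sampling_kfold` usage; `samples` and `labels` are placeholders, and the tuple layout assumes folds without metadata are returned as (train_x, train_y, test_x, test_y):

```python
# Internal Libraries
from aucmedi.sampling import sampling_kfold

# samples: list of sample names, labels: one-hot encoded NumPy matrix
folds = sampling_kfold(samples, labels, n_splits=3, stratified=True)

for train_x, train_y, test_x, test_y in folds:
    print(len(train_x), len(test_x))
```
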
diff --git a/aucmedi/sampling/split.py b/aucmedi/sampling/split.py
index 50c55ab6..0acb8d59 100644
--- a/aucmedi/sampling/split.py
+++ b/aucmedi/sampling/split.py
@@ -19,12 +19,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
-from sklearn.model_selection import StratifiedShuffleSplit, ShuffleSplit
-# Internal libraries
+from sklearn.model_selection import ShuffleSplit, StratifiedShuffleSplit
+
+# Internal Libraries
from aucmedi.sampling.iterative import MultilabelStratifiedShuffleSplit
+
#-----------------------------------------------------#
# Function: Sampling via Percentage Split #
#-----------------------------------------------------#
@@ -63,7 +67,8 @@ def sampling_split(samples, labels, metadata=None, sampling=[0.8, 0.2],
Args:
samples (list of str): List of sample/index encoded as Strings.
labels (numpy.ndarray): NumPy matrix containing the ohe encoded classification.
- metadata (numpy.ndarray): NumPy matrix with additional metadata. Have to be shape (n_samples, meta_variables).
+        metadata (numpy.ndarray): NumPy matrix with additional metadata. Has to be of shape
+ (n_samples, meta_variables).
sampling (list of float): List of percentage values with split sizes.
stratified (bool): Option whether to use stratified sampling based on provided labels.
iterative (bool): Option whether to use iterative sampling algorithm.
@@ -75,12 +80,12 @@ def sampling_split(samples, labels, metadata=None, sampling=[0.8, 0.2],
"""
# Verify sampling percentages
if not np.isclose(sum(sampling), 1.0):
- raise ValueError("Sum of Percentage split ratios as sampling do not" + \
- " equal 1", sampling, np.sum(sampling))
+        raise ValueError("Sum of percentage split ratios as sampling does not"
+                         + " equal 1", sampling, np.sum(sampling))
# Initialize leftover with the complete dataset
leftover_samples = np.asarray(samples)
leftover_labels = np.asarray(labels)
- if metadata is not None : leftover_meta = np.asarray(metadata)
+ if metadata is not None: leftover_meta = np.asarray(metadata)
leftover_p = 0.0
# Initialize result list
results = []
@@ -90,8 +95,10 @@ def sampling_split(samples, labels, metadata=None, sampling=[0.8, 0.2],
# For last split, just take leftover data as subset
if i == len(sampling)-1:
# Generate split
- if metadata is None : split = (leftover_samples, leftover_labels)
- else : split = (leftover_samples, leftover_labels, leftover_meta)
+ if metadata is None:
+ split = (leftover_samples, leftover_labels)
+ else:
+ split = (leftover_samples, leftover_labels, leftover_meta)
            # Append split data and stop
results.append(split)
break
@@ -117,16 +124,15 @@ def sampling_split(samples, labels, metadata=None, sampling=[0.8, 0.2],
# Generate split
if metadata is None:
split = (leftover_samples[subsets[1]], leftover_labels[subsets[1]])
- else : split = (leftover_samples[subsets[1]],
- leftover_labels[subsets[1]],
- leftover_meta[subsets[1]])
+ else:
+ split = (leftover_samples[subsets[1]], leftover_labels[subsets[1]], leftover_meta[subsets[1]])
        # Append split data
results.append(split)
# Update remaining data
leftover_p += sampling[i]
leftover_samples = leftover_samples[subsets[0]]
leftover_labels = leftover_labels[subsets[0]]
- if metadata is not None : leftover_meta = leftover_meta[subsets[0]]
+ if metadata is not None: leftover_meta = leftover_meta[subsets[0]]
# Return result sampling
return results
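
For reference, a sketch of the percentage split handled above; as the code shows, each returned subset is a (samples, labels) tuple when no metadata is passed. `samples` and `labels` are placeholders.

```python
# Internal Libraries
from aucmedi.sampling import sampling_split

# Split into training/validation/testing subsets (ratios must sum to 1.0)
subsets = sampling_split(samples, labels, sampling=[0.7, 0.1, 0.2],
                         stratified=True, iterative=False)

(train_x, train_y) = subsets[0]
(val_x, val_y) = subsets[1]
(test_x, test_y) = subsets[2]
```
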
diff --git a/aucmedi/utils/callbacks.py b/aucmedi/utils/callbacks.py
index ae3f1588..312c958a 100644
--- a/aucmedi/utils/callbacks.py
+++ b/aucmedi/utils/callbacks.py
@@ -19,8 +19,12 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-from tensorflow.keras.callbacks import EarlyStopping
+# Python Standard Library
+
+# Third Party Libraries
import pandas as pd
+from tensorflow.keras.callbacks import EarlyStopping
+
#-----------------------------------------------------#
# Custom Callbacks #
@@ -51,6 +55,7 @@ def on_epoch_end(self, epoch, logs=None):
return
super(ThresholdEarlyStopping, self).on_epoch_end(epoch, logs)
+
class MinEpochEarlyStopping(EarlyStopping):
""" Changed baseline to act as a real baseline.
@@ -59,9 +64,9 @@ class MinEpochEarlyStopping(EarlyStopping):
??? abstract "Reference - Implementation"
Author: McLawrence
Source: https://stackoverflow.com/questions/46287403/is-there-a-way-to-implement-early-stopping-in-keras-only-after-the-first-say-1
- """
+ """ # noqa E501
def __init__(self, monitor='val_loss', min_delta=0, patience=0, verbose=0,
- mode='auto', start_epoch = 100): # add argument for starting epoch
+ mode='auto', start_epoch=100): # add argument for starting epoch
super(MinEpochEarlyStopping, self).__init__()
self.start_epoch = start_epoch
@@ -69,6 +74,7 @@ def on_epoch_end(self, epoch, logs=None):
if epoch > self.start_epoch:
super(MinEpochEarlyStopping, self).on_epoch_end(epoch, logs)
+
#-----------------------------------------------------#
# Callback Utils #
#-----------------------------------------------------#
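
A sketch of the adjusted `MinEpochEarlyStopping` signature; `model`, `train_gen`, and `val_gen` are placeholders from an existing AUCMEDI pipeline:

```python
# Internal Libraries
from aucmedi.utils.callbacks import MinEpochEarlyStopping

# Early stopping which only becomes active after epoch 50
cb_es = MinEpochEarlyStopping(monitor="val_loss", patience=10, start_epoch=50)

# Callbacks are forwarded to the Keras fit function via NeuralNetwork.train()
history = model.train(train_gen, validation_generator=val_gen,
                      epochs=200, callbacks=[cb_es])
```
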
diff --git a/aucmedi/utils/class_weights.py b/aucmedi/utils/class_weights.py
index cfd40cb4..54d32004 100644
--- a/aucmedi/utils/class_weights.py
+++ b/aucmedi/utils/class_weights.py
@@ -19,9 +19,12 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
-from sklearn.utils.class_weight import compute_class_weight, compute_sample_weight
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
+from sklearn.utils.class_weight import compute_class_weight, compute_sample_weight
+
#-----------------------------------------------------#
# Class Weight Computation #
@@ -71,6 +74,7 @@ def compute_class_weights(ohe_array, method="balanced"):
# Return resulting class weights as list and dictionary
return class_weights_list, class_weights_dict
+
#-----------------------------------------------------#
# Multi-Label Weight Computation #
#-----------------------------------------------------#
@@ -106,6 +110,7 @@ def compute_multilabel_weights(ohe_array, method="balanced"):
# Return resulting class weight list
return class_weights
+
#-----------------------------------------------------#
# Sample Weight Computation #
#-----------------------------------------------------#
@@ -113,7 +118,8 @@ def compute_sample_weights(ohe_array, method="balanced"):
""" Simple wrapper function for scikit learn sample_weight function.
The sample weights can be used for weighting the loss function on imbalanced data.
- Returned sample weight array which can be directly fed to an AUCMEDI [DataGenerator][aucmedi.data_processing.data_generator.DataGenerator].
+    Returns a sample weight array which can be directly fed to an AUCMEDI
+ [DataGenerator][aucmedi.data_processing.data_generator.DataGenerator].
???+ info
NumPy array shape has to be (n_samples, n_classes) like this: (500, 4).
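
A self-contained sketch of the class weighting wrapper touched above; the toy one-hot array is illustrative:

```python
# Third Party Libraries
import numpy as np

# Internal Libraries
from aucmedi.utils.class_weights import compute_class_weights

# Toy one-hot encoded classification with shape (n_samples, n_classes)
ohe_array = np.array([[1, 0], [1, 0], [1, 0], [0, 1]])

# Returns the computed weights both as list and as dictionary
cw_list, cw_dict = compute_class_weights(ohe_array, method="balanced")

# The dictionary form can be passed to NeuralNetwork.train(class_weights=...)
print(cw_list, cw_dict)
```
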
diff --git a/aucmedi/utils/visualizer.py b/aucmedi/utils/visualizer.py
index de99714b..450e9b71 100644
--- a/aucmedi/utils/visualizer.py
+++ b/aucmedi/utils/visualizer.py
@@ -19,10 +19,13 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
+
+# Third Party Libraries
+import matplotlib.cm as cm
import numpy as np
from PIL import Image
-import matplotlib.cm as cm
+
#-----------------------------------------------------#
# Image Visualizer #
@@ -40,12 +43,15 @@ def visualize_array(array, out_path=None):
# Ensure integer intensity values
array = np.uint8(array)
# Remove channel axis if grayscale
- if array.shape[-1] == 1 : array = np.reshape(array, array.shape[:-1])
+ if array.shape[-1] == 1: array = np.reshape(array, array.shape[:-1])
# Convert array to PIL image
image = Image.fromarray(array)
# Visualize or store image
- if out_path is None : image.show()
- else : image.save(out_path)
+ if out_path is None:
+ image.show()
+ else:
+ image.save(out_path)
+
#-----------------------------------------------------#
# XAI Heatmap Visualizer #
@@ -66,7 +72,7 @@ def visualize_heatmap(image, heatmap, out_path=None, alpha=0.4):
alpha (float): Transparency value for heatmap overlap on image (range: [0-1]).
"""
# If image is grayscale, convert to RGB
- if image.shape[-1] == 1 : image = np.concatenate((image,)*3, axis=-1)
+ if image.shape[-1] == 1: image = np.concatenate((image,)*3, axis=-1)
# Rescale heatmap to grayscale range
heatmap = np.uint8(heatmap * 255)
# Use jet colormap to colorize heatmap
@@ -80,5 +86,7 @@ def visualize_heatmap(image, heatmap, out_path=None, alpha=0.4):
si_img = si_img.astype(np.uint8)
pil_img = Image.fromarray(si_img)
# Visualize or store image
- if out_path is None : pil_img.show()
- else : pil_img.save(out_path)
+ if out_path is None:
+ pil_img.show()
+ else:
+ pil_img.save(out_path)
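
A runnable sketch of the reformatted `visualize_heatmap` function; random arrays stand in for a real image and XAI heatmap:

```python
# Third Party Libraries
import numpy as np

# Internal Libraries
from aucmedi.utils.visualizer import visualize_heatmap

# Toy grayscale image (64x64x1) and a heatmap normalized to [0, 1]
image = np.random.randint(0, 256, size=(64, 64, 1))
heatmap = np.random.rand(64, 64)

# Store the blended overlay as PNG instead of showing it interactively
visualize_heatmap(image, heatmap, out_path="overlay.png", alpha=0.4)
```
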
diff --git a/aucmedi/xai/__init__.py b/aucmedi/xai/__init__.py
index efdbff83..2e30b69b 100644
--- a/aucmedi/xai/__init__.py
+++ b/aucmedi/xai/__init__.py
@@ -50,3 +50,9 @@
# Import XAI functionalities
from aucmedi.xai.methods import xai_dict
from aucmedi.xai.decoder import xai_decoder
+
+
+__all__ = [
+ "xai_dict",
+ "xai_decoder"
+]
diff --git a/aucmedi/xai/decoder.py b/aucmedi/xai/decoder.py
index ab73df6b..42cdefc1 100644
--- a/aucmedi/xai/decoder.py
+++ b/aucmedi/xai/decoder.py
@@ -19,14 +19,18 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
-import numpy as np
+# Python Standard Library
import os
-# AUCMEDI Libraries
-from aucmedi.xai.methods import xai_dict
+
+# Third Party Libraries
+import numpy as np
+
+# Internal Libraries
from aucmedi.data_processing.io_loader import image_loader
-from aucmedi.utils.visualizer import visualize_heatmap
from aucmedi.data_processing.subfunctions import Resize
+from aucmedi.utils.visualizer import visualize_heatmap
+from aucmedi.xai.methods import xai_dict
+
#-----------------------------------------------------#
# XAI - Decoder #
@@ -66,29 +70,36 @@ def xai_decoder(data_gen, model, preds=None, method="gradcam", layerName=None,
Args:
data_gen (DataGenerator): A data generator which will be used for inference.
- model (NeuralNetwork): Instance of a AUCMEDI neural network class.
- preds (numpy.ndarray): NumPy Array of classification prediction encoded as OHE (output of a AUCMEDI prediction).
- method (str): XAI method class instance or index. By default, GradCAM is used as XAI method.
- layerName (str): Layer name of the convolutional layer for heatmap computation. If `None`, the last conv layer is used.
- alpha (float): Transparency value for heatmap overlap plotting on input image (range: [0-1]).
+        model (NeuralNetwork): Instance of an AUCMEDI neural network class.
+        preds (numpy.ndarray): NumPy Array of classification prediction encoded as OHE (output of an AUCMEDI
+            prediction).
+ method (str): XAI method class instance or index. By default, GradCAM is used as XAI
+ method.
+ layerName (str): Layer name of the convolutional layer for heatmap computation. If `None`,
+ the last conv layer is used.
+ alpha (float): Transparency value for heatmap overlap plotting on input image
+ (range: [0-1]).
out_path (str): Output path in which heatmaps are saved to disk as PNG files.
Returns:
- images (numpy.ndarray): Combined array of images. Will be only returned if `out_path` parameter is `None`.
- heatmaps (numpy.ndarray): Combined array of XAI heatmaps. Will be only returned if `out_path` parameter is `None`.
+        images (numpy.ndarray): Combined array of images. Will only be returned if the `out_path` parameter is
+            `None`.
+        heatmaps (numpy.ndarray): Combined array of XAI heatmaps. Will only be returned if the `out_path`
+            parameter is `None`.
"""
# Initialize & access some variables
- batch_size = data_gen.batch_size
n_classes = model.n_labels
sample_list = data_gen.samples
# Prepare XAI output methods
res_img = []
res_xai = []
- if out_path is not None and not os.path.exists(out_path) : os.mkdir(out_path)
+ if out_path is not None and not os.path.exists(out_path):
+ os.mkdir(out_path)
# Initialize xai method
if isinstance(method, str) and method in xai_dict:
xai_method = xai_dict[method](model.model, layerName=layerName)
- else : xai_method = method
+ else:
+ xai_method = method
# Iterate over all samples
for i in range(0, len(sample_list)):
@@ -118,17 +129,18 @@ def xai_decoder(data_gen, model, preds=None, method="gradcam", layerName=None,
postprocess_output(sample_list[i], img_org, sample_maps, n_classes,
data_gen, res_img, res_xai, out_path, alpha)
# Return output directly if no output path is defined (and convert to NumPy)
- if out_path is None : return np.array(res_img), np.array(res_xai)
+ if out_path is None: return np.array(res_img), np.array(res_xai)
+
#-----------------------------------------------------#
# Subroutine: Output Postprocessing #
#-----------------------------------------------------#
-""" Helper/Subroutine function for XAI Decoder.
-
-Caches heatmap for direct output or generates a visualization as PNG.
-"""
def postprocess_output(sample, image, xai_map, n_classes, data_gen,
res_img, res_xai, out_path, alpha):
+ """ Helper/Subroutine function for XAI Decoder.
+
+ Caches heatmap for direct output or generates a visualization as PNG.
+ """
# Update result lists for direct output
if out_path is None:
res_img.append(image)
@@ -138,8 +150,9 @@ def postprocess_output(sample, image, xai_map, n_classes, data_gen,
# Create XAI path
if data_gen.image_format:
xai_file = sample + "." + data_gen.image_format
- else : xai_file = sample
- if os.sep in xai_file : xai_file = xai_file.replace(os.sep, ".")
+ else:
+ xai_file = sample
+ if os.sep in xai_file: xai_file = xai_file.replace(os.sep, ".")
path_xai = os.path.join(out_path, xai_file)
# If preds given, output only argmax class heatmap
if len(xai_map.shape) == 2:
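
A usage sketch for the `xai_decoder` function refactored above; `datagen` and `model` are placeholders for a fitted AUCMEDI pipeline:

```python
# Internal Libraries
from aucmedi.xai import xai_decoder

# Write Grad-CAM heatmaps as PNG files into the "xai" directory
xai_decoder(datagen, model, method="gradcam", alpha=0.4, out_path="xai")

# Or collect the arrays directly when out_path stays None
images, heatmaps = xai_decoder(datagen, model, method="gradcam")
```
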
diff --git a/aucmedi/xai/methods/__init__.py b/aucmedi/xai/methods/__init__.py
index 64a9d970..7b2d8ca7 100644
--- a/aucmedi/xai/methods/__init__.py
+++ b/aucmedi/xai/methods/__init__.py
@@ -30,6 +30,7 @@
from aucmedi.xai.methods.lime_pro import LimePro
from aucmedi.xai.methods.lime_con import LimeCon
+
#-----------------------------------------------------#
# XAI method dictionary #
#-----------------------------------------------------#
@@ -60,7 +61,8 @@
}
""" Dictionary of implemented XAI Methods in AUCMEDI.
- A key (str) or an initialized XAI Method can be passed to the [aucmedi.xai.decoder.xai_decoder][] function as method parameter.
+ A key (str) or an initialized XAI Method can be passed to the [aucmedi.xai.decoder.xai_decoder][] function as method
+ parameter.
???+ example "Example"
```python
@@ -75,3 +77,16 @@
XAI Methods are based on the abstract base class [aucmedi.xai.methods.xai_base][].
"""
+
+__all__ = [
+ "xai_dict",
+ "GradCAM",
+ "GradCAMpp",
+ "GuidedGradCAM",
+ "SaliencyMap",
+ "GuidedBackpropagation",
+ "IntegratedGradients",
+ "OcclusionSensitivity",
+ "LimePro",
+ "LimeCon"
+]
diff --git a/aucmedi/xai/methods/gradcam.py b/aucmedi/xai/methods/gradcam.py
index 7204e905..08e0f92f 100644
--- a/aucmedi/xai/methods/gradcam.py
+++ b/aucmedi/xai/methods/gradcam.py
@@ -19,19 +19,24 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
import tensorflow as tf
+
# Internal Libraries
from aucmedi.xai.methods.xai_base import XAImethod_Base
+
#-----------------------------------------------------#
# Gradient-weighted Class Activation Mapping #
#-----------------------------------------------------#
class GradCAM(XAImethod_Base):
""" XAI Method for Gradient-weighted Class Activation Mapping (Grad-CAM).
- Normally, this class is used internally in the [aucmedi.xai.decoder.xai_decoder][] in the AUCMEDI XAI module.
+ Normally, this class is used internally in the
+ [aucmedi.xai.decoder.xai_decoder][] in the AUCMEDI XAI module.
??? abstract "Reference - Implementation #1"
Author: François Chollet
@@ -44,14 +49,16 @@ class GradCAM(XAImethod_Base):
[https://www.pyimagesearch.com/2020/03/09/grad-cam-visualize-class-activation-maps-with-keras-tensorflow-and-deep-learning/](https://www.pyimagesearch.com/2020/03/09/grad-cam-visualize-class-activation-maps-with-keras-tensorflow-and-deep-learning/)
??? abstract "Reference - Publication"
- Ramprasaath R. Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, Dhruv Batra. 7 Oct 2016.
- Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization.
+ Ramprasaath R. Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna
+ Vedantam, Devi Parikh, Dhruv Batra. 7 Oct 2016.
+ Grad-CAM: Visual Explanations from Deep Networks via Gradient-based
+ Localization.
[https://arxiv.org/abs/1610.02391](https://arxiv.org/abs/1610.02391)
This class provides functionality for running the compute_heatmap function,
which computes a Grad-CAM heatmap for an image with a model.
- """
+ """ # noqa E501
def __init__(self, model, layerName=None):
""" Initialization function for creating a Grad-CAM as XAI Method object.
@@ -63,7 +70,7 @@ def __init__(self, model, layerName=None):
self.model = model
self.layerName = layerName
# Try to find output layer if not defined
- if self.layerName is None : self.layerName = self.find_output_layer()
+ if self.layerName is None: self.layerName = self.find_output_layer()
#---------------------------------------------#
# Identify Output Layer #
@@ -86,7 +93,8 @@ def find_output_layer(self):
# Heatmap Computation #
#---------------------------------------------#
def compute_heatmap(self, image, class_index, eps=1e-8):
- """ Core function for computing the Grad-CAM heatmap for a provided image and for specific classification outcome.
+        """ Core function for computing the Grad-CAM heatmap for a provided image and for a specific
+        classification outcome.
???+ attention
Be aware that the image has to be provided in batch format.
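
For direct use outside the decoder, a sketch of the Grad-CAM interface shown above; `model` is a fitted AUCMEDI `NeuralNetwork` and `image` a placeholder sample:

```python
# Internal Libraries
from aucmedi.xai.methods import GradCAM

# Wrap the underlying Keras model; the last conv layer is auto-detected
xai_method = GradCAM(model.model, layerName=None)

# The image has to be provided in batch format: shape (1, x, y, channels)
heatmap = xai_method.compute_heatmap(image, class_index=0)
```
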
diff --git a/aucmedi/xai/methods/gradcam_guided.py b/aucmedi/xai/methods/gradcam_guided.py
index 54aacded..b36ef84a 100644
--- a/aucmedi/xai/methods/gradcam_guided.py
+++ b/aucmedi/xai/methods/gradcam_guided.py
@@ -19,13 +19,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
-import tensorflow as tf
+
# Internal Libraries
-from aucmedi.xai.methods.xai_base import XAImethod_Base
-from aucmedi.xai.methods import GuidedBackpropagation, GradCAM
from aucmedi.data_processing.subfunctions import Resize
+from aucmedi.xai.methods import GradCAM, GuidedBackpropagation
+from aucmedi.xai.methods.xai_base import XAImethod_Base
+
#-----------------------------------------------------#
# Guided Grad-CAM #
@@ -33,7 +36,8 @@
class GuidedGradCAM(XAImethod_Base):
""" XAI Method for Guided Grad-CAM.
- Normally, this class is used internally in the [aucmedi.xai.decoder.xai_decoder][] in the AUCMEDI XAI module.
+ Normally, this class is used internally in the
+ [aucmedi.xai.decoder.xai_decoder][] in the AUCMEDI XAI module.
??? abstract "Reference - Implementation"
Author: Swapnil Ahlawat
@@ -47,14 +51,14 @@ class GuidedGradCAM(XAImethod_Base):
[https://arxiv.org/abs/1412.6806](https://arxiv.org/abs/1412.6806)
??? abstract "Reference - Publication #2"
- Ramprasaath R. Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, Dhruv Batra. 7 Oct 2016.
- Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization.
+ Ramprasaath R. Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, Dhruv Batra.
+ 7 Oct 2016. Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization.
[https://arxiv.org/abs/1610.02391](https://arxiv.org/abs/1610.02391)
This class provides functionality for running the compute_heatmap function,
which computes a Guided Grad-CAM heatmap for an image with a model.
- """
+ """ # noqa E501
def __init__(self, model, layerName=None):
""" Initialization function for creating a Guided Grad-CAM as XAI Method object.
@@ -70,7 +74,8 @@ def __init__(self, model, layerName=None):
# Heatmap Computation #
#---------------------------------------------#
def compute_heatmap(self, image, class_index, eps=1e-8):
- """ Core function for computing the Guided Grad-CAM heatmap for a provided image and for specific classification outcome.
+        """ Core function for computing the Guided Grad-CAM heatmap for a provided image and for a specific
+        classification outcome.
???+ attention
Be aware that the image has to be provided in batch format.
diff --git a/aucmedi/xai/methods/gradcam_pp.py b/aucmedi/xai/methods/gradcam_pp.py
index c7455262..00c4df8f 100644
--- a/aucmedi/xai/methods/gradcam_pp.py
+++ b/aucmedi/xai/methods/gradcam_pp.py
@@ -19,12 +19,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
import tensorflow as tf
+
# Internal Libraries
from aucmedi.xai.methods.xai_base import XAImethod_Base
+
#-----------------------------------------------------#
# XAI Method: Grad-Cam++ #
#-----------------------------------------------------#
@@ -37,7 +41,8 @@ class GradCAMpp(XAImethod_Base):
Author: Samson Woof
GitHub Profile: [https://github.com/samson6460](https://github.com/samson6460)
Date: May 21, 2020
- [https://github.com/samson6460/tf_keras_gradcamplusplus](https://github.com/samson6460/tf_keras_gradcamplusplus)
+ [https://github.com/samson6460/tf_keras_gradcamplusplus](https://github.com/samson6460/tf_keras_gradcamplusplus)
+
??? abstract "Reference - Publication"
Aditya Chattopadhay; Anirban Sarkar; Prantik Howlader; Vineeth N Balasubramanian. 07 May 2018.
@@ -59,7 +64,7 @@ def __init__(self, model, layerName=None):
self.model = model
self.layerName = layerName
# Try to find output layer if not defined
- if self.layerName is None : self.layerName = self.find_output_layer()
+ if self.layerName is None: self.layerName = self.find_output_layer()
#---------------------------------------------#
# Identify Output Layer #
@@ -82,7 +87,8 @@ def find_output_layer(self):
# Heatmap Computation #
#---------------------------------------------#
def compute_heatmap(self, image, class_index, eps=1e-8):
- """ Core function for computing the Grad-CAM++ heatmap for a provided image and for specific classification outcome.
+        """ Core function for computing the Grad-CAM++ heatmap for a provided image and for a specific
+        classification outcome.
???+ attention
Be aware that the image has to be provided in batch format.
diff --git a/aucmedi/xai/methods/guided_backprop.py b/aucmedi/xai/methods/guided_backprop.py
index c522a4d1..3a6ac93d 100644
--- a/aucmedi/xai/methods/guided_backprop.py
+++ b/aucmedi/xai/methods/guided_backprop.py
@@ -19,12 +19,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
import tensorflow as tf
+
# Internal Libraries
from aucmedi.xai.methods.xai_base import XAImethod_Base
+
#-----------------------------------------------------#
# Guided Backpropagation #
#-----------------------------------------------------#
@@ -58,13 +62,14 @@ class GuidedBackpropagation(XAImethod_Base):
This class provides functionality for running the compute_heatmap function,
which computes a Guided Backpropagation for an image with a model.
- """
+ """ # noqa E501
def __init__(self, model, layerName=None):
""" Initialization function for creating Guided Backpropagation as XAI Method object.
Args:
- model (keras.model): Keras model object.
- layerName (str): Not required in Guided Backpropagation, but defined by Abstract Base Class.
+ model (keras.model): Keras model object.
+ layerName (str): Not required in Guided Backpropagation, but defined by Abstract
+ Base Class.
"""
# Create a deep copy of the model
model_copy = tf.keras.models.clone_model(model)
@@ -76,7 +81,7 @@ def __init__(self, model, layerName=None):
@tf.custom_gradient
def guidedRelu(x):
def grad(dy):
- return tf.cast(dy>0, "float32") * tf.cast(x>0, "float32") * dy
+ return tf.cast(dy > 0, "float32") * tf.cast(x > 0, "float32") * dy
return tf.nn.relu(x), grad
# Replace Relu activation layers with custom Relu activation layer
layer_dict = [layer for layer in model_copy.layers if hasattr(layer, "activation")]
@@ -90,7 +95,8 @@ def grad(dy):
# Heatmap Computation #
#---------------------------------------------#
def compute_heatmap(self, image, class_index, eps=1e-8):
- """ Core function for computing the Guided Backpropagation for a provided image and for specific classification outcome.
+        """ Core function for computing the Guided Backpropagation for a provided image and for a specific
+        classification outcome.
???+ attention
Be aware that the image has to be provided in batch format.
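
The `guidedRelu` replacement above is the core of the method: gradients flow only where both the forward activation and the incoming gradient are positive. A standalone sketch of that behavior:

```python
# Third Party Libraries
import tensorflow as tf


@tf.custom_gradient
def guided_relu(x):
    def grad(dy):
        # Pass gradients only where input AND upstream gradient are positive
        return tf.cast(dy > 0, "float32") * tf.cast(x > 0, "float32") * dy
    return tf.nn.relu(x), grad


x = tf.constant([-1.0, 2.0, 3.0])
with tf.GradientTape() as tape:
    tape.watch(x)
    y = guided_relu(x) * tf.constant([1.0, -1.0, 1.0])
# First entry is masked (negative input), second is masked (negative upstream
# gradient), only the third gradient survives: [0., 0., 1.]
print(tape.gradient(y, x))
```
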
diff --git a/aucmedi/xai/methods/integrated_gradients.py b/aucmedi/xai/methods/integrated_gradients.py
index d4385a3a..0ba30461 100644
--- a/aucmedi/xai/methods/integrated_gradients.py
+++ b/aucmedi/xai/methods/integrated_gradients.py
@@ -19,12 +19,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
import tensorflow as tf
+
# Internal Libraries
from aucmedi.xai.methods.xai_base import XAImethod_Base
+
#-----------------------------------------------------#
# Integrated Gradients #
#-----------------------------------------------------#
@@ -37,7 +41,8 @@ class IntegratedGradients(XAImethod_Base):
Author: Aakash Kumar Nain
GitHub Profile: [https://github.com/AakashKumarNain](https://github.com/AakashKumarNain)
Date: Jun 02, 2020
- [https://keras.io/examples/vision/integrated_gradients](https://keras.io/examples/vision/integrated_gradients)
+ [https://keras.io/examples/vision/integrated_gradients](https://keras.io/examples/vision/integrated_gradients)
+
??? abstract "Reference - Publication"
Mukund Sundararajan, Ankur Taly, Qiqi Yan. 04 Mar 2017.
@@ -53,7 +58,8 @@ def __init__(self, model, layerName=None, num_steps=50):
Args:
model (keras.model): Keras model object.
- layerName (str): Not required in Integrated Gradients Maps, but defined by Abstract Base Class.
+ layerName (str): Not required in Integrated Gradients Maps, but defined by Abstract
+ Base Class.
num_steps (int): Number of iterations for interpolation.
"""
# Cache class parameters
@@ -64,7 +70,8 @@ def __init__(self, model, layerName=None, num_steps=50):
# Heatmap Computation #
#---------------------------------------------#
def compute_heatmap(self, image, class_index, eps=1e-8):
- """ Core function for computing the Integrated Gradients Map for a provided image and for specific classification outcome.
+        """ Core function for computing the Integrated Gradients Map for a provided image and for a specific
+        classification outcome.
???+ attention
Be aware that the image has to be provided in batch format.
diff --git a/aucmedi/xai/methods/lime_con.py b/aucmedi/xai/methods/lime_con.py
index a69d21eb..a04fe885 100644
--- a/aucmedi/xai/methods/lime_con.py
+++ b/aucmedi/xai/methods/lime_con.py
@@ -19,12 +19,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
from lime import lime_image
+
# Internal Libraries
from aucmedi.xai.methods.xai_base import XAImethod_Base
+
#-----------------------------------------------------#
# LIME: Con Features #
#-----------------------------------------------------#
diff --git a/aucmedi/xai/methods/lime_pro.py b/aucmedi/xai/methods/lime_pro.py
index 1e8cd599..d8c81ff7 100644
--- a/aucmedi/xai/methods/lime_pro.py
+++ b/aucmedi/xai/methods/lime_pro.py
@@ -19,12 +19,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
from lime import lime_image
+
# Internal Libraries
from aucmedi.xai.methods.xai_base import XAImethod_Base
+
#-----------------------------------------------------#
# LIME: Pro Features #
#-----------------------------------------------------#
diff --git a/aucmedi/xai/methods/occlusion_sensitivity.py b/aucmedi/xai/methods/occlusion_sensitivity.py
index d907070d..640e7019 100644
--- a/aucmedi/xai/methods/occlusion_sensitivity.py
+++ b/aucmedi/xai/methods/occlusion_sensitivity.py
@@ -19,12 +19,15 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
-import tensorflow as tf
+
# Internal Libraries
from aucmedi.xai.methods.xai_base import XAImethod_Base
+
#-----------------------------------------------------#
# Occlusion Sensitivity #
#-----------------------------------------------------#
@@ -41,13 +44,14 @@ class OcclusionSensitivity(XAImethod_Base):
This class provides functionality for running the compute_heatmap function,
    which computes an Occlusion Sensitivity Map for an image with a model.
- """
+ """ # noqa E501
def __init__(self, model, layerName=None, patch_size=16):
""" Initialization function for creating a Occlusion Sensitivity Map as XAI Method object.
Args:
model (keras.model): Keras model object.
- layerName (str): Not required in Occlusion Sensitivity Maps, but defined by Abstract Base Class.
+ layerName (str): Not required in Occlusion Sensitivity Maps, but defined by Abstract
+ Base Class.
"""
# Cache class parameters
self.model = model
@@ -57,7 +61,8 @@ def __init__(self, model, layerName=None, patch_size=16):
# Heatmap Computation #
#---------------------------------------------#
def compute_heatmap(self, image, class_index, eps=1e-8):
- """ Core function for computing the Occlusion Sensitivity Map for a provided image and for specific classification outcome.
+        """ Core function for computing the Occlusion Sensitivity Map for a provided image and for a specific
+        classification outcome.
???+ attention
Be aware that the image has to be provided in batch format.
@@ -95,6 +100,7 @@ def compute_heatmap(self, image, class_index, eps=1e-8):
# Return the resulting sensitivity map (automatically a heatmap)
return sensitivity_map
+
#-----------------------------------------------------#
# Subroutines #
#-----------------------------------------------------#
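
A sketch of the Occlusion Sensitivity interface documented above; `model` and `image` are placeholders, and a larger `patch_size` trades resolution for speed:

```python
# Internal Libraries
from aucmedi.xai.methods import OcclusionSensitivity

# Coarser patches run faster but yield a rougher sensitivity map
xai_method = OcclusionSensitivity(model.model, patch_size=32)

# The image has to be provided in batch format: shape (1, x, y, channels)
sensitivity_map = xai_method.compute_heatmap(image, class_index=0)
```
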
diff --git a/aucmedi/xai/methods/saliency.py b/aucmedi/xai/methods/saliency.py
index b6d3dca1..a3f27458 100644
--- a/aucmedi/xai/methods/saliency.py
+++ b/aucmedi/xai/methods/saliency.py
@@ -19,12 +19,16 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External Libraries
+# Python Standard Library
+
+# Third Party Libraries
import numpy as np
import tensorflow as tf
+
# Internal Libraries
from aucmedi.xai.methods.xai_base import XAImethod_Base
+
#-----------------------------------------------------#
# Saliency Maps / Backpropagation #
#-----------------------------------------------------#
diff --git a/aucmedi/xai/methods/xai_base.py b/aucmedi/xai/methods/xai_base.py
index bb58ee98..914fdcda 100644
--- a/aucmedi/xai/methods/xai_base.py
+++ b/aucmedi/xai/methods/xai_base.py
@@ -19,9 +19,10 @@
#-----------------------------------------------------#
# Library imports #
#-----------------------------------------------------#
-# External libraries
+# Python Standard Library
from abc import ABC, abstractmethod
+
#-----------------------------------------------------#
# Abstract Base Class for XAI Methods #
#-----------------------------------------------------#