diff --git a/index.html b/index.html
index 444e1c6..6d2542d 100644
--- a/index.html
+++ b/index.html
@@ -991,6 +991,38 @@
},
+ { cols: [
+ { view: "label", label: "Auto Threshold Input",
+ align: "left"
+ },
+
+ {
+ view: "select",
+ id: "autoThresholdStatus",
+ value: 1,
+ options: ["0", "0.1", "0.2"]
+ },
+
+ {
+ view: "label",
+ label: '',
+ css: {"color":"black !important", "font-weight": "bold", "cursor": "pointer"},
+ width: 25,
+ height: 25,
+ click: function() {
+                  let info = "Auto thresholding can remove noisy voxels around the brain before cropping it and feeding the result to the inference model, which speeds up inference and lowers memory use in browsers with limited memory.";
+ $$("modelTooltip").show();
+ document.getElementById("tooltipDiv").innerHTML =
+                  "&nbsp;&nbsp;&nbsp;" + info + "&nbsp;"
+ }
+ }
+
+ ]
+ },
+
+
+
+
{ cols: [
{ view: "label", label: "Crop Input",
@@ -1280,6 +1312,7 @@
enableTranspose:($$("transposeStatus").getValue() === 'true'),
enableCrop: ($$("cropStatus").getValue() === 'true'),
cropPadding: parseInt($$("cropPadStatus").getValue()),
+             autoThreshold: parseFloat($$("autoThresholdStatus").getValue()), // parseFloat, not parseInt: "0.1" and "0.2" are fractional
enableQuantileNorm: ($$("quantileNormStatus").getValue() === 'true'),
filterOutWithPreMask: ($$("filterByMaskStatus").getValue() === 'true'),
enableSeqConv: ($$("seqConvStatus").getValue() === 'true'),
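Worth noting here: the webix select returns its value as a string, and the threshold options are fractional, so parseFloat is required; a minimal sketch of the round trip:

```js
// Minimal sketch: parseInt truncates fractional strings, which would silently
// disable auto thresholding for the "0.1" and "0.2" options.
const raw = "0.2";            // what $$("autoThresholdStatus").getValue() returns
console.log(parseInt(raw));   // 0   -> threshold lost
console.log(parseFloat(raw)); // 0.2 -> fractional threshold preserved
```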
@@ -1320,7 +1353,7 @@
webix.ui({
view:"window",
id: "modelBrowsingWindow",
- height:750,
+ height:770,
width:400,
head:{
view:"toolbar", css: "toolbarclass", elements:[
diff --git a/js/brainchop/mainMeshNetFunctions.js b/js/brainchop/mainMeshNetFunctions.js
index d8c563d..1c41adc 100644
--- a/js/brainchop/mainMeshNetFunctions.js
+++ b/js/brainchop/mainMeshNetFunctions.js
@@ -1257,6 +1257,123 @@ rgbToHex = (rgbObj) => {
+/**
+* Threshold noisy voxels around the brain in an MRI volume for better cropping later
+* @since 3.0.0
+* @param {tf.Tensor} tensor - Tensor3d, e.g. Tensor3d of all MRI volume data
+* @param {number} percentage - Threshold fraction, a number between 0 and 1
+* @returns {tf.Tensor}
+*
+*/
+
+
+applyMriThreshold = async(tensor, percentage) => {
+ // Perform asynchronous operations outside of tf.tidy
+ const maxTensor = tensor.max();
+ const thresholdTensor = maxTensor.mul(percentage);
+ const threshold = await thresholdTensor.data(); // Extracts the threshold value
+
+ // Dispose tensors not needed anymore
+ maxTensor.dispose();
+ thresholdTensor.dispose();
+
+ // Use tf.tidy for synchronous operations
+  const denoisedMriData = tf.tidy(() => {
+    // Thresholding (assuming background has very low values compared to the head)
+    const mask = tensor.greater(threshold[0]);
+
+    // No need to manually dispose mask; tf.tidy() disposes intermediate tensors automatically.
+    return tensor.mul(mask);
+  });
+
+ return denoisedMriData;
+}
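A hedged usage sketch for applyMriThreshold, assuming tf is the global from the tfjs script tag as elsewhere in brainchop:

```js
(async () => {
  // 3x3x3 volume with values 0 .. 2.6; the max is 2.6, so a 0.2 threshold is 0.52
  const vol = tf.tensor(Array.from({length: 27}, (x, i) => i / 10), [3, 3, 3]);
  const denoised = await applyMriThreshold(vol, 0.2);
  denoised.print(); // voxels at or below 0.52 are zeroed, the rest pass through
  vol.dispose();
  denoised.dispose();
})();
```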
+
+
+
+
+/**
+* Get MRI cropping coordinates after thresholding
+* @since 3.0.0
+* @param {tf.Tensor} tensor - Tensor3d, e.g. Tensor3d of all MRI volume data
+* @param {number} percentage - Threshold fraction, a number between 0 and 1
+* @returns {Array}
+* @example
+*
+* arr = Array.from({length: 27}, (x, i) => i/10)
+* => Array(27) [ 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, … , 2.6]
+*
+* cropped = await cropTensorWithThreshold ( tf.tensor( Array.from({length: 27}, (x, i) => i/10) , [3, 3, 3]), 0.2 )
+*
+* => Array [ {…}, {…} ]
+*
+* cropped[0].print()
+*
+*/
+
+
+ cropTensorWithThreshold = async(tensor, percentage) => {
+
+ // Find the maximum value of the tensor
+ const maxTensor = tensor.max();
+
+ // Multiply the maximum value by the thresholdRatio to get % of the max
+ const thresholdTensor = maxTensor.mul(percentage);
+
+ // Extract the value from the tensor
+ const threshold = await thresholdTensor.data();
+
+ const dataForProcessing = tensor.clone();
+
+ // Thresholding (assuming background has very low values compared to the head)
+ const mask = dataForProcessing.greater(threshold[0]);
+ const masked_data = dataForProcessing.mul(mask);
+
+ // Find the bounding box around the head (non-zero region) in the filtered data
+ const indices = await tf.whereAsync(masked_data.greater(0));
+ dataForProcessing.dispose();
+ mask.dispose();
+ masked_data.dispose();
+
+ // Extract z, y, x coordinates from the indices
+ const zs = indices.slice([0, 0], [indices.shape[0], 1]); // z coordinates
+ const ys = indices.slice([0, 1], [indices.shape[0], 1]); // y coordinates
+ const xs = indices.slice([0, 2], [indices.shape[0], 1]); // x coordinates
+
+ // Compute min and max indices for each dimension
+ const min_z = zs.min().arraySync();
+ const max_z = zs.max().arraySync();
+ const min_y = ys.min().arraySync();
+ const max_y = ys.max().arraySync();
+ const min_x = xs.min().arraySync();
+ const max_x = xs.max().arraySync();
+
+ // Crop the original tensor using the bounding box from the filtered data
+ const cropped_tensor = tensor.slice([min_z, min_y, min_x], [max_z - min_z + 1, max_y - min_y + 1, max_x - min_x + 1]);
+
+ // Clean up tensors to free memory
+ indices.dispose();
+ zs.dispose();
+ ys.dispose();
+ xs.dispose();
+
+ // Return the cropped tensor along with the min and max indices
+ return [cropped_tensor, {
+ minZ: min_z,
+ maxZ: max_z,
+ minY: min_y,
+ maxY: max_y,
+ minX: min_x,
+ maxX: max_x
+ }];
+
+ }
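And a matching sketch for cropTensorWithThreshold, mirroring the JSDoc example above (same global tf assumed):

```js
(async () => {
  const vol = tf.tensor(Array.from({length: 27}, (x, i) => i / 10), [3, 3, 3]);
  const [cropped, bbox] = await cropTensorWithThreshold(vol, 0.2);
  console.log(cropped.shape, bbox); // bounding box of voxels above 20% of the max
  vol.dispose();
  cropped.dispose();
})();
```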
+
+
+
/**
* load pre-trained model from local drive
*
@@ -4751,16 +4868,28 @@ function convByOutputChannelAndInputSlicing(input, filter, biases, stride, pad,
let mask_3d;
- if(pipeline1_out == null) {
- // binarize original image
- mask_3d = slices_3d.greater([0]).asType('bool');
+ if(pipeline1_out == null) { // preModel is null
+
+ // Check if thresholding the MRI to remove noisy voxels for better cropping is needed.
+ let autoThresholdValue = inferenceModelsList[$$("selectModel").getValue() - 1]["autoThreshold"];
+
+ if( (autoThresholdValue > 0) && (autoThresholdValue <= 1) ) {
+
+          // Filter out noisy voxels below autoThresholdValue from the MRI
+          slices_3d = await applyMriThreshold(slices_3d, autoThresholdValue);
+      } else {
+          console.log("Auto threshold is disabled or out of range (0, 1]; skipping thresholding");
+ }
+
+ // binarize original image
+ mask_3d = slices_3d.greater([0]).asType('bool');
} else {
- mask_3d = pipeline1_out.greater([0]).asType('bool');
- //-- pipeline1_out.dispose();
+ mask_3d = pipeline1_out.greater([0]).asType('bool');
+ //-- pipeline1_out.dispose();
- }
+ }
console.log(" mask_3d shape : ", mask_3d.shape);
@@ -5975,9 +6104,22 @@ get3dObjectBoundingVolume = async(slices_3d) => {
slices_3d = minMaxNormalizeVolumeData(slices_3d);
}
+
let mask_3d;
- if(pipeline1_out == null) {
+ if(pipeline1_out == null) { // preModel is null
+
+ // Check if thresholding the MRI to remove noisy voxels for better cropping is needed.
+ let autoThresholdValue = inferenceModelsList[$$("selectModel").getValue() - 1]["autoThreshold"];
+
+ if( (autoThresholdValue > 0) && (autoThresholdValue <= 1) ) {
+
+          // Filter out noisy voxels below autoThresholdValue from the MRI
+          slices_3d = await applyMriThreshold(slices_3d, autoThresholdValue);
+      } else {
+          console.log("Auto threshold is disabled or out of range (0, 1]; skipping thresholding");
+ }
+
// binarize original image
mask_3d = slices_3d.greater([0]).asType('bool');
@@ -6448,26 +6590,21 @@ checkInferenceModelList = () => {
statData["No_SubVolumes"] = 1;
- // check for pre model to load e.g. Brain_Extraction or Brain_Masking
- let preModel = null;
let modelEntry = inferenceModelsList[$$("selectModel").getValue() - 1];
console.log("modelEntry ", modelEntry)
- //-- If pre-model is not null then slices_3d mask will be generated..
- //-- The mask is needed to remove the skull and set noise in background to 0, and get the brain bounding volume properly
- let slices_3d_mask = null;
-
-
// load pre-model for inference first, can be null if no pre-model such as GWM models
if(modelEntry["preModelId"]) {
- preModel = load_model(inferenceModelsList[ modelEntry["preModelId"] - 1]['path'] );
+ let preModel = load_model(inferenceModelsList[ modelEntry["preModelId"] - 1]['path'] );
let transpose = inferenceModelsList[ modelEntry["preModelId"] - 1]["enableTranspose"];
-
let quantileNorm = inferenceModelsList[ modelEntry["preModelId"] - 1]["enableQuantileNorm"];
-
let preModel_slices_3d = null;
+      //-- If pre-model is not null then a slices_3d mask will be generated.
+      //-- The mask is needed to remove the skull, set background noise to 0, and get the brain bounding volume properly
+ let slices_3d_mask = null;
+
if(quantileNorm) {
// Quantile normalize function needs specific models to be used
console.log("preModel Quantile normalization enabled");
@@ -6479,20 +6616,19 @@ checkInferenceModelList = () => {
}
-
-
//-- Transpose MRI data to be match pytorch/keras input output
//-- Check if pre-model needs transpose..
if(transpose) {
- preModel_slices_3d = preModel_slices_3d.transpose()
+
+ preModel_slices_3d = preModel_slices_3d.transpose();
console.log("Input transposed for pre-model");
+
} else {
console.log("Transpose not enabled for pre-model");
}
statData["Brainchop_Ver"] = "PreModel_FV" ; // e.g. "PreModel_FV"
-
preModel.then(function (res) {
try {
@@ -6552,7 +6688,7 @@ checkInferenceModelList = () => {
statData["Model"] = inferenceModelsList[ modelEntry["preModelId"] - 1]["modelName"];
statData["Extra_Info"] = inferenceModelsList[$$("selectModel").getValue() - 1]["modelName"];
-
+
// maxLabelPredicted in whole volume of the brain
let maxLabelPredicted = 0;
let delay = inferenceModelsList[ modelEntry["preModelId"] - 1]["inferenceDelay"];
@@ -6566,7 +6702,6 @@ checkInferenceModelList = () => {
//Dispose the volume
tf.dispose(preModel_slices_3d);
-
let timer = window.setInterval(function() {
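For reference, a minimal sketch of the pre-model input preparation order this hunk preserves (normalize, then optionally transpose); the repo's minMaxNormalizeVolumeData and quantile-normalization helpers may differ in detail:

```js
// Sketch only: inline min-max normalization standing in for the repo helper.
function minMaxNormalizeSketch(t) {
  return tf.tidy(() => {
    const min = t.min();
    const max = t.max();
    return t.sub(min).div(max.sub(min));
  });
}

let volume = tf.tensor(Array.from({length: 8}, (x, i) => i), [2, 2, 2]);
volume = minMaxNormalizeSketch(volume);
volume = volume.transpose(); // applied only when enableTranspose is set for the pre-model
volume.print();
```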
diff --git a/js/brainchop/mainParameters.js b/js/brainchop/mainParameters.js
index d395653..5bfe331 100644
--- a/js/brainchop/mainParameters.js
+++ b/js/brainchop/mainParameters.js
@@ -115,6 +115,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0.2, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: false, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -138,6 +139,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: true, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -161,6 +163,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: true, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -186,6 +189,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: false, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -209,6 +213,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: false, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -232,6 +237,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0.2, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: false, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -255,6 +261,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: true, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -278,6 +285,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: true, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -302,6 +310,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0.2, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: false, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -325,6 +334,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: false, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -348,6 +358,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0.2, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: false, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -371,6 +382,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: false, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -396,6 +408,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: false, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -419,6 +432,7 @@
enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
cropPadding: 2, // Padding size add to cropped brain
+             autoThreshold: 0, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
enableQuantileNorm: false, // Some models needs Quantile Normaliztion.
filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer
@@ -428,6 +442,56 @@
description: "FreeSurfer aparc+aseg atlas 104 parcellate brain areas into 104 regions. It contains a combination of the Desikan-Killiany atlas for cortical area and also segmentation of subcortical regions. The model use sequential convolution for inference to overcome browser memory limitations but leads to longer computation time. "
}
+ ,{
+ id: 15,
+ type: "Atlas",
+ path:"./models/model30chan18cls/model.json",
+ modelName:"Subcortical + GWM (High Mem, Fast)",
+ labelsPath: "./models/model30chan18cls/labels.json",
+ colorsPath: "./models/model30chan18cls/colorLUT.json",
+ preModelId: null, // model run first e.g. Brain_Extraction { null, 1, 2, .. }
+             preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates output.
+             isBatchOverlapEnable: false, // Create extra overlap batches for inference
+             numOverlapBatches: 200, // Number of extra overlap batches for inference
+             enableTranspose : true, // Keras and tfjs input orientation may need a transposing step to be matched
+             enableCrop: true, // To speed up inference and lower memory use, crop the brain from the background before feeding it to the inference model.
+             cropPadding: 0, // Padding size added to the cropped brain
+             autoThreshold: 0.2, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
+             enableQuantileNorm: false, // Some models need quantile normalization.
+             filterOutWithPreMask: false, // Can be used to multiply the final output with the premodel output mask to clean noisy areas
+             enableSeqConv: false, // For low-memory systems and low configurations, enable sequential convolution for the last layer
+             textureSize: 0, // Requested texture size for the model; if unknown, can be 0.
+             warning: "This model may need a dedicated graphics card. For more info please check Browser Resources.", // Warning message to show when the model is selected.
+             inferenceDelay: 100, // Delay in ms while looping over layers during inference.
+ description: "SynthSeg"
+ }
+
+
+ ,{
+ id: 16,
+ type: "Atlas",
+ path:"./models/model30chan18cls/model.json",
+ modelName:"Q Subcortical + GWM (High Mem, Fast)",
+ labelsPath: "./models/model30chan18cls/labels.json",
+ colorsPath: "./models/model30chan18cls/colorLUT.json",
+ preModelId: null, // model run first e.g. Brain_Extraction { null, 1, 2, .. }
+             preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after the preModel inference generates output.
+             isBatchOverlapEnable: false, // Create extra overlap batches for inference
+             numOverlapBatches: 200, // Number of extra overlap batches for inference
+             enableTranspose : true, // Keras and tfjs input orientation may need a transposing step to be matched
+             enableCrop: true, // To speed up inference and lower memory use, crop the brain from the background before feeding it to the inference model.
+             cropPadding: 0, // Padding size added to the cropped brain
+             autoThreshold: 0.2, // Threshold between 0 and 1; applied only when there is no preModel and the tensor is min-max or quantile normalized. Removes noisy voxels around the brain
+             enableQuantileNorm: true, // Some models need quantile normalization.
+             filterOutWithPreMask: false, // Can be used to multiply the final output with the premodel output mask to clean noisy areas
+             enableSeqConv: false, // For low-memory systems and low configurations, enable sequential convolution for the last layer
+             textureSize: 0, // Requested texture size for the model; if unknown, can be 0.
+             warning: "This model may need a dedicated graphics card. For more info please check Browser Resources.", // Warning message to show when the model is selected.
+             inferenceDelay: 100, // Delay in ms while looping over layers during inference.
+ description: "SynthSeg"
+ }
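A hedged sketch of how these entries are consumed: the UI select is 1-based, so entry id 15 sits at index 14, and autoThreshold is only honored inside (0, 1]:

```js
const modelEntry = inferenceModelsList[15 - 1]; // same lookup pattern as $$("selectModel").getValue() - 1
const t = modelEntry["autoThreshold"];
if (t > 0 && t <= 1) {
  console.log("auto thresholding at " + (t * 100) + "% of the max intensity");
} else {
  console.log("auto thresholding disabled for " + modelEntry["modelName"]);
}
```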
+
+
];
diff --git a/models/model30chan18cls/colorLUT.json b/models/model30chan18cls/colorLUT.json
new file mode 100644
index 0000000..27d12d1
--- /dev/null
+++ b/models/model30chan18cls/colorLUT.json
@@ -0,0 +1,21 @@
+{
+ "0": "rgb(0,0,0)",
+ "1": "rgb(245,245,245)",
+ "2": "rgb(205,62,78)",
+ "3": "rgb(120,18,134)",
+ "4": "rgb(196,58,250)",
+ "5": "rgb(220,248,164)",
+ "6": "rgb(230,148,34)",
+ "7": "rgb(0,118,14)",
+ "8": "rgb(122,186,220)",
+ "9": "rgb(236,13,176)",
+ "10": "rgb(12,48,255)",
+ "11": "rgb(204,182,142)",
+ "12": "rgb(42,204,164)",
+ "13": "rgb(119,159,176)",
+ "14": "rgb(220,216,20)",
+ "15": "rgb(103,255,255)",
+ "16": "rgb(255,165,0)",
+ "17": "rgb(165,42,42)"
+}
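A small sketch of consuming these colorLUT entries; the exact object shape expected by the repo's rgbToHex helper in mainMeshNetFunctions.js is an assumption here:

```js
// "rgb(205,62,78)" -> { r: 205, g: 62, b: 78 } (entry "2", Cerebral-Cortex)
function parseRgbString(str) {
  const [r, g, b] = str.match(/\d+/g).map(Number);
  return { r, g, b };
}
console.log(parseRgbString("rgb(205,62,78)"));
```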
+
diff --git a/models/model30chan18cls/labels.json b/models/model30chan18cls/labels.json
new file mode 100644
index 0000000..d022502
--- /dev/null
+++ b/models/model30chan18cls/labels.json
@@ -0,0 +1,20 @@
+{
+ "0": "Unknown",
+ "1": "Cerebral-White-Matter",
+ "2": "Cerebral-Cortex",
+ "3": "Lateral-Ventricle",
+ "4": "Inferior-Lateral-Ventricle",
+ "5": "Cerebellum-White-Matter",
+ "6": "Cerebellum-Cortex",
+ "7": "Thalamus",
+ "8": "Caudate",
+ "9": "Putamen",
+ "10": "Pallidum",
+ "11": "3rd-Ventricle",
+ "12": "4th-Ventricle",
+ "13": "Brain-Stem",
+ "14": "Hippocampus",
+ "15": "Amygdala",
+ "16": "Accumbens-area",
+ "17": "VentralDC"
+}
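And a sketch of mapping an argmax class index from the 18-class output back to a label name via this file:

```js
(async () => {
  const labels = await (await fetch("./models/model30chan18cls/labels.json")).json();
  const classIdx = 14;                   // e.g. one argmax value from the output tensor
  console.log(labels[String(classIdx)]); // "Hippocampus"
})();
```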
diff --git a/models/model30chan18cls/model.bin b/models/model30chan18cls/model.bin
new file mode 100644
index 0000000..3459133
Binary files /dev/null and b/models/model30chan18cls/model.bin differ
diff --git a/models/model30chan18cls/model.json b/models/model30chan18cls/model.json
new file mode 100644
index 0000000..179715b
--- /dev/null
+++ b/models/model30chan18cls/model.json
@@ -0,0 +1,808 @@
+{
+ "format": "layers-model",
+ "generatedBy": "keras v2.7.0",
+ "convertedBy": "TensorFlow.js Converter v3.9.0",
+ "modelTopology": {
+ "keras_version": "2.6.0",
+ "backend": "tensorflow",
+ "model_config": {
+ "class_name": "Functional",
+ "config": {
+ "name": "model",
+ "layers": [
+ {
+ "class_name": "InputLayer",
+ "config": {
+ "batch_input_shape": [
+ null,
+ 256,
+ 256,
+ 256,
+ 1
+ ],
+ "dtype": "float32",
+ "sparse": false,
+ "ragged": false,
+ "name": "input"
+ },
+ "name": "input",
+ "inbound_nodes": []
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_0",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_0",
+ "inbound_nodes": [
+ [
+ [
+ "input",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_1",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_1",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_0",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_2",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 2,
+ 2,
+ 2
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_2",
+ "inbound_nodes": [
+ [
+ [
+ "activation_1",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_3",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_3",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_2",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_4",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 4,
+ 4,
+ 4
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_4",
+ "inbound_nodes": [
+ [
+ [
+ "activation_3",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_5",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_5",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_4",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_6",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 8,
+ 8,
+ 8
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_6",
+ "inbound_nodes": [
+ [
+ [
+ "activation_5",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_7",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_7",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_6",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_8",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 16,
+ 16,
+ 16
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_8",
+ "inbound_nodes": [
+ [
+ [
+ "activation_7",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_9",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_9",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_8",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_10",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 8,
+ 8,
+ 8
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_10",
+ "inbound_nodes": [
+ [
+ [
+ "activation_9",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_11",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_11",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_10",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_12",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 4,
+ 4,
+ 4
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_12",
+ "inbound_nodes": [
+ [
+ [
+ "activation_11",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_13",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_13",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_12",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_14",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 2,
+ 2,
+ 2
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_14",
+ "inbound_nodes": [
+ [
+ [
+ "activation_13",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_15",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_15",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_14",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "conv3d_16",
+ "trainable": false,
+ "filters": 30,
+ "kernel_size": [
+ 3,
+ 3,
+ 3
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "conv3d_16",
+ "inbound_nodes": [
+ [
+ [
+ "activation_15",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Activation",
+ "config": {
+ "name": "activation_17",
+ "trainable": false,
+ "dtype": "float32",
+ "activation": "elu"
+ },
+ "name": "activation_17",
+ "inbound_nodes": [
+ [
+ [
+ "conv3d_16",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ },
+ {
+ "class_name": "Conv3D",
+ "config": {
+ "name": "output",
+ "trainable": false,
+ "filters": 18,
+ "kernel_size": [
+ 1,
+ 1,
+ 1
+ ],
+ "strides": [
+ 1,
+ 1,
+ 1
+ ],
+ "dilation_rate": [
+ 1,
+ 1,
+ 1
+ ],
+ "padding": "same",
+ "data_format": "channels_last",
+ "activation": "linear",
+ "use_bias": true,
+ "dtype": "float32"
+ },
+ "name": "output",
+ "inbound_nodes": [
+ [
+ [
+ "activation_17",
+ 0,
+ 0,
+ {}
+ ]
+ ]
+ ]
+ }
+ ],
+ "input_layers": [
+ [
+ "input",
+ 0,
+ 0
+ ]
+ ],
+ "output_layers": [
+ [
+ "output",
+ 0,
+ 0
+ ]
+ ]
+ }
+ }
+ },
+ "weightsManifest": [
+ {
+ "paths": [
+ "model.bin"
+ ],
+ "weights": [
+ {
+ "name": "conv3d_0/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 1,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_0/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_2/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_2/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_4/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_4/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_6/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_6/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_8/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_8/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_10/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_10/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_12/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_12/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_14/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_14/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_16/kernel",
+ "shape": [
+ 3,
+ 3,
+ 3,
+ 30,
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "conv3d_16/bias",
+ "shape": [
+ 30
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "output/kernel",
+ "shape": [
+ 1,
+ 1,
+ 1,
+ 30,
+ 18
+ ],
+ "dtype": "float32"
+ },
+ {
+ "name": "output/bias",
+ "shape": [
+ 18
+ ],
+ "dtype": "float32"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
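The topology above is a MeshNet-style stack of nine dilated 3x3x3 convolutions (dilations 1, 2, 4, 8, 16, 8, 4, 2, 1) followed by a 1x1x1 output layer; a quick sketch of the resulting receptive field:

```js
// Each 3x3x3 conv with dilation d widens the receptive field by 2*d voxels;
// the 1x1x1 output layer adds nothing.
const dilations = [1, 2, 4, 8, 16, 8, 4, 2, 1];
const receptiveField = 1 + dilations.reduce((acc, d) => acc + 2 * d, 0);
console.log(receptiveField); // 93 voxels per axis
```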