diff --git a/js/brainchop/connectedComponents3DAll.js b/js/brainchop/connectedComponents3DAll.js
index 44d7eb6..01cb845 100644
--- a/js/brainchop/connectedComponents3DAll.js
+++ b/js/brainchop/connectedComponents3DAll.js
@@ -541,65 +541,6 @@ class ConnectCompFor3D extends ConnectCompFor2D {
return label3D;
}
- // For future use
-
- getConComponentsFor3DVolumeWithTimer = async(volumeSlices, sliceHeight, sliceWidth) => {
-
- const self = this;
-
- return new Promise((resolve, reject) => {
- document.getElementById("progressBarChild").parentElement.style.visibility = "visible";
- document.getElementById("progressBarChild").style.width = 0;
-
- let binaryMaskData1D = [];
- let binaryMaskData2D = [];
- let label3D = [];
-
- let sliceIdx = 0;
-
- let ccTimer = window.setInterval(function() {
-
- binaryMaskData1D[sliceIdx] = getBinaryMaskData1D(volumeSlices[sliceIdx]); // binaryMaskData1D has values 0 or 1
- binaryMaskData2D[sliceIdx] = convertBinaryDataTo2D(binaryMaskData1D[sliceIdx], sliceHeight, sliceWidth);
-
- if(sliceIdx == 0) {
- //Only called for once at begining with first slice
- label3D[sliceIdx] = self.getConComponentsFor2D(binaryMaskData2D[sliceIdx], sliceHeight, sliceWidth);
-
- } else {
- label3D[sliceIdx] = self._getConComponentsFor2Slices(binaryMaskData2D[sliceIdx], label3D[sliceIdx - 1], sliceHeight, sliceWidth);
- }
-
-
- if(sliceIdx == (volumeSlices.length -1)) {
- document.getElementById("progressBarChild").style.width = 0;
- window.clearInterval( ccTimer );
-
- // 3d connected components third pass
- for(let sliceIdx = 0; sliceIdx < volumeSlices.length; sliceIdx++) {
- let row, col;
- for(row = 0; row < sliceHeight; row++) {
- for(col = 0; col < sliceWidth; col++) {
-
- if( label3D[sliceIdx][row][col] != 0) {
- label3D[sliceIdx][row][col] = self._equivalenceTabel[label3D[sliceIdx][row][col]];
- }
- }
- }
- }
-
- resolve(label3D);
- }
-
- sliceIdx++;
- document.getElementById("progressBarChild").style.width = (sliceIdx + 1)*100/volumeSlices.length + "%";
-
- }, 10); // timer delay
-
- })
- }
-
-
/**
* Get connected components For a Volume of 2 slices, current slice and previous slice.-- (refine)
@@ -702,10 +643,9 @@ class ConnectCompFor3D extends ConnectCompFor2D {
findLargest3dRegion = (volumeSlices, sliceHeight, sliceWidth) => {
let label3D = [];
-
+
label3D = this.getConComponentsFor3DVolume(volumeSlices, sliceHeight, sliceWidth);
- //-- label3D = await this.getConComponentsFor3DVolumeWithTimer(volumeSlices, sliceHeight, sliceWidth);
-
+
// Filter only largest volumetric 3d region with the most voxels of same label and remove noisy smaller 3d regions
let maxVolumeLabel = this.getMostFreqVolumeLabel3D(label3D, sliceHeight, sliceWidth, volumeSlices.length);
diff --git a/js/brainchop/mainMeshNetFunctions.js b/js/brainchop/mainMeshNetFunctions.js
index 9a0436e..da8a016 100644
--- a/js/brainchop/mainMeshNetFunctions.js
+++ b/js/brainchop/mainMeshNetFunctions.js
@@ -1,6 +1,6 @@
/*
=========================================================
-* Brainchop - v3.0.0
+* Brainchop - v2.2.0 TESTING
=========================================================
* Description: A user interface for whole brain segmentation
@@ -86,104 +86,6 @@
return data1DimArr;
}
-
-/**
-* Check if string
-*
-* @since 3.0.0
-* @param {Any} variable
-* @returns {bool}
-* @example
-*
-* isString("someString")
-* // => true
-*
-* isString({ "0": "BG", "1": "Cerebral-White-Matter", "2": "Ventricle"})
-* // => false
-*
-* isString("")
-* // => false
-*/
-
- isString = (variable) => {
- return (typeof variable === 'string' || variable instanceof String) &&
- (variable !== null) && variable.length ? true : false;
- }
-
-
-/**
-* Check if object
-*
-* @since 3.0.0
-* @param {Any} variable
-* @returns {bool}
-* @example
-*
-* isObject({ "0": "BG", "1": "Cerebral-White-Matter", "2": "Ventricle"})
-* // => true
-*
-* isObject("someString")
-* // => false
-*
-*/
-
- isObject = (variable) => {
- return (typeof variable === 'object') && (variable !== null) ? true : false;
- }
-
-
- /**
- * Find if two arrays are identical.
- *
- * @function
- * @since 3.0.0
- * @version 3.0.0
- * @category Array
- * @param {Array} array1 - The array of values.
- * @param {Array} array2 - The array of values.
- * @returns {boolean}
- * @example
- *
- * areArraysEquals( [1, 1, 2, 3], [1, 1, 2, 5])
- *
- * => false
- */
-
- areArraysEquals = (array1, array2) => {
- return JSON.stringify(array1) === JSON.stringify(array2) ? true : false;
- }
-
-
- /**
- * Verify if parent object has all keys of child object
- * e.g. child object: labelsHistoObj, parent object: colorLutObj or labelsObj
- *
- *
- * @function
- * @since 1.0.0
- * @version 3.0.0
- * @param {object} childObj - The child object e.g. labelsHistoObj
- * @param {object} parentObj - The parent object e.g. colorLutObj or labelsObj
- * @returns {boolean}
- * @example
- *
- * verifyChildParentObjects( {"x": 1, "y": 2}, {"y": 2, "z": 3, "x": 4})
- *
- * => true
- */
-
- verifyChildParentObjects = (childObj, parentObj) => {
-
- Object.keys(childObj).forEach((childKey, idx) => {
-
- if ( ! parentObj.hasOwnProperty(childKey)) {
- return false;
- }
- })
-
- return true;
- }
-
/**
* Generates number of colors using HSL wheel hsl(hue, saturation, lightness).
*
@@ -431,9 +333,9 @@ rgbToHex = (rgbObj) => {
* @returns {Array} Returns 2D labels array outputSlices[sliceIdx][sliceHeight*sliceWidth] after filtering noisy 3d regions
* @example
*
-* await postProcessSlices3D( [ [0,0,0,0, 0,1,1,0, 0,0,0,0],
- [0,0,0,0, 0,0,1,1, 0,0,0,0],
- [0,0,0,0, 0,0,0,1, 0,1,1,0] ], 3, 4)
+* postProcessSlices3D( [ [0,0,0,0, 0,1,1,0, 0,0,0,0],
+* [0,0,0,0, 0,0,1,1, 0,0,0,0],
+* [0,0,0,0, 0,0,0,1, 0,1,1,0] ], 3, 4)
*
* // => [ [0,0,0,0, 0,1,1,0, 0,0,0,0],
* [0,0,0,0, 0,0,1,1, 0,0,0,0],
@@ -455,6 +357,7 @@ rgbToHex = (rgbObj) => {
+
///////////////************************3D Contours*********************************////////////////////
getSliceContoursMaskByLabel = (imgDataForLabel, mask, color) => {
@@ -1158,104 +1061,58 @@ rgbToHex = (rgbObj) => {
const normalizedSlices_3d = volumeData.sub(volumeData_Min).div(volumeData_Max.sub(volumeData_Min));
return normalizedSlices_3d;
}
+
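+/**
+* Calculate the tensor data quantiles (doc follows the earlier arrow-function version of this helper).
+* @param {tf.Tensor} tensor - Tensor1d/Tensor2d/Tensor3d, e.g. Tensor3d of all MRI volume data
+* @param {number} lowerQuantile
+* @param {number} upperQuantile
+* @returns {object} - { qmin, qmax } as plain numbers
+* @example
+*
+* await calculateQuantiles( tf.tensor( Array.from({length: 8}, (x, i) => i), [2, 2, 2]) )
+* // => Object { qmin: 0, qmax: 7 }
+*/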
+async function calculateQuantiles(tensor, lowerQuantile = 0.01, upperQuantile = 0.99) {
+ // Step 1: Flatten the tensor
+ const flatTensor = tensor.flatten();
+
+ // Step 2: Convert the flattened tensor to an array to sort it
+ const flatArray = await flatTensor.array();
+ flatArray.sort((a, b) => a - b); // Sort the array in ascending order
+
+ // Convert the sorted array back to a tensor
+ const sortedTensor = tf.tensor1d(flatArray);
+
+ // Step 3: Calculate the indices for the quantiles
+ const numElements = sortedTensor.shape[0];
+ const lowIndex = Math.floor(numElements * lowerQuantile);
+ const highIndex = Math.ceil(numElements * upperQuantile) - 1; // Subtract 1 because indices are 0-based
+
+ // Step 4: Slice the sorted tensor to get qmin and qmax
+ const qmin = sortedTensor.slice(lowIndex, 1); // Get the value at the low index
+ const qmax = sortedTensor.slice(highIndex, 1); // Get the value at the high index
+
+ // Get the actual values from the tensors
+ const qminValue = (await qmin.array())[0];
+ const qmaxValue = (await qmax.array())[0];
+
+ // Clean up tensors to free memory
+ flatTensor.dispose();
+ sortedTensor.dispose();
+ qmin.dispose();
+ qmax.dispose();
+
+ return { qmin: qminValue, qmax: qmaxValue };
+ }
-/**
-* For future use
-* Calculate the tensor data quantiles
-* @since 3.0.0
-* @param {tf.Tensor} tensor - Tensor1d/Tensor2d/Tensor3d, e.g. Tensor3d of all MRI volume data
-* @param {number} lowerQuantile
-* @param {number} upperQuantile
-* @returns {object}
-* @example
-*
-* await calculateQuantiles ( tf.tensor( Array.from({length: 8}, (x, i) => i) , [2, 2, 2]) )
-*
-* // => Object { qmin: 0, qmax: 7 }
-*
-*/
-
-
- calculateQuantiles = async(tensor, lowerQuantile = 0.01, upperQuantile = 0.99) => {
- // Flatten the tensor
- const flatTensor = tensor.flatten();
-
- // Convert the flattened tensor to an array to sort it
- const flatArray = await flatTensor.array();
- flatArray.sort((a, b) => a - b); // Sort the array in ascending order
-
- // Convert the sorted array back to a tensor
- const sortedTensor = tf.tensor1d(flatArray);
-
- // Calculate the indices for the quantiles
- const numElements = sortedTensor.shape[0];
- const lowIndex = Math.floor(numElements * lowerQuantile);
- const highIndex = Math.ceil(numElements * upperQuantile) - 1; // Subtract 1 because indices are 0-based
-
- // Slice the sorted tensor to get qmin and qmax
- const qmin = sortedTensor.slice(lowIndex, 1); // Get the value at the low index
- const qmax = sortedTensor.slice(highIndex, 1); // Get the value at the high index
-
- // Get the actual values from the tensors
- const qminValue = (await qmin.array())[0];
- const qmaxValue = (await qmax.array())[0];
-
- // Clean up tensors to free memory
- flatTensor.dispose();
- sortedTensor.dispose();
- qmin.dispose();
- qmax.dispose();
-
- return { qmin: qminValue, qmax: qmaxValue };
- }
-
-
-/**
-* For future use
-* Normalize the tensor data using quantiles
-* @since 3.0.0
-* @param {tf.Tensor} tensor - Tensor1d/Tensor2d/Tensor3d, e.g. Tensor3d of all MRI volume data
-* @param {number} lowerQuantile
-* @param {number} upperQuantile
-* @returns {tf.Tensor}
-* @example
-*
-* normTensor = await normalizeTensor ( tf.tensor( Array.from({length: 8}, (x, i) => i) , [2, 2, 2]) )
-*
-* // => Object Object { kept: false, isDisposedInternal: false, shape: (3) […], dtype: "float32", size: 8,
-* strides: (2) […], dataId: {…}, id: 9, rankType: "3", scopeId: 5 }
-*
-* normTensor.print()
-*
-* //=> Tensor
-* [[[0 , 0.1428571],
-* [0.2857143, 0.4285715]],
-*
-* [[0.5714286, 0.7142857],
-* [0.8571429, 1 ]]]
-*/
-
-
- normalizeTensor = async (tensor, lowerQuantile = 0.05, upperQuantile = 0.95) => {
- // Call calculateQuantiles and wait for the result
- const { qmin, qmax } = await calculateQuantiles(tensor, lowerQuantile, upperQuantile);
-
- // Convert qmin and qmax back to scalars
- const qminScalar = tf.scalar(qmin);
- const qmaxScalar = tf.scalar(qmax);
-
- // Perform the operation: (tensor - qmin) / (qmax - qmin)
- const resultTensor = tensor.sub(qminScalar).div(qmaxScalar.sub(qminScalar));
+ async function normalizeTensor(tensor, lowerQuantile = 0.05, upperQuantile = 0.95) {
+ // Call calculateQuantiles and wait for the result
+ const { qmin, qmax } = await calculateQuantiles(tensor, lowerQuantile, upperQuantile);
- // Dispose of the created scalars to free memory
- qminScalar.dispose();
- qmaxScalar.dispose();
+ // Convert qmin and qmax back to scalars
+ const qminScalar = tf.scalar(qmin);
+ const qmaxScalar = tf.scalar(qmax);
- // Return the resulting tensor
- return resultTensor;
- }
+ // Perform the operation: (tensor - qmin) / (qmax - qmin)
+ const resultTensor = tensor.sub(qminScalar).div(qmaxScalar.sub(qminScalar));
+ // Dispose of the created scalars to free memory
+ qminScalar.dispose();
+ qmaxScalar.dispose();
+ // Return the resulting tensor
+ return resultTensor;
+}
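+// Illustrative usage (sketch; assumes `volumeTensor` is a tf.Tensor3D of MRI intensities):
+//   const normalized = await normalizeTensor(volumeTensor, 0.05, 0.95);
+//   // Intensities between the 5th and 95th percentiles map to roughly [0, 1];
+//   // values outside that range fall below 0 or above 1.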
/**
* load pre-trained model from local drive
@@ -1288,9 +1145,9 @@ rgbToHex = (rgbObj) => {
*
*/
- load_browser_model = async( modelFile, weightFile) => {
- return await tf.loadLayersModel(tf.io.browserFiles( [ modelFile, weightFile ]));
- }
+ load_browser_model = async( modelFile, weightFile) => {
+ return await tf.loadLayersModel(tf.io.browserFiles( [ modelFile, weightFile ]));
+ }
/**
* Generates range of colors for Segmentation classes -- (refine)
@@ -2347,9 +2204,6 @@ mergeSubVolumes_old = (allPredictions, num_of_slices, slice_height, slice_width,
}
-
-
-
/**
* Generate output labels of all slices. (refine)
* Find current voxel value of the related seg class buffer, if we have numSegClasses = 3 then we have 3 buffers,
@@ -2386,7 +2240,7 @@ generateOutputSlicesV2 = (unstackOutVolumeTensor, num_of_slices, numSegClasses,
// Remove noisy regions using 3d CC
let sliceWidth = niftiHeader.dims[1];
let sliceHeight = niftiHeader.dims[2];
- return postProcessSlices3D(allOutputSlices3DCC, sliceHeight, sliceWidth );
+ return postProcessSlices3D(allOutputSlices3DCC, sliceHeight, sliceWidth );
})
console.log("Post processing done ... ");
@@ -2570,125 +2424,63 @@ generateOutputSlicesV2 = (unstackOutVolumeTensor, num_of_slices, numSegClasses,
let roiLabels = [];
let chartXaxisStep = 1;
- console.log("labelsHistoObj Keys: ", Object.keys(labelsHistoObj));
-
-
- let colorLutObj = getExternalJSON(colorURL);
- //--e.g. colorLutObj- e.g. {"0": "rgb(0,0,0)", "1": "rgb(245,245,245)", "2": "rgb(196,58,250)", ... }
- let labelsObj = getExternalJSON(labelsURL);
- //-- e.g. labelsObj- { "0": "BG", "1": "Cerebral-White-Matter", "2": "Ventricle",..}
-
- // Color object, check if segmenation labels less or equal colors
- if ( isObject(colorLutObj) ? verifyChildParentObjects( Object.keys(labelsHistoObj).length, Object.keys(colorLutObj).length) : false ) {
-
- Object.keys(labelsHistoObj).forEach((labelKey, idx) => {
- roiData.push({y: labelsHistoObj[labelKey] * 1 / totalTissueVol, color: rgbToHex( getRgbObject( colorLutObj[labelKey] ) ) });
- })
+ if(! maskBrainExtraction) { // If Atlas 50, 104 or GMWM Segmentations
- } else {
- colorLutObj = {};
-
- Object.keys(labelsHistoObj).forEach((labelKey, idx) => {
- colorLutObj[labelKey] = "rgb(" + labelKey + "," + labelKey + "," + labelKey + ")";
- })
+ let colorLutObj = getExternalJSON(colorURL);
+ outVolumeStatus['colorLutObj'] = colorLutObj;
+ //-- e.g. colorLutObj: {"0": "rgb(0,0,0)", "1": "rgb(245,245,245)", "2": "rgb(196,58,250)", ... }
- Object.keys(labelsHistoObj).forEach((labelKey, idx) => {
- roiData.push({y: labelsHistoObj[labelKey] * 1 / totalTissueVol, color: rgbToHex( getRgbObject( colorLutObj[labelKey] ) ) });
+ let labelsObj = getExternalJSON(labelsURL);
+ outVolumeStatus['labelsObj'] = labelsObj;
+ //-- e.g. labelsObj- { "0": "BG", "1": "Cerebral-White-Matter", "2": "Ventricle",..}
- })
- }
+ Object.keys(labelsHistoObj).forEach((labelKey, idx) => {
+ roiData.push({y: labelsHistoObj[labelKey] * 1/totalTissueVol, color: rgbToHex( getRgbObject( colorLutObj[labelKey] ) ) });
+ roiLabels[idx] = labelsObj[labelKey];
+ })
- outVolumeStatus['colorLutObj'] = colorLutObj;
+ //-- roiData = [ {y: 34.4, color: 'red'}, {y: 20.1, color: '#aaff99'}];
+ //-- roiLabels = ['Roi-1','Roi-2'];
+ } else { // For mask or brain extraction models
- // label object, check if segmenation classes have less or equal labels in the label json file
- if ( isObject(labelsObj) ? verifyChildParentObjects( Object.keys(labelsHistoObj), Object.keys(labelsObj) ): false ) {
+ let colorLutObj = {};
+ let labelsObj = {};
- Object.keys(labelsHistoObj).forEach((labelKey, idx) => {
- roiLabels[idx] = labelsObj[labelKey];
- })
+ Object.keys(labelsHistoObj).forEach((labelKey, idx) => {
+ colorLutObj[labelKey] = "rgb(" + labelKey + "," + labelKey + "," + labelKey + ")";
+ labelsObj[labelKey] = labelKey;
+ })
- outVolumeStatus['labelsObj'] = labelsObj;
- } else {
- labelsObj = {};
+ Object.keys(labelsHistoObj).forEach((labelKey, idx) => {
+ roiData.push({y: labelsHistoObj[labelKey] * 1/totalTissueVol, color: rgbToHex( getRgbObject( colorLutObj[labelKey] ) ) });
+ if(idx == 0 || idx == Math.round(Object.keys(labelsHistoObj).length * opts.chartXaxisStepPercent) || idx == Object.keys(labelsHistoObj).length -1 ){
+ roiLabels[idx] = labelsObj[labelKey];
+ }
- Object.keys(labelsHistoObj).forEach((labelKey, idx) => {
- labelsObj[labelKey] = labelKey;
- })
+ })
- Object.keys(labelsHistoObj).forEach((labelKey, idx) => {
- if(idx == 0 || idx == Math.round(Object.keys(labelsHistoObj).length * opts.chartXaxisStepPercent) || idx == Object.keys(labelsHistoObj).length -1 ){
- roiLabels[idx] = labelsObj[labelKey];
- }
+ chartXaxisStep = Math.round(Object.keys(labelsHistoObj).length * opts.chartXaxisStepPercent);
- })
+ outVolumeStatus['colorLutObj'] = colorLutObj;
+ // To show only "All" on the chart, set labelsObj to null
+ outVolumeStatus['labelsObj'] = null;
- chartXaxisStep = Math.round(Object.keys(labelsHistoObj).length * opts.chartXaxisStepPercent);
- // To only show All make label null
- outVolumeStatus['labelsObj'] = null;
}
-
-
- // if( (! maskBrainExtraction) && (labelsURL !== null) && (colorURL !== null) ) { // If Atlas 50, 104 or GMWM Segmenations
-
- // let colorLutObj = getExternalJSON(colorURL);
- // outVolumeStatus['colorLutObj'] = colorLutObj;
- // //--e.g. colorLutObj- e.g. {"0": "rgb(0,0,0)", "1": "rgb(245,245,245)", "2": "rgb(196,58,250)", ... }
-
- // let labelsObj = getExternalJSON(labelsURL);
- // outVolumeStatus['labelsObj'] = labelsObj;
- // //-- e.g. labelsObj- { "0": "BG", "1": "Cerebral-White-Matter", "2": "Ventricle",..}
-
-
- // Object.keys(labelsHistoObj).forEach((labelKey, idx) => {
- // roiData.push({y: labelsHistoObj[labelKey] * 1 / totalTissueVol, color: rgbToHex( getRgbObject( colorLutObj[labelKey] ) ) });
- // roiLabels[idx] = labelsObj[labelKey];
- // })
-
- // //-- roiData = [ {y: 34.4, color: 'red'}, {y: 20.1, color: '#aaff99'}];
- // //-- roiLabels = ['Roi-1','Roi-2'];
-
- // } else { // For mask or brain extraction models or when label/color json not provided
-
- // let colorLutObj = {};
- // let labelsObj = {};
-
- // Object.keys(labelsHistoObj).forEach((labelKey, idx) => {
- // colorLutObj[labelKey] = "rgb(" + labelKey + "," + labelKey + "," + labelKey + ")";
- // labelsObj[labelKey] = labelKey;
- // })
-
-
- // Object.keys(labelsHistoObj).forEach((labelKey, idx) => {
- // roiData.push({y: labelsHistoObj[labelKey] * 1 / totalTissueVol, color: rgbToHex( getRgbObject( colorLutObj[labelKey] ) ) });
- // if(idx == 0 || idx == Math.round(Object.keys(labelsHistoObj).length * opts.chartXaxisStepPercent) || idx == Object.keys(labelsHistoObj).length -1 ){
- // roiLabels[idx] = labelsObj[labelKey];
- // }
-
- // })
-
- // chartXaxisStep = Math.round(Object.keys(labelsHistoObj).length * opts.chartXaxisStepPercent);
-
- // outVolumeStatus['colorLutObj'] = colorLutObj;
- // // To only show All make label null
- // outVolumeStatus['labelsObj'] = null;
- // }
-
-
$$("hchart").config.settings.xAxis.categories = roiLabels;
$$("hchart").config.settings.xAxis.labels.step = chartXaxisStep;
$$("hchart").config.settings.series[0].data = roiData;
$$("hchart")._render();
- $$("out3DIcon").enable();
- $$("outChartIcon").enable();
- document.getElementById("out3D-1").style.opacity = 1;
- document.getElementById("outChart-1").style.opacity = 1;
- document.getElementById("out3D-1").style.filter = "alpha(opacity=100)";
- document.getElementById("outChart-1").style.filter = "alpha(opacity=100)";
+ // $$("out3DIcon").enable();
+ // $$("outChartIcon").enable();
+ // document.getElementById("out3D-1").style.opacity = 1;
+ // document.getElementById("outChart-1").style.opacity = 1;
+ // document.getElementById("out3D-1").style.filter = "alpha(opacity=100)";
+ // document.getElementById("outChart-1").style.filter = "alpha(opacity=100)";
}
@@ -3535,1499 +3327,307 @@ accumulateArrBufSizes = (bufferSizesArr) => {
}
+/////////////////////////////////----------------SEQ LAYER-----------------//////////////////////////////////
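+/**
+* Process a large tensor in smaller chunks along its last dimension to limit peak memory usage:
+* each chunk is multiplied element-wise by the matching slice of `vector`, summed over the last
+* axis, and accumulated into the running result.
+*
+* @param {tf.Tensor} inputTensor e.g. [ D, H, W, Ch] or [ Ch, D, H, W] -> [ 256, 256, 256, 5 ] or [ 5, 256, 256, 256 ]
+* @param {tf.Tensor} vector - e.g. filterWeight: [-1.4474995, 0.6897876, -0.2812168, -0.0344299, 1.266812]
+* @param {number} chunkSize - important for memory; the larger it is, the more memory in use, e.g. 4
+* @return {tf.Tensor}
+*/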
+function processTensorInChunks(inputTensor, vector, chunkSize) {
+ const rank = inputTensor.rank;
+ const lastDimension = inputTensor.shape[rank - 1];
+ if (lastDimension !== vector.size) {
+ throw new Error('The last dimension of the input tensor must match the length of the vector.');
+ }
-/**
-* Inference Function for sub-volumes
-*
-* In version 3.0.0 this function not used, can reuse in future versions
-*
-* @since 1.0.0
-* @param {promise} model
-* @param {tf.Tensor} slices_3d
-* @param {Array} input_shape - e.g. [?, D, H, W, Ch] or [?, Ch, D, H, W]
-* @param {boolen} isChannelLast- check input shape for channel position.
-* @param {number} num_of_slices- Total Number of slices a.k.a z-dim
-* @param {number} slice_height- - Slice Height
-* @param {number} slice_width- Slice Width
-* @param {number} batch_D- Batch Depth
-* @param {number} batch_H- Batch Height
-* @param {number} batch_W- Batch Width
-*
-*/
+ if (chunkSize <= 0 || chunkSize > lastDimension) {
+ throw new Error('Invalid chunk size.');
+ }
- inferenceSubVolumes = async(model, slices_3d, num_of_slices, slice_height, slice_width, pipeline1_out = null) => {
+ return tf.tidy(() => {
+ let accumulatedResult = null;
- let refVoxel = [], boundVolSizeArr = [];
- let enableCrop = inferenceModelsList[$$("selectModel").getValue() - 1]["enableCrop"];
+ for (let i = 0; i < lastDimension; i += chunkSize) {
+ const sliceSize = Math.min(chunkSize, lastDimension - i);
- if(enableCrop) {
+ const tensorSlice = inputTensor.slice([...Array(rank - 1).fill(0), i], [-1, -1, -1, sliceSize]);
+ const vectorSlice = vector.slice(i, sliceSize);
- //--Phase-2, After remove the skull try to allocate brain volume and make inferece
- console.log(" ---- Start SubVolume inference phase-II ---- ");
+ const multiplied = tf.mul(tensorSlice, vectorSlice);
+ tensorSlice.dispose();
+ vectorSlice.dispose();
- let mask_3d;
+ const summed = tf.sum(multiplied, -1);
+ multiplied.dispose(); // Dispose of the multiplied tensor, as we no longer need it.
- if(pipeline1_out == null) {
- // binarize original image if there is no pre-model for masking task
- mask_3d = slices_3d.greater([0]).asType('bool');
+ if (accumulatedResult === null) {
+ accumulatedResult = summed;
+ } else {
+ // Before updating accumulatedResult, dispose of the previous tensor
+ const oldAccumulatedResult = accumulatedResult;
+ accumulatedResult = oldAccumulatedResult.add(summed);
+ oldAccumulatedResult.dispose(); // Dispose of the old accumulated result
+ summed.dispose(); // Dispose of the summed tensor, as it is now part of the accumulated result
+ }
+ }
- } else {
+ return accumulatedResult;
+ });
+}
- mask_3d = pipeline1_out.greater([0]).asType('bool');
- pipeline1_out.dispose();
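+// Variant of processTensorInChunks that accumulates each chunk via reshape + matMul instead of
+// element-wise mul followed by sum; judging by the "111" suffix it appears to be an experimental
+// alternative kept for comparison.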
+function processTensorInChunks111(inputTensor, vector, chunkSize) {
+ const rank = inputTensor.rank;
+ const lastDimension = inputTensor.shape[rank - 1];
- }
+ if (lastDimension !== vector.size) {
+ throw new Error('The last dimension of the input tensor must match the length of the vector.');
+ }
- console.log(" mask_3d shape : ", mask_3d.shape);
+ if (chunkSize <= 0 || chunkSize > lastDimension) {
+ throw new Error('Invalid chunk size.');
+ }
- const coords = await tf.whereAsync(mask_3d);
- //-- Get each voxel coords (x, y, z)
+ return tf.tidy(() => {
+ let accumulatedResult = tf.zeros(inputTensor.shape.slice(0, rank - 1));
- mask_3d.dispose();
+ for (let i = 0; i < lastDimension; i += chunkSize) {
+ const sliceSize = Math.min(chunkSize, lastDimension - i);
- const coordsArr = coords.arraySync();
+ const tensorSlice = inputTensor.slice([...Array(rank - 1).fill(0), i], [-1, -1, -1, sliceSize]);
+ const vectorSlice = vector.slice(i, sliceSize);
- let row_min = slice_height, row_max = 0, col_min = slice_width, col_max = 0, depth_min = num_of_slices, depth_max = 0;
+ //const summed = tf.sum(tf.mul(tensorSlice, vectorSlice), -1);
+ const oldAccumulatedResult = accumulatedResult;
+ accumulatedResult = oldAccumulatedResult.add(tensorSlice.reshape([-1, sliceSize])
+ .matMul(vectorSlice.reshape([sliceSize, 1]))
+ .reshape(inputTensor.shape.slice(0, 3)))
+ oldAccumulatedResult.dispose(); // Dispose of the old accumulated result
+ tensorSlice.dispose();
+ vectorSlice.dispose();
+ }
+ return accumulatedResult;
+ });
+}
- for(let i = 0; i < coordsArr.length; i++) {
- if ( row_min > coordsArr[i][0] ) {
- row_min = coordsArr[i][0];
- } else if(row_max < coordsArr[i][0]) {
- row_max = coordsArr[i][0];
- }
+// class SequentialConvLayer_v1 {
+// constructor(model, chunkSize, isChannelLast) {
+// this.model = model;
+// this.outChannels = model.outputLayers[0].kernel.shape[4];
+// this.chunkSize = chunkSize;
+// this.isChannelLast = isChannelLast;
+// }
+
+// async apply(inputTensor) {
+// const startTime = performance.now();
+
+// const convLayer = this.model.layers[this.model.layers.length - 1];
+// const weights = convLayer.getWeights()[0];
+// const biases = convLayer.getWeights()[1];
+// const outputShape = this.isChannelLast ? inputTensor.shape.slice(1,-1) : inputTensor.shape.slice(2);
+// //-- e.g. outputShape : [256,256,256] or cropped Dim
+// //-- if inputTensor [ 1, D, H, W, 50 ], channelLast true -> outputShape : outputShape [D, H, W]
+// //-- if inputTensor [ 1, 50, D, H, W ], channelLast false -> outputShape : outputShape [D, H, W]
+
+// let outB = tf.mul(tf.ones(outputShape), -10000);
+// let outC = tf.zeros(outputShape);
+
+// for (let i = 0; i < this.outChannels; i++) {
+// const result = tf.tidy(() => {
+// const filterWeights = weights.slice([0, 0, 0, 0, i], [-1, -1, -1, -1, 1]);
+// const filterBiases = biases.slice([i], [1]);
+// const outA = processTensorInChunks(tf.squeeze(inputTensor), tf.squeeze(filterWeights), Math.min(this.chunkSize, this.outChannels)).add(filterBiases);
+// const greater = tf.greater(outA, outB);
+// const newoutB = tf.where(greater, outA, outB);
+// const newoutC = tf.where(greater, tf.fill(outC.shape, i), outC);
+// // Dispose the old tensors before reassigning
+// tf.dispose([outB, outC]);
+// return [newoutC, newoutB];
+// });
+// // Assign the new values to outC and outB
+// outC = result[0];
+// outB = result[1];
+// }
+// tf.dispose(outB);
+
+// const endTime = performance.now();
+// const executionTime = endTime - startTime;
+// console.log(`Execution time for output layer: ${executionTime} milliseconds`);
+
+// return outC;
+// }
+// }
- if ( col_min > coordsArr[i][1] ) {
- col_min = coordsArr[i][1];
- } else if(col_max < coordsArr[i][1]) {
- col_max = coordsArr[i][1];
- }
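+/**
+* Show the tf.js memory status (numBytes, numTensors, GPU usage, etc.) in the UI
+* while the sequential output layer is running.
+*
+* @param {number} chIdx - index of the output channel just completed
+* @param {number} totalChannels
+* @return {Promise}
+*/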
+showMemStatus = async(chIdx, totalChannels) => {
- if ( depth_min > coordsArr[i][2] ) {
- depth_min = coordsArr[i][2];
- } else if(depth_max < coordsArr[i][2]) {
- depth_max = coordsArr[i][2];
- }
- }
+ return new Promise((resolve, reject) => {
+ let memStatus = tf.memory().unreliable ? "Red" : "Green";
+ let unreliableReasons = tf.memory().unreliable ? "unreliable reasons :" + tf.memory().reasons.fontcolor("red").bold() : "";
+ document.getElementById("memoryStatus").style.backgroundColor = memStatus;
- console.log( "row min and max :", row_min, row_max);
- console.log( "col min and max :", col_min, col_max);
- console.log( "depth min and max :", depth_min, depth_max);
+ document.getElementById("memStatusParagraph").innerHTML = "Channels completed: " + (chIdx + 1) + " / " + totalChannels +
+ // https://js.tensorflow.org/api/latest/#memory
+          "<br>" + "TF Memory Status: " + memStatus.fontcolor(tf.memory().unreliable ? "red" : "green").bold() +
+          // numBytes: Number of bytes allocated (undisposed) at this time
+          "<br>" + "numBytes : " + Math.round(tf.memory().numBytes/(1024*1024)) + " MB" +
+          // numBytesInGPU : Number of bytes allocated (undisposed) in the GPU only at this time
+          "<br>" + "numBytesInGPU : " + Math.round(tf.memory().numBytesInGPU/(1024*1024)) + " MB" +
+          "<br>" + "numBytesInGPUAllocated : " + Math.round(tf.memory().numBytesInGPUAllocated/(1024*1024)) + " MB" +
+          "<br>" + "numBytesInGPUFree : " + Math.round(tf.memory().numBytesInGPUFree/(1024*1024)) + " MB" +
+          // numDataBuffers : Number of unique data buffers allocated (undisposed) at this time, which is ≤ the number of tensors
+          "<br>" + "numDataBuffers : " + tf.memory().numDataBuffers +
+          "<br>" + "numTensors : " + tf.memory().numTensors +
+          "<br>" + unreliableReasons;
- //-- Reference voxel that cropped volume started slice with it
- refVoxel = [row_min, col_min, depth_min];
- // -- Starting form refVoxel, size of bounding volume
- boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1];
+ resolve(); // When this fires, the code in a().then(/..../); is executed.
+ });
- coords.dispose();
- //-- Extract 3d object (e.g. brain)
- slices_3d = slices_3d.slice([row_min, col_min, depth_min], [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1] )
+}
- //-- Padding size add to cropped brain
- let pad = inferenceModelsList[$$("selectModel").getValue() - 1]["cropPadding"];
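+/**
+* Applies the model's last (output) convolution layer one output channel at a time, keeping only
+* the running arg-max label volume (outC) and max score (outB) per voxel, so the full
+* [D, H, W, numSegClasses] activation never has to be materialized at once.
+*
+* Illustrative usage (sketch with hypothetical names; mirrors how the removed
+* inferenceFullVolumeSeqCovLayer code used it, where `res` is the loaded tf.LayersModel and
+* `penultimateTensor` is the output of the layer before the classification layer):
+*
+*   const seqConvLayer = new SequentialConvLayer(res, 4, isChannelLast);
+*   const outputTensor = await seqConvLayer.apply(penultimateTensor); // => [D, H, W] label volume
+*/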
+class SequentialConvLayer {
+ constructor(model, chunkSize, isChannelLast) {
+ this.model = model;
+ this.outChannels = model.outputLayers[0].kernel.shape[4];
+ this.chunkSize = chunkSize;
+ this.isChannelLast = isChannelLast;
+ }
- // Create margin around the bounding volume
- slices_3d = addZeroPaddingTo3dTensor(slices_3d, [pad, pad] , [pad, pad], [pad, pad]);
- console.log(" cropped slices_3d with padding shape: ", slices_3d.shape);
+ async apply(inputTensor) {
+ const self = this;
+ // Important to avoid "undefined" class var members inside the timer.
+ // "this" has another meaning inside the timer.
- if(opts.drawBoundingVolume) {
+ return new Promise((resolve, reject) => {
- let testVol = removeZeroPaddingFrom3dTensor(slices_3d, pad, pad, pad);
- console.log(" testVol without padding shape : ", testVol.shape);
+ const startTime = performance.now();
- testVol = resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr );
- console.log(" testVol final shape after resizing : ", testVol.shape);
+ const convLayer = self.model.layers[self.model.layers.length - 1];
+ const weights = convLayer.getWeights()[0];
+ const biases = convLayer.getWeights()[1];
+ const outputShape = self.isChannelLast ? inputTensor.shape.slice(1,-1) : inputTensor.shape.slice(2);
+ //-- e.g. outputShape : [256,256,256] or cropped Dim
+ //-- if inputTensor [ 1, D, H, W, 50 ], channelLast true -> outputShape : outputShape [D, H, W]
+ //-- if inputTensor [ 1, 50, D, H, W ], channelLast false -> outputShape : outputShape [D, H, W]
- draw3dObjBoundingVolume(tf.unstack(testVol));
- testVol.dispose();
+ let outB = tf.mul(tf.ones(outputShape), -10000);
+ let outC = tf.zeros(outputShape);
+ let chIdx = 0;
- return 0;
- }
+ // console.log("---------------------------------------------------------");
+ console.log(" channel loop");
- }
+ let seqTimer = window.setInterval(async function() {
+ // console.log(" channel : ", chIdx);
+ console.log(chIdx);
- let transpose = inferenceModelsList[$$("selectModel").getValue() - 1]["enableTranspose"];
- if(transpose) {
- slices_3d = slices_3d.transpose()
- console.log("Input transposed for model");
- } else {
- console.log("Transpose not enabled for model");
- }
+ const result = tf.tidy(() => {
+ const filterWeights = weights.slice([0, 0, 0, 0, chIdx], [-1, -1, -1, -1, 1]);
+ const filterBiases = biases.slice([chIdx], [1]);
+ const outA = processTensorInChunks(tf.squeeze(inputTensor), tf.squeeze(filterWeights), Math.min(self.chunkSize, self.outChannels)).add(filterBiases);
+ const greater = tf.greater(outA, outB);
+ const newoutB = tf.where(greater, outA, outB);
+ const newoutC = tf.where(greater, tf.fill(outC.shape, chIdx), outC);
+ // Dispose the old tensors before reassigning
+ tf.dispose([outB, outC]);
+ return [newoutC, newoutB];
+ });
- model.then(function (res) {
+ await showMemStatus(chIdx, self.outChannels);
- let batch_D, batch_H, batch_W;
- let input_shape;
- let modelObject = {};
+ // Assign the new values to outC and outB
+ outC = result[0];
+ outB = result[1];
- modelObject = res;
+ if(chIdx == (self.outChannels -1)) {
- let isChannelLast = isModelChnlLast(modelObject);
- const batchSize = opts.batchSize;
- const numOfChan = opts.numOfChan;
-
- //-- Test and adjust model input shape dim after padding ..
- for (let i = 0; i < slices_3d.rank; i++) {
- if(isChannelLast) {
- if(slices_3d.shape[i] < modelObject.layers[0].batchInputShape[i+1]) {
- console.log(" cropped slices_3d with pad < model input shape dim ");
- modelObject.layers[0].batchInputShape[i+1] = slices_3d.shape[i];
- }
+ window.clearInterval( seqTimer );
- } else {
- if(slices_3d.shape[i] < modelObject.layers[0].batchInputShape[i+2]) {
- console.log(" cropped slices_3d with pad < model input shape dim ");
- modelObject.layers[0].batchInputShape[i+2] = slices_3d.shape[i];
- }
- }
- }
+ tf.dispose(outB);
+ const endTime = performance.now();
+ const executionTime = endTime - startTime;
+ console.log(`Execution time for output layer: ${executionTime} milliseconds`);
- // Get model input shape
- if(isChannelLast) {
- batch_D = modelObject.layers[0].batchInputShape[1];
- batch_H = modelObject.layers[0].batchInputShape[2];
- batch_W = modelObject.layers[0].batchInputShape[3];
- input_shape = [batchSize, batch_D, batch_H, batch_W, numOfChan];
- } else {
- batch_D = modelObject.layers[0].batchInputShape[2];
- batch_H = modelObject.layers[0].batchInputShape[3];
- batch_W = modelObject.layers[0].batchInputShape[4];
- input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W];
- }
-
- const isBatchOverlapEnable = inferenceModelsList[$$("selectModel").getValue() - 1]["isBatchOverlapEnable"];
-
- let allBatches = [];
- let headSubCubesCoords = [];
-
- if(isBatchOverlapEnable) {
- // Number of additional batches focus on the brain/head volume
- let numOverlapBatches = inferenceModelsList[$$("selectModel").getValue() - 1]["numOverlapBatches"];
- console.log(" num of overlapped batches: ", numOverlapBatches);
-
- // Find the centroid of 3D head volume and the variance
- let cent_var = cubeMoments(slices_3d, 0.5);
- // Mean or centroid
- const headCentroid = cent_var[0];
- console.log(" Head 3D Centroid : ", headCentroid);
- // Variance
- const sigma = cent_var[1];
- console.log(" Head 3D Variance : ", sigma);
-
- headSubCubesCoords = findCoordsOfAddBrainBatches(numOverlapBatches,
- new Array(headCentroid[0], headCentroid[1], headCentroid[2]),
- new Array(sigma[0], sigma[1], sigma[2]),
- new Array(slices_3d.shape[0], slices_3d.shape[1], slices_3d.shape[2]),
- new Array(batch_D, batch_H, batch_W));
-
- allBatches = sliceVolumeIntoOverlappedBatches(slices_3d, slices_3d.shape[0], slices_3d.shape[1], slices_3d.shape[2], batch_D, batch_H, batch_W, headSubCubesCoords);
-
- } else {
- // This option will cover all slices, some slices that are not enough to create a batch will need overlap with prevous batch slices
- // e.g. slice volume = 3*5*5 DHW , and batch is 2*2*2 , 2*3*3 =18 batches will be considered
- let num_of_batches = Math.ceil(slices_3d.shape[2]/batch_W) * Math.ceil(slices_3d.shape[1]/batch_H) * Math.ceil(slices_3d.shape[0]/batch_D);
- console.log("Num of Batches for inference: ", num_of_batches);
-
- allBatches = sliceVolumeIntoBatches(slices_3d, slices_3d.shape[0], slices_3d.shape[1], slices_3d.shape[2], batch_D, batch_H, batch_W);
- }
-
- tf.dispose(slices_3d);
-
- statData["No_SubVolumes"] = allBatches.length;
- statData["Brainchop_Ver"] = "SubVolumes";
-
- let allPredictions = [];
-
- try {
- let startTime = performance.now();
- let inferenceStartTime = performance.now();
- // maxLabelPredicted in whole volume of the brain
- let maxLabelPredicted = 0;
- let expected_Num_labels;
-
- let delay = inferenceModelsList[$$("selectModel").getValue() - 1]["inferenceDelay"];
- console.log("Inference delay :", delay);
-
- let layersLength = res.layers.length;
- console.log("res.layers.length ", layersLength);
-
- statData["Input_Shape"] = JSON.stringify(res.layers[0].batchInputShape);
- statData["Output_Shape"] = JSON.stringify(res.output.shape);
- statData["Channel_Last"] = isChannelLast;
- statData["Model_Param"] = getModelNumParameters(res);
- statData["Model_Layers"] = getModelNumLayers(res);
- statData["Model"] = inferenceModelsList[$$("selectModel").getValue() - 1]["modelName"];
- statData["Extra_Info"] = null;
-
- let curProgBar = parseInt(document.getElementById("progressBar").style.width);
-
- let j = 0;
- let timer = window.setInterval(function() {
- let curTensor = [];
- curTensor[0] = tf.tensor(allBatches[j].data.dataSync(), input_shape);
-
- let lastIdx = 0;
-
- for (let i = 1; i < layersLength; i++) {
- try {
- if (res.layers[i].activation.getClassName() !== 'linear') {
- curTensor[i] = res.layers[i].apply( curTensor[i-1]);
- } else {
-
- curTensor[i] = convByOutputChannelAndInputSlicing(curTensor[i-1],
- res.layers[i].getWeights()[0],
- res.layers[i].getWeights()[1],
- res.layers[i].strides,
- res.layers[i].padding,
- res.layers[i].dilationRate,
- 3); // important for memory use
- }
- } catch(err) {
-
- if( err.message === "Failed to compile fragment shader.") {
- webix.confirm({
- title:"",
- ok:"Ok",
- cancel:"Cancel",
- type: "confirm-error",
- width: 500,
- text: "Context lost due to limited Memory available, please check current browser resouces in the toolbar and verified GPUs for each model"
- })
- .then(() => {
- //---
- $$("browserResourcesWindow").show();
-
-
- }).fail(() => {
- //---
-
- });
-
- } else {
- webix.alert(err.message);
- }
-
- window.clearInterval( timer );
- tf.engine().endScope();
- tf.engine().disposeVariables();
-
- statData["Inference_t"] = Infinity;
- statData["Postprocess_t"] = Infinity;
- statData["Status"] = "Fail";
- statData["Error_Type"] = err.message;
- statData["Extra_Err_Info"] = "Failed while model layer " + i + " apply";
-
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
-
-
-
- return 0;
- }
-
- if( j == allBatches.length-1 ) {
- console.log("layer ", i);
- console.log("layer output Tensor shape : ", curTensor[i].shape);
- console.log("layer count params ", res.layers[i].countParams());
- }
-
- curTensor[i-1].dispose();
- lastIdx += 1;
- }
-
-
- // Get axis
- let axis = isChannelLast ? -1 : 1;
- let prediction_argmax = tf.argMax(curTensor[lastIdx], axis);
-
- if( j == allBatches.length - 1 ) {
- expected_Num_labels = isChannelLast ? curTensor[lastIdx].shape[4] : curTensor[lastIdx].shape[1];
- }
-
- tf.dispose(curTensor[lastIdx]);
-
- allPredictions.push({"id": allBatches[j].id, "coordinates": allBatches[j].coordinates, "data": Array.from(prediction_argmax.dataSync()) })
- let curBatchMaxLabel = findArrayMax(Array.from(prediction_argmax.dataSync()));
-
- if( maxLabelPredicted < curBatchMaxLabel ) {
- maxLabelPredicted = curBatchMaxLabel;
- }
-
- tf.dispose(prediction_argmax);
-
-
- let memStatus = tf.memory().unreliable ? "Red" : "Green";
- let unreliableReasons = tf.memory().unreliable ? "unreliable reasons :" + tf.memory().reasons.fontcolor("red").bold() : "";
- document.getElementById("progressBar").style.width = (curProgBar + (j + 1)*(100 - curProgBar)/allBatches.length) + "%";
-
- document.getElementById("memoryStatus").style.backgroundColor = memStatus;
-
- //-- let memoryStatusData=[{ memoryUse: Math.round(tf.memory().numBytesInGPU/(1024*1024*20))}];
- //-- $$("memoryMonitor").clearAll();
- //-- $$("memoryMonitor").parse(memoryStatusData);
-
- //-- document.getElementById("progressBar").innerHTML= Math.floor((j+1)*100/allBatches.length) + "%";
-
- if( j == allBatches.length-1 ) {
- window.clearInterval( timer );
-
- let Inference_t = ((performance.now() - startTime)/1000).toFixed(4);
-
- let numSegClasses = maxLabelPredicted + 1;
- console.log("Num of seg classes: ", numSegClasses);
-
- statData["Actual_Labels"] = numSegClasses;
- statData["Expect_Labels"] = expected_Num_labels;
- statData["NumLabels_Match"] = numSegClasses == expected_Num_labels? true : false;
-
-
- startTime = performance.now();
- // Generate output volume or slices
- console.log("Merging subvolumes... ");
- let outLabelVolume = tf.tidy(() => {
- return mergeSubVolumesV2(allPredictions, slices_3d.shape[0], slices_3d.shape[1], slices_3d.shape[2], numSegClasses, batch_D, batch_H, batch_W, axis);
- })
-
- allPredictions = [];
- let Merge_t = ((performance.now() - startTime)/1000).toFixed(4);
-
- if(enableCrop) {
- let pad = inferenceModelsList[$$("selectModel").getValue() - 1]["cropPadding"];
- outLabelVolume = removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad);
- console.log(" outLabelVolume without padding shape : ", outLabelVolume.shape);
- outLabelVolume = resizeWithZeroPadding(outLabelVolume, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr );
- console.log(" outLabelVolume final shape after resizing : ", outLabelVolume.shape);
- }
-
- let unstackOutVolumeTensor = tf.unstack(outLabelVolume);
- tf.dispose(outLabelVolume);
-
-
- startTime = performance.now();
- console.log("Generating output...");
- try {
- generateOutputSlicesV2(unstackOutVolumeTensor, num_of_slices, numSegClasses, slice_height, slice_width);
- console.log(" SubVolume inference num of tensors after generateOutputSlicesV2: " , tf.memory().numTensors );
- } catch(error) {
-
-
- //-- Timing data to collect
-
- tf.engine().endScope();
- tf.engine().disposeVariables();
-
- webix.alert("Failed while generating output due to limited browser memory available");
-
- statData["Inference_t"] = Inference_t;
- statData["Merge_t"] = Merge_t;
- statData["Postprocess_t"] = Infinity;
- statData["Status"] = "Fail";
- statData["Error_Type"] = error.message;
- statData["Extra_Err_Info"] = "Failed while generating output";
-
- document.getElementById("progressBar").style.width = 0;
-
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
-
- return 0;
-
- }
-
- let Postprocess_t = ((performance.now() - startTime)/1000).toFixed(4);
-
- document.getElementById("progressBar").style.width = 0;
- //webix.message.hide("waitMessage");
-
-
- $$("downloadBtn").enable();
- $$("segmentBtn").enable();
- // $$("imageUploader").enable();
- tf.engine().endScope();
- tf.engine().disposeVariables();
-
-
- console.log("Processing the whole brain volume in tfjs tooks for multi-class output mask : ",
- ((performance.now()-inferenceStartTime)/1000).toFixed(4) + " Seconds");
-
- //-- Timing data to collect
- statData["Inference_t"] = Inference_t;
- statData["Merge_t"] = Merge_t;
- statData["Postprocess_t"] = Postprocess_t;
- statData["Status"] = "OK"
-
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
-
-
- }
-
- j++;
-
- }, delay);
-
- }
- catch(err) {
- webix.alert(err.message);
- console.log( err.message );
- console.log(
- "If webgl context is lost, try to restore webgl context by visit the link " +
- 'here'
- );
-
- document.getElementById("webGl2Status").style.backgroundColor = isWebGL2ContextLost() ? "Red" : "Green";
- document.getElementById("memoryStatus").style.backgroundColor = tf.memory().unreliable ? "Red" : "Green";
- }
-
- });
-
- }
-
- /////////////////////////////////////////////////////////////////////////
-///////////////----------------SEQ LAYER-----------------////////////////
-////////////////////////////////////////////////////////////////////////
-
-/**
-* This function is designed to process a large tensor in smaller chunks to manage memory usage effectively.
-*
-* @since 3.0.0
-* @param {tf.Tensor} inputTensor e.g.[ D, H, W, Ch] or [ Ch, D, H, W]->[ 256, 256, 256, 5 ] or [ 5, 256, 256, 256 ]
-* @param {tf.Tensor} vector - e.g. filterWeight: [-1.4474995, 0.6897876, -0.2812168, -0.0344299, 1.266812]
-* @param {number} chunkSize -parameter important for memory, the larger it is, the more memory in use. e.g. 4
-* @return {tf.Tensor}
-*
-*/
-
-function processTensorInChunks(inputTensor, vector, chunkSize) {
- const rank = inputTensor.rank;
- const lastDimension = inputTensor.shape[rank - 1];
-
- if (lastDimension !== vector.size) {
- throw new Error('The last dimension of the input tensor must match the length of the vector.');
- }
-
- if (chunkSize <= 0 || chunkSize > lastDimension) {
- throw new Error('Invalid chunk size.');
- }
-
- return tf.tidy(() => {
- let accumulatedResult = null;
-
- for (let i = 0; i < lastDimension; i += chunkSize) {
- const sliceSize = Math.min(chunkSize, lastDimension - i);
-
- const tensorSlice = inputTensor.slice([...Array(rank - 1).fill(0), i], [-1, -1, -1, sliceSize]);
- const vectorSlice = vector.slice(i, sliceSize);
-
- const multiplied = tf.mul(tensorSlice, vectorSlice);
- tensorSlice.dispose();
- vectorSlice.dispose();
-
- const summed = tf.sum(multiplied, -1);
- multiplied.dispose(); // Dispose of the multiplied tensor, as we no longer need it.
-
- if (accumulatedResult === null) {
- accumulatedResult = summed;
- } else {
- // Before updating accumulatedResult, dispose of the previous tensor
- const oldAccumulatedResult = accumulatedResult;
- accumulatedResult = oldAccumulatedResult.add(summed);
- oldAccumulatedResult.dispose(); // Dispose of the old accumulated result
- summed.dispose(); // Dispose of the summed tensor, as it is now part of the accumulated result
- }
- }
-
- return accumulatedResult;
- });
-}
-
-
-/**
-* This function is show memory status while running sequential processing
-*
-* @since 3.0.0
-* @param {number} chIdx
-* @param {number} totalChannels
-* @return {promise}
-*
-*/
-
-showMemStatus = async(chIdx, totalChannels) => {
-
- return new Promise((resolve, reject) => {
-
- let memStatus = tf.memory().unreliable ? "Red" : "Green";
- let unreliableReasons = tf.memory().unreliable ? "unreliable reasons :" + tf.memory().reasons.fontcolor("red").bold() : "";
- document.getElementById("memoryStatus").style.backgroundColor = memStatus;
-
- document.getElementById("memStatusParagraph").innerHTML = "Channels completed: " + (chIdx + 1) + " / " + totalChannels +
- // https://js.tensorflow.org/api/latest/#memory
-          "<br>" + "TF Memory Status: " + memStatus.fontcolor(tf.memory().unreliable ? "red" : "green").bold() +
-          // numBytes: Number of bytes allocated (undisposed) at this time
-          "<br>" + "numBytes : " + Math.round(tf.memory().numBytes/(1024*1024)) + " MB" +
-          // numBytesInGPU : Number of bytes allocated (undisposed) in the GPU only at this time
-          "<br>" + "numBytesInGPU : " + Math.round(tf.memory().numBytesInGPU/(1024*1024)) + " MB" +
-          "<br>" + "numBytesInGPUAllocated : " + Math.round(tf.memory().numBytesInGPUAllocated/(1024*1024)) + " MB" +
-          "<br>" + "numBytesInGPUFree : " + Math.round(tf.memory().numBytesInGPUFree/(1024*1024)) + " MB" +
-          // numDataBuffers : Number of unique data buffers allocated (undisposed) at this time, which is ≤ the number of tensors
-          "<br>" + "numDataBuffers : " + tf.memory().numDataBuffers +
-          "<br>" + "numTensors : " + tf.memory().numTensors +
-          "<br>" + unreliableReasons;
-
- resolve(); // When this fires, the code in a().then(/..../); is executed.
-
- });
-
-
-}
-
-
-class SequentialConvLayer {
- constructor(model, chunkSize, isChannelLast) {
- this.model = model;
- this.outChannels = model.outputLayers[0].kernel.shape[4];
- this.chunkSize = Math.min(chunkSize, this.outChannels)
- this.isChannelLast = isChannelLast;
- }
-
- /**
- * Apply sequential convolution layer
- * @since 3.0.0
- * @member SequentialConvLayer
- * @param {tf.Tensor} inputTensor e.g. [ 1, 256, 256, 256, 5 ]
- * @return {promise}
- *
- * convLayer.rank -> 3
- * typeof(convLayer) -> "object"
- * convLayer: Object { dataFormat: "channelsLast", dilationRate: Array(3) [ 1, 1, 1 ], inputSpec: Array [ {…} ],
- * name: "output", padding: "same", strides: Array(3) [ 1, 1, 1 ], ...}
- *
- * weights.shape -> Array(5) [ 1, 1, 1, 5, 3 ]
- * weights.print()
- * //=> Tensor
- * [[[[[0.146999 , -1.4474995, -2.8961499],
- * [1.1067894, 0.6897876 , -0.7573005],
- * [-0.38512 , -0.2812168, -0.8637539],
- * [0.9341159, -0.0344299, -2.3668685],
- * [0.1052373, 1.266812 , 0.6542516 ]]]]]
- *
- * biases.shape -> Array [ 3 ]
- * biases.print()
- * //=> Tensor
- * [-0.7850812, -2.3238883, 2.1639345]
- *
- * for idx = 0 -> filterWeights.shape -> Array(5) [ 1, 1, 1, 5, 1 ]
- * filterWeights.print()
- * //=> Tensor
- * [[[[[0.146999 ],
- * [1.1067894],
- * [-0.38512 ],
- * [0.9341159],
- * [0.1052373]]]]]
- *
- * for idx = 0 -> filterBiases.shape -> Array [1]
- * filterBiases.print()
- * //=> Tensor
- * [-0.7850812]
-
- */
-
- async apply(inputTensor) {
-
- const self = this;
- // Important to avoid "undefined" class var members inside the timer.
- // "this" has another meaning inside the timer.
-
- // *** WARNING!!! if you uncomment this line the memory leak will break webGL and may reboot your machine
- //document.getElementById("progressBarChild").parentElement.style.visibility = "visible";
-
- return new Promise((resolve, reject) => {
-
- const startTime = performance.now();
-
- const convLayer = self.model.layers[self.model.layers.length - 1];
- const weights = convLayer.getWeights()[0]; //
- const biases = convLayer.getWeights()[1];
- const outputShape = self.isChannelLast ? inputTensor.shape.slice(1,-1) : inputTensor.shape.slice(2);
-
- //-- e.g. outputShape : [256,256,256] or cropped Dim
- //-- if inputTensor [ 1, D, H, W, 50 ], channelLast true -> outputShape : outputShape [D, H, W]
- //-- if inputTensor [ 1, 50, D, H, W ], channelLast false -> outputShape : outputShape [D, H, W]
-
- let outB = tf.mul(tf.ones(outputShape), -10000);
- //-- e.g. outB.shape [256,256,256]
- let outC = tf.zeros(outputShape);
- //-- e.g. outC.shape [256,256,256]
- let chIdx = 0;
-
- // console.log("---------------------------------------------------------");
- console.log(" channel loop");
-
- let seqTimer = window.setInterval(function() {
-
- console.log(chIdx);
-
- const result = tf.tidy(() => {
- const filterWeights = weights.slice([0, 0, 0, 0, chIdx], [-1, -1, -1, -1, 1]);
- // -- e.g. filterWeights.shape [ 1, 1, 1, 5, 1 ]
- const filterBiases = biases.slice([chIdx], [1]);
- //-- e.g. filterBiases.shape [1] -> Tensor [-0.7850812]
- const outA = processTensorInChunks(tf.squeeze(inputTensor), tf.squeeze(filterWeights), self.chunkSize).add(filterBiases);
- const greater = tf.greater(outA, outB);
- const newoutB = tf.where(greater, outA, outB);
- const newoutC = tf.where(greater, tf.fill(outC.shape, chIdx), outC);
- // Dispose the old tensors before reassigning
- tf.dispose([outB, outC]);
- return [newoutC, newoutB];
- });
-
- // -- await showMemStatus(chIdx, self.outChannels);
- // Log memory usage
- const memoryInfo = tf.memory();
- console.log(`Iteration ${chIdx}:`);
- console.log(`Number of Tensors: ${memoryInfo.numTensors}`);
- console.log(`Number of Data Buffers: ${memoryInfo.numDataBuffers}`);
- console.log(`Bytes In Use: ${memoryInfo.numBytes}`);
- console.log(`Megabytes In Use: ${(memoryInfo.numBytes / 1048576).toFixed(3)} MB`);
- console.log(`Unreliable: ${memoryInfo.unreliable}`);
-
- // Assign the new values to outC and outB
- outC = result[0];
- outB = result[1];
-
- if(chIdx == (self.outChannels -1)) {
-
- window.clearInterval( seqTimer );
- // *** WARNING!!! if you uncomment this line the memory leak will break webGL and may reboot your machine
- // document.getElementById("progressBarChild").style.width = 0 + "%";
- tf.dispose(outB);
- const endTime = performance.now();
- const executionTime = endTime - startTime;
- console.log(`Execution time for output layer: ${executionTime} milliseconds`);
- resolve(outC);
- }
- chIdx++;
- // *** WARNING!!! if you uncomment this line the memory leak will break webGL and may reboot your machine
- //document.getElementById("progressBarChild").style.width = (chIdx + 1)*100/self.outChannels + "%";
- }, 100);
- });
-
- }
-}
-
-
-
-/**
-* This function better memory managment during the model layer processing
-*
-* @since 3.0.0
-* @param {tf.Tensor} input
-* @param {tf.Tensor} filter
-* @param {tf.Tensor} biases
-* @param {Array} stride e.g. [ 1, 1, 1 ]
-* @param {string} pad e.g. "same"
-* @param {Array} dilationRate e.g. [ 1, 1, 1 ]
-* @param {number} sliceSize e.g. 3
-* @return {}
-*
-*/
-
-function convByOutputChannelAndInputSlicing(input, filter, biases, stride, pad, dilationRate, sliceSize) {
- const batchSize = input.shape[0];
- const depth = input.shape[1];
- const height = input.shape[2];
- const width = input.shape[3];
- const inChannels = input.shape[4];
- const outChannels = filter.shape[4];
-
- // Create an empty array to hold the output channels
- let outputChannels = null;
-
- // Slice the input tensor and process one output channel at a time
- for (let channel = 0; channel < outChannels; channel++) {
- const numSlices = Math.ceil(inChannels / sliceSize);
- const biasesSlice = biases.slice([channel], [1]);
- let outputChannel = null;
-
- for (let i = 0; i < numSlices; i++) {
- const startChannel = i * sliceSize;
- const endChannel = Math.min((i + 1) * sliceSize, inChannels);
-
- // Only proceed if there are channels to process
- if (startChannel < inChannels) {
- const resultSlice = tf.tidy(() => {
- const inputSlice = input.slice([0, 0, 0, 0, startChannel], [-1, -1, -1, -1, endChannel - startChannel]);
- const filterSlice = filter.slice([0, 0, 0, startChannel, channel], [-1, -1, -1, endChannel - startChannel, 1]);
- // Perform the convolution for the current slice and output channel
- return tf.conv3d(inputSlice, filterSlice, stride, pad, 'NDHWC', dilationRate);
- });
-
- if (outputChannel === null) {
- outputChannel = resultSlice;
- } else {
- const updatedOutputChannel = outputChannel.add(resultSlice);
- outputChannel.dispose();
- resultSlice.dispose();
- outputChannel = updatedOutputChannel;
- }
- }
- }
-
- // Add the biases to the accumulated convolutions for this channel
- const biasedOutputChannel = outputChannel.add(biasesSlice);
- outputChannel.dispose();
- biasesSlice.dispose();
-
- // Accumulate the channel to the output array
- if (outputChannels == null){
- outputChannels = biasedOutputChannel;
- }else{
- const updatedOutputChannels = tf.concat([outputChannels, biasedOutputChannel], 4);
- biasedOutputChannel.dispose();
- outputChannels.dispose();
- outputChannels = updatedOutputChannels;
- }
- }
-
- return outputChannels;
-}
-
-
-
-/**
-* Inference Function for full volume and also apply sequential convoluton layer
-* Suitable for low memory devices and low performance devices.
-*
-* @since 1.0.0
-* @param {promise} model
-* @param {tf.Tensor} slices_3d
-* @param {Array} input_shape - e.g. [?, D, H, W, Ch] or [?, Ch, D, H, W]
-* @param {boolen} isChannelLast- check input shape for channel position.
-* @param {number} num_of_slices- Total Number of slices a.k.a z-dim
-* @param {number} slice_height- - Slice Height
-* @param {number} slice_width- Slice Width
-*
-*/
-
- inferenceFullVolumeSeqCovLayer = (model, slices_3d, input_shape, isChannelLast, num_of_slices, slice_height, slice_width) => {
- console.log(" ---- Start FullVolume Inference with Sequential Convoluton Layer ---- ");
-
- statData["No_SubVolumes"] = 1;
-
- model.then(function (res) {
-
- try {
- startTime = performance.now();
- let inferenceStartTime = performance.now();
- // maxLabelPredicted in whole volume of the brain
- let maxLabelPredicted = 0;
- let transpose = inferenceModelsList[$$("selectModel").getValue() - 1]["enableTranspose"];
- let delay = inferenceModelsList[$$("selectModel").getValue() - 1]["inferenceDelay"];
- console.log("Inference delay :", delay);
-
- let i = 1;
- let layersLength = res.layers.length;
- console.log("Total num of layers ", layersLength);
-
- // Determine the number of output channels in the last layer of the model
- // e.g. 3, 50, 104
- const outputLayer = res.layers[res.layers.length - 1];
- console.log("Output Layer : ", outputLayer);
-
- const expected_Num_labels = isChannelLast ?
- outputLayer.outputShape[outputLayer.outputShape.length - 1]:
- outputLayer.outputShape[1];
- console.log("Num of output channels : ", expected_Num_labels);
-
-
- let curTensor = [];
- curTensor[0] = slices_3d.reshape(input_shape);
- // console.log("curTensor[0] :", curTensor[0].dataSync());
-
- let timer = window.setInterval(async function() {
-
- try {
- if (res.layers[i].activation.getClassName() !== 'linear') {
- curTensor[i] = res.layers[i].apply( curTensor[i-1]);
- } else {
-
- curTensor[i] = convByOutputChannelAndInputSlicing(curTensor[i-1],
- res.layers[i].getWeights()[0],
- res.layers[i].getWeights()[1],
- res.layers[i].strides,
- res.layers[i].padding,
- res.layers[i].dilationRate,
- 3); // important for memory use
- }
- // Log memory usage
- const memoryInfo = tf.memory();
- console.log(`Iteration ${i}:`);
- console.log(`Number of Tensors: ${memoryInfo.numTensors}`);
- console.log(`Number of Data Buffers: ${memoryInfo.numDataBuffers}`);
- console.log(`Bytes In Use: ${memoryInfo.numBytes}`);
- console.log(`Megabytes In Use: ${(memoryInfo.numBytes / 1048576).toFixed(3)} MB`);
- console.log(`Unreliable: ${memoryInfo.unreliable}`);
- tf.dispose(curTensor[i-1]);
-
- } catch(err) {
-
- if( err.message === "Failed to compile fragment shader.") {
- webix.confirm({
- title:"",
- ok:"Ok",
- cancel:"Cancel",
- type: "confirm-error",
- width: 500,
- text: "Context lost due to limited Memory available, please check current browser resouces in the toolbar and verified GPUs for each model"
- })
- .then(() => {
- //---
- $$("browserResourcesWindow").show();
-
-
- }).fail(() => {
- //---
-
- });
-
- } else {
- webix.alert(err.message);
- }
-
- window.clearInterval( timer );
- tf.engine().endScope();
- tf.engine().disposeVariables();
-
- statData["Inference_t"] = Infinity;
- statData["Postprocess_t"] = Infinity;
- statData["Status"] = "Fail";
- statData["Error_Type"] = err.message;
- statData["Extra_Err_Info"] = "Failed while model layer " + i + " apply";
-
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
-
- return 0;
- } // end of catch
-
- console.log("layer ", i);
- console.log("layer output Tensor shape : ", curTensor[i].shape);
- console.log("layer count params ", res.layers[i].countParams());
-
- res.layers[i].dispose();
- curTensor[i-1].dispose();
-
- document.getElementById("progressBar").style.width = (i + 1)*100/layersLength + "%";
- let memStatus = tf.memory().unreliable ? "Red" : "Green";
- let unreliableReasons = tf.memory().unreliable ? "unreliable reasons :" + tf.memory().reasons : "";
- document.getElementById("memoryStatus").style.backgroundColor = memStatus;
-
- if( i == layersLength - 2) { //Stop before the last layer or classification layer.
-
- window.clearInterval( timer );
-
- // // Create an instance of SequentialConvLayer
- // The second parameter is important for memory,
- // the larger it is, the more memory it uses
- // it was 8, but I set it to 3, got a different error
- seqConvLayer = new SequentialConvLayer(res, 4, isChannelLast);
-
- // Apply the last output tensor to the seq. instance
- let outputTensor = null;
-
- const profileInfo = await tf.profile(async() => {
- // Your tensor operations here
- outputTensor = await seqConvLayer.apply(curTensor[i]);
- });
-
- console.log("profileInfo : ",profileInfo);
-
- //-- document.getElementById("progressBarChild").style.width = 0 + "%";;
-
- // Dispose the previous layer input tensor
- tf.dispose(curTensor[i]);
- // delete the used class
- delete seqConvLayer;
-
- // You can now use 'outputTensor' as needed
- console.log(outputTensor);
- console.log(" Output tensor shape : ", outputTensor.shape);
- // Array(3) [ 256, 256, 256 ]
-
- if(outputTensor.shape.length != 3) {
- webix.alert("Output tensor shape should be 3 dims but it is " + outputTensor.shape.length, "alert-error");
- }
-
- let Inference_t = ((performance.now() - startTime) / 1000).toFixed(4);
-
- console.log("find array max: ");
- let curBatchMaxLabel = findArrayMax(Array.from(outputTensor.dataSync()));
-
- if( maxLabelPredicted < curBatchMaxLabel ) {
- maxLabelPredicted = curBatchMaxLabel;
- }
-
- let numSegClasses = maxLabelPredicted + 1;
- console.log("Predicted num of segmentation classes", numSegClasses);
- statData["Actual_Labels"] = numSegClasses;
- statData["Expect_Labels"] = expected_Num_labels;
- statData["NumLabels_Match"] = numSegClasses == expected_Num_labels? true : false;
-
- if( numSegClasses != expected_Num_labels ) {
- webix.alert("expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses, "alert-error");
- console.log("expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses);
- }
-
-
- // Transpose MRI data to be match pytorch/keras input output
- if(transpose) {
- console.log("outLabelVolume transposed");
- outputTensor = outputTensor.transpose();
- }
-
- let unstackOutVolumeTensor = tf.unstack(outputTensor);
- tf.dispose(outputTensor);
-
- startTime = performance.now();
-
- // Generate output volume or slices
- console.log("Generating output");
-
- try {
- generateOutputSlicesV2(unstackOutVolumeTensor , num_of_slices, numSegClasses, slice_height, slice_width);
- console.log(" FullVolume inference num of tensors after generateOutputSlicesV2: " , tf.memory().numTensors );
- } catch (error) {
-
- //-- Timing data to collect
- tf.engine().endScope();
- tf.engine().disposeVariables();
-
- console.log("Error while generating output: ", error)
-
- webix.alert("Failed while generating output due to limited browser memory available");
-
- statData["Inference_t"] = Inference_t;
- statData["Postprocess_t"] = Infinity;
- statData["Status"] = "Fail";
- statData["Error_Type"] = error.message;
- statData["Extra_Err_Info"] = "Failed while generating output";
-
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
-
- return 0;
- }
-
- let Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4);
-
- document.getElementById("progressBar").style.width = 0;
-
- $$("downloadBtn").enable();
- $$("segmentBtn").enable();
-
- tf.engine().endScope();
- tf.engine().disposeVariables();
-
- console.log("Processing the whole brain volume in tfjs tooks for multi-class output mask : ",
- ((performance.now()-inferenceStartTime)/1000).toFixed(4) + " Seconds");
-
- //-- Timing data to collect
- statData["Inference_t"] = Inference_t;
- statData["Postprocess_t"] = Postprocess_t;
- statData["Status"] = "OK";
-
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
-
- } else {
-
- i++;
- }
-
-
- }, delay);
-
- } catch(err) {
-
- webix.alert(err.message);
- console.log( err.message );
- console.log(
- "If webgl context is lost, try to restore webgl context by visit the link " +
- 'here'
- );
-
-
- document.getElementById("webGl2Status").style.backgroundColor = isWebGL2ContextLost() ? "Red" : "Green";
-
- document.getElementById("memoryStatus").style.backgroundColor = tf.memory().unreliable ? "Red" : "Green";
- }
- });
-
- }
-
-
-
-/**
-* Inference function for full volume that crops input MRI and also apply sequential convoluton layer (Phase 2)
-* Suitable for low memory devices and low performance devices.
-* Phase-1 find the mask
-*
-* @since 1.2.0
-* @param {promise} model, selected model for inference.
-* @param {tf.Tensor} pipeline1_out 3D e.g. null or tensor
-* @param {tf.Tensor} slices_3d
-* @param {Array} input_shape - e.g. [?, D, H, W, Ch] or [?, Ch, D, H, W]
-* @param {number} num_of_slices- Total Number of slices a.k.a z-dim
-* @param {number} slice_height- - Slice Height
-* @param {number} slice_width- Slice Width
-*
-*/
-
- inferenceFullVolumeSeqCovLayerPhase2 = async(model, slices_3d, num_of_slices, slice_height, slice_width, pipeline1_out) => {
-
- //--Phase-2, After remove the skull try to allocate brain volume and make inferece
- console.log(" ---- Start FullVolume Inference with Sequential Conv Layer for phase-II ---- ");
-
- let mask_3d;
-
- if(pipeline1_out == null) {
- // binarize original image
- mask_3d = slices_3d.greater([0]).asType('bool');
-
- } else {
-
- mask_3d = pipeline1_out.greater([0]).asType('bool');
- //-- pipeline1_out.dispose();
-
- }
-
- console.log(" mask_3d shape : ", mask_3d.shape);
-
- const coords = await tf.whereAsync(mask_3d);
- //-- Get each voxel coords (x, y, z)
-
- mask_3d.dispose();
-
- const coordsArr = coords.arraySync();
-
- let row_min = slice_height, row_max = 0, col_min = slice_width, col_max = 0, depth_min = num_of_slices, depth_max = 0;
-
- for(let i = 0; i < coordsArr.length; i++) {
-
- if ( row_min > coordsArr[i][0] ) {
- row_min = coordsArr[i][0];
- } else if(row_max < coordsArr[i][0]) {
- row_max = coordsArr[i][0];
- }
-
- if ( col_min > coordsArr[i][1] ) {
- col_min = coordsArr[i][1];
- } else if(col_max < coordsArr[i][1]) {
- col_max = coordsArr[i][1];
- }
-
- if ( depth_min > coordsArr[i][2] ) {
- depth_min = coordsArr[i][2];
- } else if(depth_max < coordsArr[i][2]) {
- depth_max = coordsArr[i][2];
- }
- }
-
-
- console.log( "row min and max :", row_min, row_max);
- console.log( "col min and max :", col_min, col_max);
- console.log( "depth min and max :", depth_min, depth_max);
-
- //-- Reference voxel that cropped volume started slice with it
- let refVoxel = [row_min, col_min, depth_min];
- // -- Starting form refVoxel, size of bounding volume
- let boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1];
-
- coords.dispose();
-
- //-- Extract 3d object (e.g. brain)
- let cropped_slices_3d = slices_3d.slice([row_min, col_min, depth_min], [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1] )
-
- slices_3d.dispose();
-
- //-- Padding size add to cropped brain
- let pad = inferenceModelsList[$$("selectModel").getValue() - 1]["cropPadding"];
-
- // Create margin around the bounding volume
- cropped_slices_3d_w_pad = addZeroPaddingTo3dTensor(cropped_slices_3d, [pad, pad] , [pad, pad], [pad, pad]);
- console.log(" cropped slices_3d with padding shape: ", cropped_slices_3d_w_pad.shape);
-
- cropped_slices_3d.dispose();
-
-
- if(opts.drawBoundingVolume) {
-
- let testVol = removeZeroPaddingFrom3dTensor(cropped_slices_3d_w_pad, pad, pad, pad);
- console.log(" outLabelVolume without padding shape : ", testVol.shape);
-
- testVol = resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr );
- console.log(" outLabelVolume final shape after resizing : ", testVol.shape);
-
- draw3dObjBoundingVolume(tf.unstack(testVol));
- testVol.dispose();
-
- return 0;
- }
-
-
- statData["Brainchop_Ver"] = "FullVolume";
-
- model.then(function (res) {
-
- try {
- startTime = performance.now();
- let inferenceStartTime = performance.now();
- // maxLabelPredicted in whole volume of the brain
- let maxLabelPredicted = 0;
- let transpose = inferenceModelsList[$$("selectModel").getValue() - 1]["enableTranspose"];
- let delay = inferenceModelsList[$$("selectModel").getValue() - 1]["inferenceDelay"];
- console.log("Inference delay :", delay);
-
- if(transpose) {
- cropped_slices_3d_w_pad = cropped_slices_3d_w_pad.transpose()
- console.log("Input transposed for pre-model");
- } else {
- console.log("Transpose not enabled for pre-model");
- }
-
- let i = 1;
- let layersLength = res.layers.length;
- console.log("res.layers.length ", layersLength);
-
- let isChannelLast = isModelChnlLast(res);
- const batchSize = opts.batchSize;
- const numOfChan = opts.numOfChan;
-
- //-- Adjust model input shape
- if(isChannelLast) {
-
- res.layers[0].batchInputShape[1] = cropped_slices_3d_w_pad.shape[0];
- res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[1];
- res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[2];
-
- adjusted_input_shape = [batchSize, res.layers[0].batchInputShape[1],
- res.layers[0].batchInputShape[2],
- res.layers[0].batchInputShape[3],
- numOfChan];
-
- } else {
-
- res.layers[0].batchInputShape[2] = cropped_slices_3d_w_pad.shape[0];
- res.layers[0].batchInputShape[3] = cropped_slices_3d_w_pad.shape[1];
- res.layers[0].batchInputShape[4] = cropped_slices_3d_w_pad.shape[2];
-
- adjusted_input_shape = [batchSize, numOfChan,
- res.layers[0].batchInputShape[2],
- res.layers[0].batchInputShape[3],
- res.layers[0].batchInputShape[4]];
-
- }
-
- console.log(" Model batch input shape : ", res.layers[0].batchInputShape);
- // -- batchInputShape {Array} input_shape - e.g. [?, D, H, W, Ch] or [?, Ch, D, H, W]
-
- statData["Input_Shape"] = JSON.stringify(res.layers[0].batchInputShape);
- statData["Output_Shape"] = JSON.stringify(res.output.shape);
- statData["Channel_Last"] = isChannelLast;
- statData["Model_Param"] = getModelNumParameters(res);
- statData["Model_Layers"] = getModelNumLayers(res);
- statData["Model"] = inferenceModelsList[$$("selectModel").getValue() - 1]["modelName"];
- statData["Extra_Info"] = null;
-
-
- // Determine the number of output channels in the last layer of the model
- // e.g. 3, 50, 104
- const outputLayer = res.layers[res.layers.length - 1];
- console.log("Output Layer : ", outputLayer);
-
- const expected_Num_labels = isChannelLast ?
- outputLayer.outputShape[outputLayer.outputShape.length - 1]:
- outputLayer.outputShape[1];
- console.log("Num of output channels : ", expected_Num_labels);
-
-
-
- let curTensor = [];
- curTensor[0] = cropped_slices_3d_w_pad.reshape(adjusted_input_shape);
- // console.log("curTensor[0] :", curTensor[0].dataSync());
-
- let curProgBar = parseInt(document.getElementById("progressBar").style.width);
-
- let timer = window.setInterval(async function() {
-
- try {
- if (res.layers[i].activation.getClassName() !== 'linear') {
- curTensor[i] = res.layers[i].apply( curTensor[i-1]);
- } else {
-
- curTensor[i] = convByOutputChannelAndInputSlicing(curTensor[i-1],
- res.layers[i].getWeights()[0],
- res.layers[i].getWeights()[1],
- res.layers[i].strides,
- res.layers[i].padding,
- res.layers[i].dilationRate,
- 3); // important for memory use
- }
- // Log memory usage
- const memoryInfo = tf.memory();
- console.log(`Iteration ${i}:`);
- console.log(`Number of Tensors: ${memoryInfo.numTensors}`);
- console.log(`Number of Data Buffers: ${memoryInfo.numDataBuffers}`);
- console.log(`Bytes In Use: ${memoryInfo.numBytes}`);
- console.log(`Megabytes In Use: ${(memoryInfo.numBytes / 1048576).toFixed(3)} MB`);
- console.log(`Unreliable: ${memoryInfo.unreliable}`);
- tf.dispose(curTensor[i-1]);
-
- } catch(err) {
-
- if( err.message === "Failed to compile fragment shader.") {
- webix.confirm({
- title:"",
- ok:"Ok",
- cancel:"Cancel",
- type: "confirm-error",
- width: 500,
- text: "Context lost due to limited Memory available, please check current browser resouces in the toolbar and verified GPUs for each model"
- })
- .then(() => {
- //---
- $$("browserResourcesWindow").show();
-
-
- }).fail(() => {
- //---
-
- });
-
- } else {
- webix.alert(err.message);
- }
-
- window.clearInterval( timer );
- tf.engine().endScope();
- tf.engine().disposeVariables();
-
- statData["Inference_t"] = Infinity;
- statData["Postprocess_t"] = Infinity;
- statData["Status"] = "Fail";
- statData["Error_Type"] = err.message;
- statData["Extra_Err_Info"] = "Failed while model layer " + i + " apply";
-
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
-
- return 0;
- }
-
- console.log("layer ", i);
- console.log("layer output Tensor shape : ", curTensor[i].shape);
- console.log("layer count params ", res.layers[i].countParams());
-
- res.layers[i].dispose();
- curTensor[i-1].dispose();
-
-
- document.getElementById("progressBar").style.width = (curProgBar + (i + 1)*(100 - curProgBar)/layersLength) + "%";
- let memStatus = tf.memory().unreliable ? "Red" : "Green";
- let unreliableReasons = tf.memory().unreliable ? "unreliable reasons :" + tf.memory().reasons : "";
- document.getElementById("memoryStatus").style.backgroundColor = memStatus;
-
-
- if( i == layersLength - 2) { //Stop before the last layer or classification layer.
-
- window.clearInterval( timer );
-
-
- // // Create an instance of SequentialConvLayer
- //The second parameter is important for memory,
- // the larger it is, the more memory it uses
- // it was 8, but I set it to 3, got a different error
- seqConvLayer = new SequentialConvLayer(res, 4, isChannelLast);
-
-
- // Apply the last output tensor to the seq. instance
- let outputTensor = null;
-
- const profileInfo = await tf.profile(async() => {
- // Your tensor operations here
- outputTensor = await seqConvLayer.apply(curTensor[i]);
- });
-
- console.log("profileInfo : ",profileInfo);
-
- //-- document.getElementById("progressBarChild").style.width = 0 + "%";;
-
- // Dispose the previous layer input tensor
- tf.dispose(curTensor[i]);
- // delete the used class
- delete seqConvLayer;
-
- // You can now use 'outputTensor' as needed
- console.log(outputTensor);
- console.log(" Output tensor shape : ", outputTensor.shape);
- // Array(3) [ 256, 256, 256 ]
-
- if(outputTensor.shape.length != 3) {
- webix.alert("Output tensor shape should be 3 dims but it is " + outputTensor.shape.length, "alert-error");
- }
-
-
- let Inference_t = ((performance.now() - startTime)/1000).toFixed(4);
-
- console.log(" find array max ");
- let curBatchMaxLabel = findArrayMax(Array.from(outputTensor.dataSync()));
-
- if( maxLabelPredicted < curBatchMaxLabel ) {
- maxLabelPredicted = curBatchMaxLabel;
- }
-
- let numSegClasses = maxLabelPredicted + 1;
- console.log("Predicted num of segmentation classes", numSegClasses);
- statData["Actual_Labels"] = numSegClasses;
- statData["Expect_Labels"] = expected_Num_labels;
- statData["NumLabels_Match"] = numSegClasses == expected_Num_labels? true : false;
-
- if( numSegClasses != expected_Num_labels ) {
- webix.alert("expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses, "alert-error");
- console.log("expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses);
- }
-
- //-- Transpose back to fit Papaya display settings
- let outLabelVolume = outputTensor.reshape([cropped_slices_3d_w_pad.shape[0], cropped_slices_3d_w_pad.shape[1], cropped_slices_3d_w_pad.shape[2]]);
- tf.dispose(outputTensor);
-
- // Transpose MRI data to be match pytorch/keras input output
- if(transpose) {
- console.log("outLabelVolume transposed");
- outLabelVolume = outLabelVolume.transpose();
- }
-
- outLabelVolume = removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad);
- console.log(" outLabelVolume without padding shape : ", outLabelVolume.shape);
- outLabelVolume = resizeWithZeroPadding(outLabelVolume, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr );
- console.log(" outLabelVolume final shape after resizing : ", outLabelVolume.shape);
-
- let filterOutWithPreMask = inferenceModelsList[$$("selectModel").getValue() - 1]["filterOutWithPreMask"];
-
- // To clean the skull area wrongly segmented inphase-2.
- if(pipeline1_out != null && opts.isBrainCropMaskBased && filterOutWithPreMask) {
- outLabelVolume = outLabelVolume.mul(binarizeVolumeDataTensor(pipeline1_out));
- }
-
-
- let unstackOutVolumeTensor = tf.unstack(outLabelVolume);
- tf.dispose(outLabelVolume);
-
- startTime = performance.now();
- // Generate output volume or slices
- console.log("Generating output");
-
- try {
-
- generateOutputSlicesV2(unstackOutVolumeTensor , num_of_slices, numSegClasses, slice_height, slice_width);
- console.log(" Phase-2 num of tensors after generateOutputSlicesV2: " , tf.memory().numTensors );
-
- } catch (error) {
-
- //-- Timing data to collect
- tf.engine().endScope();
- tf.engine().disposeVariables();
- console.log("Error while generating output: ", error)
-
- webix.alert("Failed while generating output due to limited browser memory available");
-
- statData["Inference_t"] = Inference_t;
- statData["Postprocess_t"] = Infinity;
- statData["Status"] = "Fail";
- statData["Error_Type"] = error.message;
- statData["Extra_Err_Info"] = "Failed while generating output";
-
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
-
- return 0;
- }
-
- let Postprocess_t = ((performance.now() - startTime)/1000).toFixed(4);
-
- document.getElementById("progressBar").style.width = 0;
- //webix.message.hide("waitMessage");
-
- $$("downloadBtn").enable();
- $$("segmentBtn").enable();
- // $$("imageUploader").enable();
- tf.engine().endScope();
- tf.engine().disposeVariables();
-
- console.log("Processing the whole brain volume in tfjs tooks for multi-class output mask : ",
- ((performance.now()-inferenceStartTime)/1000).toFixed(4) + " Seconds");
+ resolve(outC);
+ }
- //-- Timing data to collect
- statData["Inference_t"] = Inference_t;
- statData["Postprocess_t"] = Postprocess_t;
- statData["Status"] = "OK";
+ chIdx++;
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
+ }, 10);
+ });
- } else {
+ }
+}
- i++;
- }
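+/**
+* Memory-friendly 3D convolution used for layers with linear activation.
+* Rather than convolving all input channels at once, the input and filter are
+* sliced along the channel axis and one output channel is computed at a time,
+* accumulating partial sums, which keeps peak GPU memory low.
+*
+* @param {tf.Tensor} input - 5D tensor in NDHWC layout [batch, D, H, W, inChannels]
+* @param {tf.Tensor} filter - 5D filter [fD, fH, fW, inChannels, outChannels]
+* @param {tf.Tensor} biases - 1D tensor [outChannels]
+* @param {number|Array} stride
+* @param {string} pad - e.g. 'same'
+* @param {number|Array} dilationRate
+* @param {number} sliceSize - number of input channels convolved per step; smaller values use less memory
+* @returns {tf.Tensor} 5D output tensor [batch, D, H, W, outChannels]
+*/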
+function convByOutputChannelAndInputSlicing(input, filter, biases, stride, pad, dilationRate, sliceSize) {
+ const batchSize = input.shape[0];
+ const depth = input.shape[1];
+ const height = input.shape[2];
+ const width = input.shape[3];
+ const inChannels = input.shape[4];
+ const outChannels = filter.shape[4];
- }, delay);
+ // Accumulator for the output channels (built up by concatenation along the channel axis)
+ let outputChannels = null;
- } catch(err) {
+ // Slice the input tensor and process one output channel at a time
+ for (let channel = 0; channel < outChannels; channel++) {
+ const numSlices = Math.ceil(inChannels / sliceSize);
+ const biasesSlice = biases.slice([channel], [1]);
+ let outputChannel = null;
- webix.alert(err.message);
- console.log( err.message );
- console.log(
- "If webgl context is lost, try to restore webgl context by visit the link " +
- 'here'
- );
+ for (let i = 0; i < numSlices; i++) {
+ const startChannel = i * sliceSize;
+ const endChannel = Math.min((i + 1) * sliceSize, inChannels);
+ // Only proceed if there are channels to process
+ if (startChannel < inChannels) {
+ const resultSlice = tf.tidy(() => {
+ const inputSlice = input.slice([0, 0, 0, 0, startChannel], [-1, -1, -1, -1, endChannel - startChannel]);
+ const filterSlice = filter.slice([0, 0, 0, startChannel, channel], [-1, -1, -1, endChannel - startChannel, 1]);
+ // Perform the convolution for the current slice and output channel
+ return tf.conv3d(inputSlice, filterSlice, stride, pad, 'NDHWC', dilationRate);
+ });
- document.getElementById("webGl2Status").style.backgroundColor = isWebGL2ContextLost() ? "Red" : "Green";
+ if (outputChannel === null) {
+ outputChannel = resultSlice;
+ } else {
+ const updatedOutputChannel = outputChannel.add(resultSlice);
+ outputChannel.dispose();
+ resultSlice.dispose();
+ outputChannel = updatedOutputChannel;
+ }
+ }
+ }
- document.getElementById("memoryStatus").style.backgroundColor = tf.memory().unreliable ? "Red" : "Green";
- }
- });
+ // Add the biases to the accumulated convolutions for this channel
+ const biasedOutputChannel = outputChannel.add(biasesSlice);
+ outputChannel.dispose();
+ biasesSlice.dispose();
- }
+ // Concatenate this channel onto the accumulated output tensor
+ if (outputChannels == null) {
+ outputChannels = biasedOutputChannel;
+ } else {
+ const updatedOutputChannels = tf.concat([outputChannels, biasedOutputChannel], 4);
+ biasedOutputChannel.dispose();
+ outputChannels.dispose();
+ outputChannels = updatedOutputChannels;
+ }
+ }
+ return outputChannels;
+}
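+
+// Illustrative usage sketch (hypothetical "layer" variable; the inference loops
+// below pass the current layer's weights and options in exactly this way):
+//
+// const out = convByOutputChannelAndInputSlicing(prevTensor,
+// layer.getWeights()[0], // convolution kernel
+// layer.getWeights()[1], // biases
+// layer.strides,
+// layer.padding,
+// layer.dilationRate,
+// 3); // channels per slice; kept small to limit memory use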
/**
* Inference Function for full volume
-* No Sequential Convolution Layer
-* Faster
-*
* @since 1.0.0
* @param {promise} model
* @param {tf.Tensor} slices_3d
@@ -5043,7 +3643,8 @@ function convByOutputChannelAndInputSlicing(input, filter, biases, stride, pad,
statData["No_SubVolumes"] = 1;
- //-- let modelLayersOrg = JSON.parse(JSON.stringify(modelObject));
+ // let modelLayersOrg = JSON.parse(JSON.stringify(modelObject));
+
model.then(function (res) {
@@ -5058,29 +3659,48 @@ function convByOutputChannelAndInputSlicing(input, filter, biases, stride, pad,
let i = 1;
let layersLength = res.layers.length;
- console.log("res.layers.length ", layersLength);
+ console.log("Total num of layers ", layersLength);
+
+ // Determine the number of output channels in the last layer of the model
+ // e.g. 3, 50, 104
+ const outputLayer = res.layers[res.layers.length - 1];
+ console.log("Output Layer : ", outputLayer);
+
+ const expected_Num_labels = isChannelLast ?
+ outputLayer.outputShape[outputLayer.outputShape.length - 1]:
+ outputLayer.outputShape[1];
+ console.log("Num of output channels : ", expected_Num_labels);
let curTensor = [];
curTensor[0] = slices_3d.reshape(input_shape);
// console.log("curTensor[0] :", curTensor[0].dataSync());
+ let timer = window.setInterval(async function() {
- let timer = window.setInterval(function() {
+ try {
+ if (res.layers[i].activation.getClassName() !== 'linear') {
+ curTensor[i] = res.layers[i].apply( curTensor[i-1]);
+ } else {
- try {
- if (res.layers[i].activation.getClassName() !== 'linear') {
- curTensor[i] = res.layers[i].apply( curTensor[i-1]);
- } else {
+ curTensor[i] = convByOutputChannelAndInputSlicing(curTensor[i-1],
+ res.layers[i].getWeights()[0],
+ res.layers[i].getWeights()[1],
+ res.layers[i].strides,
+ res.layers[i].padding,
+ res.layers[i].dilationRate,
+ 3); // important for memory use
+ }
+ // Log memory usage
+ const memoryInfo = tf.memory();
+ console.log(`Iteration ${i}:`);
+ console.log(`Number of Tensors: ${memoryInfo.numTensors}`);
+ console.log(`Number of Data Buffers: ${memoryInfo.numDataBuffers}`);
+ console.log(`Bytes In Use: ${memoryInfo.numBytes}`);
+ console.log(`Megabytes In Use: ${(memoryInfo.numBytes / 1048576).toFixed(3)} MB`);
+ console.log(`Unreliable: ${memoryInfo.unreliable}`);
+ tf.dispose(curTensor[i-1]);
- curTensor[i] = convByOutputChannelAndInputSlicing(curTensor[i-1],
- res.layers[i].getWeights()[0],
- res.layers[i].getWeights()[1],
- res.layers[i].strides,
- res.layers[i].padding,
- res.layers[i].dilationRate,
- 3); // important for memory use
- }
} catch(err) {
if( err.message === "Failed to compile fragment shader.") {
@@ -5121,196 +3741,332 @@ function convByOutputChannelAndInputSlicing(input, filter, biases, stride, pad,
}
return 0;
- }
+ } // end of catch
console.log("layer ", i);
- console.log("layer output Tensor shape : ", curTensor[i].shape);
+ console.log("layer output Tenosr shape : ", curTensor[i].shape);
console.log("layer count params ", res.layers[i].countParams());
res.layers[i].dispose();
curTensor[i-1].dispose();
-
document.getElementById("progressBar").style.width = (i + 1)*100/layersLength + "%";
let memStatus = tf.memory().unreliable ? "Red" : "Green";
let unreliableReasons = tf.memory().unreliable ? "unreliable reasons :" + tf.memory().reasons : "";
document.getElementById("memoryStatus").style.backgroundColor = memStatus;
+ if( i == layersLength - 2) { //Stop before the last layer or classification layer.
+
+
+ // Check if the output layer has more than one channel (multi-class output)
+ if (expected_Num_labels > 1) {
+
+ window.clearInterval( timer );
+
+ // Create an instance of SequentialConvLayer.
+ // The second parameter is important for memory use: the larger it is, the more
+ // memory it uses (it was originally 8; 3 caused a different error, so 4 is used here).
+ seqConvLayer = new SequentialConvLayer(res, 4, isChannelLast);
+
+
+ // Apply the last output tensor to the seq. instance
+ let outputTensor = null;
+
+ const profileInfo = await tf.profile(async() => {
+ // Profile memory and kernel usage while applying the sequential conv layer
+ outputTensor = await seqConvLayer.apply(curTensor[i]);
+ });
+
+ console.log("profileInfo : ",profileInfo);
+
+ // 'outputTensor' is the result tensor after applying the operation
+
+
+ // Dispose the previous layer input tensor
+ tf.dispose(curTensor[i]);
+ // delete the used class
+ delete seqConvLayer;
+
+ // You can now use 'outputTensor' as needed
+ console.log(outputTensor);
+ console.log(" Output tensor shape : ", outputTensor.shape);
+ // Array(3) [ 256, 256, 256 ]
+
+ if(outputTensor.shape.length != 3) {
+ webix.alert("Output tensor shape should be 3 dims but it is " + outputTensor.shape.length, "alert-error");
+ }
- if( i == layersLength - 1) {
- window.clearInterval( timer );
+ let Inference_t = ((performance.now() - startTime) / 1000).toFixed(4);
- // prediction = res.layers[res.layers.length-1].apply(curTensor[i]);
- // curTensor[i].print();
- //outputDataBeforArgmx = Array.from(curTensor[i].dataSync())
+ console.log("find array max: ");
+ let curBatchMaxLabel = findArrayMax(Array.from(outputTensor.dataSync()));
- let axis = isChannelLast ? -1 : 1;
- console.log(" find argmax ")
- console.log("last Tensor shape : ", curTensor[i].shape);
- //-- curTensor[i].shape : [ 1, 256, 256, 256, 3 ]
- let expected_Num_labels = isChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1];
- let prediction_argmax;
+ if( maxLabelPredicted < curBatchMaxLabel ) {
+ maxLabelPredicted = curBatchMaxLabel;
+ }
- // Try for argMax with model output tensor.
+ let numSegClasses = maxLabelPredicted + 1;
+ console.log("Predicted num of segmentation classes", numSegClasses);
+ statData["Actual_Labels"] = numSegClasses;
+ statData["Expect_Labels"] = expected_Num_labels;
+ statData["NumLabels_Match"] = numSegClasses == expected_Num_labels? true : false;
- try {
- let argMaxTime = performance.now();
- console.log(" Try tf.argMax for fullVolume ..");
- prediction_argmax = tf.argMax(curTensor[i], axis);
- console.log("tf.argMax for fullVolume takes : ", ((performance.now() - argMaxTime)/1000).toFixed(4) );
+ if( numSegClasses != expected_Num_labels ) {
+ webix.alert("expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses, "alert-error");
+ console.log("expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses);
+ }
- } catch(err1) {
- // if channel last
- if(axis == -1) {
- try {
- let argMaxLargeTime = performance.now();
- console.log(" tf.argMax failed .. try argMaxLarge ..");
- let modelOutBuffer = tensor2LightBuffer(curTensor[i].reshape([num_of_slices, slice_height, slice_width, expected_Num_labels]), 'float16');
- prediction_argmax = argMaxLarge(modelOutBuffer, num_of_slices, slice_height, slice_width, expected_Num_labels, 'float16');
- console.log("argMaxLarge for fullVolume takes : ", ((performance.now() - argMaxLargeTime)/1000).toFixed(4) );
+ // Transpose MRI data to be match pytorch/keras input output
+ if(transpose) {
+ console.log("outLabelVolume transposed");
+ outputTensor = outputTensor.transpose();
+ }
+
+ let unstackOutVolumeTensor = tf.unstack(outputTensor);
+ tf.dispose(outputTensor);
- } catch(err2) {
+ startTime = performance.now();
- let errTxt = "argMax buffer couldn't be created due to limited memory resources.";
- webix.alert(errTxt);
+ // Generate output volume or slices
+ console.log("Generating output");
- prediction_argmax.dispose();
+ try {
+ generateOutputSlicesV2(unstackOutVolumeTensor , num_of_slices, numSegClasses, slice_height, slice_width);
+ console.log(" FullVolume inference num of tensors after generateOutputSlicesV2: " , tf.memory().numTensors );
+ } catch (error) {
- window.clearInterval( timer );
+ //-- Timing data to collect
tf.engine().endScope();
tf.engine().disposeVariables();
- statData["Inference_t"] = Infinity;
+ console.log("Error while generating output: ", error)
+
+ webix.alert("Failed while generating output due to limited browser memory available");
+
+ statData["Inference_t"] = Inference_t;
statData["Postprocess_t"] = Infinity;
statData["Status"] = "Fail";
- statData["Error_Type"] = err2.message;
- statData["Extra_Err_Info"] = "prediction_argmax from argMaxLarge failed";
+ statData["Error_Type"] = error.message;
+ statData["Extra_Err_Info"] = "Failed while generating output";
if(opts.telemetryFlag) {
submitTiming2GoogleSheet(statData);
}
return 0;
+ }
- }
+ let Postprocess_t = ((performance.now() - startTime) / 1000).toFixed(4);
- } else {
- // if channel first ..
- let errTxt = "argMax buffer couldn't be created due to limited memory resources.";
- webix.alert(errTxt);
+ document.getElementById("progressBar").style.width = 0;
- prediction_argmax.dispose();
+ $$("downloadBtn").enable();
+ $$("segmentBtn").enable();
- window.clearInterval( timer );
tf.engine().endScope();
tf.engine().disposeVariables();
- statData["Inference_t"] = Infinity;
- statData["Postprocess_t"] = Infinity;
- statData["Status"] = "Fail";
- statData["Error_Type"] = err1.message;
- statData["Extra_Err_Info"] = "prediction_argmax from argMaxLarge not support yet channel first";
+ console.log("Processing the whole brain volume in tfjs tooks for multi-class output mask : ",
+ ((performance.now()-inferenceStartTime)/1000).toFixed(4) + " Seconds");
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
+ //-- Timing data to collect
+ statData["Inference_t"] = Inference_t;
+ statData["Postprocess_t"] = Postprocess_t;
+ statData["Status"] = "OK";
+
+ if(opts.telemetryFlag) {
+ submitTiming2GoogleSheet(statData);
+ }
- return 0;
}
- }
+ } else {
+ i++;
+ }
- console.log(" prediction_argmax shape : ", prediction_argmax.shape);
- //-- prediction_argmax.shape : [ 1, 256, 256, 256]
- let Inference_t = ((performance.now() - startTime)/1000).toFixed(4);
- //outputDataBeforArgmx = Array.from(prediction_argmax.dataSync())
- tf.dispose(curTensor[i]);
- // allPredictions.push({"id": allBatches[j].id, "coordinates": allBatches[j].coordinates, "data": Array.from(prediction_argmax.dataSync()) })
- console.log(" find array max ");
- let curBatchMaxLabel = findArrayMax(Array.from(prediction_argmax.dataSync()));
+ // if( i == layersLength - 1) {
- if( maxLabelPredicted < curBatchMaxLabel ) {
- maxLabelPredicted = curBatchMaxLabel;
- }
+ // window.clearInterval( timer );
- let numSegClasses = maxLabelPredicted + 1;
- console.log("numSegClasses", numSegClasses);
- statData["Actual_Labels"] = numSegClasses;
- statData["Expect_Labels"] = expected_Num_labels;
- statData["NumLabels_Match"] = numSegClasses == expected_Num_labels? true : false;
+ // // prediction = res.layers[res.layers.length-1].apply(curTensor[i]);
+ // // curTensor[i].print();
+ // //outputDataBeforArgmx = Array.from(curTensor[i].dataSync())
- //-- Transpose back to fit Papaya display settings
- let outLabelVolume = prediction_argmax.reshape([num_of_slices, slice_height, slice_width]);
- tf.dispose(prediction_argmax);
+ // let axis = isChannelLast ? -1 : 1;
+ // console.log(" find argmax ")
+ // console.log("last Tenosr shape : ", curTensor[i].shape);
+ // //-- curTensor[i].shape : [ 1, 256, 256, 256, 3 ]
+ // let expected_Num_labels = isChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1];
+ // let prediction_argmax;
- // Transpose MRI data to be match pytorch/keras input output
- if(transpose) {
- console.log("outLabelVolume transposed");
- outLabelVolume = outLabelVolume.transpose();
- }
+ // // Try for argMax with model output tensor.
- let unstackOutVolumeTensor = tf.unstack(outLabelVolume);
- tf.dispose(outLabelVolume);
+ // try {
+ // let argMaxTime = performance.now();
+ // console.log(" Try tf.argMax for fullVolume ..");
+ // prediction_argmax = tf.argMax(curTensor[i], axis);
+ // console.log("tf.argMax for fullVolume takes : ", ((performance.now() - argMaxTime)/1000).toFixed(4) );
- startTime = performance.now();
- // Generate output volume or slices
- console.log("Generating output");
+ // } catch(err1) {
+ // // if channel last
+ // if(axis == -1) {
- try {
- generateOutputSlicesV2(unstackOutVolumeTensor , num_of_slices, numSegClasses, slice_height, slice_width);
- console.log(" FullVolume inference num of tensors after generateOutputSlicesV2: " , tf.memory().numTensors );
- } catch (error) {
+ // try {
+ // let argMaxLargeTime = performance.now();
+ // console.log(" tf.argMax failed .. try argMaxLarge ..");
+ // let modelOutBuffer = tensor2LightBuffer(curTensor[i].reshape([num_of_slices, slice_height, slice_width, expected_Num_labels]), 'float16');
+ // prediction_argmax = argMaxLarge(modelOutBuffer, num_of_slices, slice_height, slice_width, expected_Num_labels, 'float16');
+ // console.log("argMaxLarge for fullVolume takes : ", ((performance.now() - argMaxLargeTime)/1000).toFixed(4) );
- //-- Timing data to collect
- tf.engine().endScope();
- tf.engine().disposeVariables();
+ // } catch(err2) {
- webix.alert("Failed while generating output due to limited browser memory available");
+ // let errTxt = "argMax buffer couldn't be created due to limited memory resources.";
+ // webix.alert(errTxt);
- statData["Inference_t"] = Inference_t;
- statData["Postprocess_t"] = Infinity;
- statData["Status"] = "Fail";
- statData["Error_Type"] = error.message;
- statData["Extra_Err_Info"] = "Failed while generating output";
+ // prediction_argmax.dispose();
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
+ // window.clearInterval( timer );
+ // tf.engine().endScope();
+ // tf.engine().disposeVariables();
- return 0;
- }
+ // statData["Inference_t"] = Infinity;
+ // statData["Postprocess_t"] = Infinity;
+ // statData["Status"] = "Fail";
+ // statData["Error_Type"] = err2.message;
+ // statData["Extra_Err_Info"] = "prediction_argmax from argMaxLarge failed";
- let Postprocess_t = ((performance.now() - startTime)/1000).toFixed(4);
+ // if(opts.telemetryFlag) {
+ // submitTiming2GoogleSheet(statData);
+ // }
- document.getElementById("progressBar").style.width = 0;
- //webix.message.hide("waitMessage");
+ // return 0;
- $$("downloadBtn").enable();
- $$("segmentBtn").enable();
- // $$("imageUploader").enable();
- tf.engine().endScope();
- tf.engine().disposeVariables();
+ // }
- console.log("Processing the whole brain volume in tfjs tooks for multi-class output mask : ",
- ((performance.now()-inferenceStartTime)/1000).toFixed(4) + " Seconds");
+ // } else {
+ // // if channel first ..
+ // let errTxt = "argMax buffer couldn't be created due to limited memory resources.";
+ // webix.alert(errTxt);
+ // prediction_argmax.dispose();
- //-- Timing data to collect
- statData["Inference_t"] = Inference_t;
- statData["Postprocess_t"] = Postprocess_t;
- statData["Status"] = "OK";
+ // window.clearInterval( timer );
+ // tf.engine().endScope();
+ // tf.engine().disposeVariables();
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
+ // statData["Inference_t"] = Infinity;
+ // statData["Postprocess_t"] = Infinity;
+ // statData["Status"] = "Fail";
+ // statData["Error_Type"] = err1.message;
+ // statData["Extra_Err_Info"] = "prediction_argmax from argMaxLarge not support yet channel first";
- }
- i++;
+ // if(opts.telemetryFlag) {
+ // submitTiming2GoogleSheet(statData);
+ // }
+
+ // return 0;
+ // }
+
+ // }
+
+
+
+ // console.log(" prediction_argmax shape : ", prediction_argmax.shape);
+ // //-- prediction_argmax.shape : [ 1, 256, 256, 256]
+
+ // let Inference_t = ((performance.now() - startTime)/1000).toFixed(4);
+
+ // //outputDataBeforArgmx = Array.from(prediction_argmax.dataSync())
+ // tf.dispose(curTensor[i]);
+ // // allPredictions.push({"id": allBatches[j].id, "coordinates": allBatches[j].coordinates, "data": Array.from(prediction_argmax.dataSync()) })
+ // console.log(" find array max ");
+ // let curBatchMaxLabel = findArrayMax(Array.from(prediction_argmax.dataSync()));
+
+ // if( maxLabelPredicted < curBatchMaxLabel ) {
+ // maxLabelPredicted = curBatchMaxLabel;
+ // }
+
+ // let numSegClasses = maxLabelPredicted + 1;
+ // console.log("numSegClasses", numSegClasses);
+ // statData["Actual_Labels"] = numSegClasses;
+ // statData["Expect_Labels"] = expected_Num_labels;
+ // statData["NumLabels_Match"] = numSegClasses == expected_Num_labels? true : false;
+
+ // //-- Transpose back to fit Papaya display settings
+ // let outLabelVolume = prediction_argmax.reshape([num_of_slices, slice_height, slice_width]);
+ // tf.dispose(prediction_argmax);
+
+ // // Transpose MRI data to be match pytorch/keras input output
+ // if(transpose) {
+ // console.log("outLabelVolume transposed");
+ // outLabelVolume = outLabelVolume.transpose();
+ // }
+
+ // let unstackOutVolumeTensor = tf.unstack(outLabelVolume);
+ // tf.dispose(outLabelVolume);
+
+ // startTime = performance.now();
+ // // Generate output volume or slices
+ // console.log("Generating output");
+
+ // try {
+ // generateOutputSlicesV2(unstackOutVolumeTensor , num_of_slices, numSegClasses, slice_height, slice_width);
+ // console.log(" FullVolume inference num of tensors after generateOutputSlicesV2: " , tf.memory().numTensors );
+ // } catch (error) {
+
+ // //-- Timing data to collect
+ // tf.engine().endScope();
+ // tf.engine().disposeVariables();
+
+ // webix.alert("Failed while generating output due to limited browser memory available");
+
+ // statData["Inference_t"] = Inference_t;
+ // statData["Postprocess_t"] = Infinity;
+ // statData["Status"] = "Fail";
+ // statData["Error_Type"] = error.message;
+ // statData["Extra_Err_Info"] = "Failed while generating output";
+
+ // if(opts.telemetryFlag) {
+ // submitTiming2GoogleSheet(statData);
+ // }
+
+ // return 0;
+ // }
+
+ // let Postprocess_t = ((performance.now() - startTime)/1000).toFixed(4);
+
+ // document.getElementById("progressBar").style.width = 0;
+ // //webix.message.hide("waitMessage");
+
+ // $$("downloadBtn").enable();
+ // $$("segmentBtn").enable();
+ // // $$("imageUploader").enable();
+ // tf.engine().endScope();
+ // tf.engine().disposeVariables();
+
+ // console.log("Processing the whole brain volume in tfjs tooks for multi-class output mask : ",
+ // ((performance.now()-inferenceStartTime)/1000).toFixed(4) + " Seconds");
+
+
+ // //-- Timing data to collect
+ // statData["Inference_t"] = Inference_t;
+ // statData["Postprocess_t"] = Postprocess_t;
+ // statData["Status"] = "OK";
+
+ // if(opts.telemetryFlag) {
+ // submitTiming2GoogleSheet(statData);
+ // }
+
+ // }
+
+ // i++;
}, delay);
@@ -5970,16 +4726,27 @@ get3dObjectBoundingVolume = async(slices_3d) => {
statData["Extra_Info"] = null;
+ // Determine the number of output channels in the last layer of the model
+ // e.g. 3, 50, 104
+ const outputLayer = res.layers[res.layers.length - 1];
+ console.log("Output Layer : ", outputLayer);
+
+ const expected_Num_labels = isChannelLast ?
+ outputLayer.outputShape[outputLayer.outputShape.length - 1]:
+ outputLayer.outputShape[1];
+ console.log("Num of output channels : ", expected_Num_labels);
+
+
+
let curTensor = [];
curTensor[0] = cropped_slices_3d_w_pad.reshape(adjusted_input_shape);
// console.log("curTensor[0] :", curTensor[0].dataSync());
let curProgBar = parseInt(document.getElementById("progressBar").style.width);
- let timer = window.setInterval(function() {
+ let timer = window.setInterval(async function() {
- try {
- //-- curTensor[i] = res.layers[i].apply( curTensor[i-1]);
+ try {
if (res.layers[i].activation.getClassName() !== 'linear') {
curTensor[i] = res.layers[i].apply( curTensor[i-1]);
} else {
@@ -5992,6 +4759,15 @@ get3dObjectBoundingVolume = async(slices_3d) => {
res.layers[i].dilationRate,
3); // important for memory use
}
+ // Log memory usage
+ const memoryInfo = tf.memory();
+ console.log(`Iteration ${i}:`);
+ console.log(`Number of Tensors: ${memoryInfo.numTensors}`);
+ console.log(`Number of Data Buffers: ${memoryInfo.numDataBuffers}`);
+ console.log(`Bytes In Use: ${memoryInfo.numBytes}`);
+ console.log(`Megabytes In Use: ${(memoryInfo.numBytes / 1048576).toFixed(3)} MB`);
+ console.log(`Unreliable: ${memoryInfo.unreliable}`);
+ tf.dispose(curTensor[i-1]);
} catch(err) {
@@ -6036,7 +4812,7 @@ get3dObjectBoundingVolume = async(slices_3d) => {
}
console.log("layer ", i);
- console.log("layer output Tensor shape : ", curTensor[i].shape);
+ console.log("layer output Tenosr shape : ", curTensor[i].shape);
console.log("layer count params ", res.layers[i].countParams());
res.layers[i].dispose();
@@ -6049,193 +4825,157 @@ get3dObjectBoundingVolume = async(slices_3d) => {
document.getElementById("memoryStatus").style.backgroundColor = memStatus;
- if( i == layersLength - 1) {
- window.clearInterval( timer );
-
- // prediction = res.layers[res.layers.length-1].apply(curTensor[i]);
- // curTensor[i].print();
- //outputDataBeforArgmx = Array.from(curTensor[i].dataSync())
-
- let axis = isChannelLast ? -1 : 1;
- console.log(" find argmax ")
- console.log("last Tensor shape : ", curTensor[i].shape);
- //-- curTensor[i].shape e.g. [ 1, 256, 256, 256, 3 ]
- let expected_Num_labels = isChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1];
- let prediction_argmax;
-
- // Try for argMax with model output tensor.
-
- try {
- let argMaxTime = performance.now();
- console.log(" Try tf.argMax for fullVolume ..");
- prediction_argmax = tf.argMax(curTensor[i], axis);
- console.log("tf.argMax for fullVolume takes : ", ((performance.now() - argMaxTime)/1000).toFixed(4) );
-
- } catch(err1) {
- // if channel last
- if(axis == -1) {
-
- try {
- let argMaxLargeTime = performance.now();
- console.log(" tf.argMax failed .. try argMaxLarge ..");
- let modelOutBuffer = tensor2LightBuffer(curTensor[i].reshape([cropped_slices_3d_w_pad.shape[0], cropped_slices_3d_w_pad.shape[1], cropped_slices_3d_w_pad.shape[2], expected_Num_labels]), 'float16');
- prediction_argmax = argMaxLarge(modelOutBuffer, cropped_slices_3d_w_pad.shape[0], cropped_slices_3d_w_pad.shape[1], cropped_slices_3d_w_pad.shape[2], expected_Num_labels, 'float16');
- console.log("argMaxLarge for fullVolume takes : ", ((performance.now() - argMaxLargeTime)/1000).toFixed(4) );
-
- } catch(err2) {
+ if( i == layersLength - 2) { //Stop before the last layer or classification layer.
- let errTxt = "argMax buffer couldn't be created due to limited memory resources.";
- webix.alert(errTxt);
+ // Check if the output layer has more than one channel (multi-class output)
+ if( expected_Num_labels > 1) {
+ window.clearInterval( timer );
- window.clearInterval( timer );
- tf.engine().endScope();
- tf.engine().disposeVariables();
- statData["Inference_t"] = Infinity;
- statData["Postprocess_t"] = Infinity;
- statData["Status"] = "Fail";
- statData["Error_Type"] = err2.message;
- statData["Extra_Err_Info"] = "prediction_argmax from argMaxLarge failed";
+ // Create an instance of SequentialConvLayer.
+ // The second parameter is important for memory use: the larger it is, the more
+ // memory it uses (it was originally 8; 3 caused a different error, so 4 is used here).
+ seqConvLayer = new SequentialConvLayer(res, 4, isChannelLast);
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
- return 0;
+ // Apply the last output tensor to the seq. instance
+ let outputTensor = null;
- }
+ const profileInfo = await tf.profile(async() => {
+ // Profile memory and kernel usage while applying the sequential conv layer
+ outputTensor = await seqConvLayer.apply(curTensor[i]);
+ });
- } else {
- // if channel first ..
- let errTxt = "argMax buffer couldn't be created due to limited memory resources.";
- webix.alert(errTxt);
+ console.log("profileInfo : ",profileInfo);
- prediction_argmax.dispose();
+ // 'outputTensor' is the result tensor after applying the operation
- window.clearInterval( timer );
- tf.engine().endScope();
- tf.engine().disposeVariables();
- statData["Inference_t"] = Infinity;
- statData["Postprocess_t"] = Infinity;
- statData["Status"] = "Fail";
- statData["Error_Type"] = err1.message;
- statData["Extra_Err_Info"] = "prediction_argmax from argMaxLarge not support yet channel first";
+ // Dispose the previous layer input tensor
+ tf.dispose(curTensor[i]);
+ // delete the used class
+ delete seqConvLayer;
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
+ // You can now use 'outputTensor' as needed
+ console.log(outputTensor);
+ console.log(" Output tensor shape : ", outputTensor.shape);
+ // Array(3) [ 256, 256, 256 ]
- return 0;
+ if(outputTensor.shape.length != 3) {
+ webix.alert("Output tensor shape should be 3 dims but it is " + outputTensor.shape.length, "alert-error");
}
- }
+ let Inference_t = ((performance.now() - startTime)/1000).toFixed(4);
+ console.log(" find array max ");
+ let curBatchMaxLabel = findArrayMax(Array.from(outputTensor.dataSync()));
- console.log(" prediction_argmax shape : ", prediction_argmax.shape);
- //-- prediction_argmax.shape : [ 1, 256, 256, 256]
+ if( maxLabelPredicted < curBatchMaxLabel ) {
+ maxLabelPredicted = curBatchMaxLabel;
+ }
- let Inference_t = ((performance.now() - startTime)/1000).toFixed(4);
+ let numSegClasses = maxLabelPredicted + 1;
+ console.log("Predicted num of segmentation classes", numSegClasses);
+ statData["Actual_Labels"] = numSegClasses;
+ statData["Expect_Labels"] = expected_Num_labels;
+ statData["NumLabels_Match"] = numSegClasses == expected_Num_labels? true : false;
- //outputDataBeforArgmx = Array.from(prediction_argmax.dataSync())
- tf.dispose(curTensor[i]);
- // allPredictions.push({"id": allBatches[j].id, "coordinates": allBatches[j].coordinates, "data": Array.from(prediction_argmax.dataSync()) })
- console.log(" find array max ");
- let curBatchMaxLabel = findArrayMax(Array.from(prediction_argmax.dataSync()));
+ if( numSegClasses != expected_Num_labels ) {
+ webix.alert("expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses, "alert-error");
+ console.log("expected " + expected_Num_labels + " labels, but the predicted are " + numSegClasses);
+ }
- if( maxLabelPredicted < curBatchMaxLabel ) {
- maxLabelPredicted = curBatchMaxLabel;
- }
+ //-- Transpose back to fit Papaya display settings
+ let outLabelVolume = outputTensor.reshape([cropped_slices_3d_w_pad.shape[0], cropped_slices_3d_w_pad.shape[1], cropped_slices_3d_w_pad.shape[2]]);
+ tf.dispose(outputTensor);
- let numSegClasses = maxLabelPredicted + 1;
- console.log("numSegClasses", numSegClasses);
- statData["Actual_Labels"] = numSegClasses;
- statData["Expect_Labels"] = expected_Num_labels;
- statData["NumLabels_Match"] = numSegClasses == expected_Num_labels? true : false;
+ // Transpose MRI data to be match pytorch/keras input output
+ if(transpose) {
+ console.log("outLabelVolume transposed");
+ outLabelVolume = outLabelVolume.transpose();
+ }
- //-- Transpose back to fit Papaya display settings
- let outLabelVolume = prediction_argmax.reshape([cropped_slices_3d_w_pad.shape[0], cropped_slices_3d_w_pad.shape[1], cropped_slices_3d_w_pad.shape[2]]);
- tf.dispose(prediction_argmax);
+ outLabelVolume = removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad);
+ console.log(" outLabelVolume without padding shape : ", outLabelVolume.shape);
+ outLabelVolume = resizeWithZeroPadding(outLabelVolume, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr );
+ console.log(" outLabelVolume final shape after resizing : ", outLabelVolume.shape);
- // Transpose MRI data to be match pytorch/keras input output
- if(transpose) {
- console.log("outLabelVolume transposed");
- outLabelVolume = outLabelVolume.transpose();
- }
+ let filterOutWithPreMask = inferenceModelsList[$$("selectModel").getValue() - 1]["filterOutWithPreMask"];
- outLabelVolume = removeZeroPaddingFrom3dTensor(outLabelVolume, pad, pad, pad);
- console.log(" outLabelVolume without padding shape : ", outLabelVolume.shape);
- outLabelVolume = resizeWithZeroPadding(outLabelVolume, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr );
- console.log(" outLabelVolume final shape after resizing : ", outLabelVolume.shape);
+ // To clean the skull area wrongly segmented in phase-2.
+ if(pipeline1_out != null && opts.isBrainCropMaskBased && filterOutWithPreMask) {
+ outLabelVolume = outLabelVolume.mul(binarizeVolumeDataTensor(pipeline1_out));
+ }
- let filterOutWithPreMask = inferenceModelsList[$$("selectModel").getValue() - 1]["filterOutWithPreMask"];
- // To clean the skull area wrongly segmented in phase-2.
- if(pipeline1_out != null && opts.isBrainCropMaskBased && filterOutWithPreMask) {
- outLabelVolume = outLabelVolume.mul(binarizeVolumeDataTensor(pipeline1_out));
- }
+ let unstackOutVolumeTensor = tf.unstack(outLabelVolume);
+ tf.dispose(outLabelVolume);
- let unstackOutVolumeTensor = tf.unstack(outLabelVolume);
- tf.dispose(outLabelVolume);
+ startTime = performance.now();
+ // Generate output volume or slices
+ console.log("Generating output");
- startTime = performance.now();
- // Generate output volume or slices
- console.log("Generating output");
+ try {
- try {
+ generateOutputSlicesV2(unstackOutVolumeTensor , num_of_slices, numSegClasses, slice_height, slice_width);
+ console.log(" Phase-2 num of tensors after generateOutputSlicesV2: " , tf.memory().numTensors );
- generateOutputSlicesV2(unstackOutVolumeTensor , num_of_slices, numSegClasses, slice_height, slice_width);
- console.log(" Phase-2 num of tensors after generateOutputSlicesV2: " , tf.memory().numTensors );
+ } catch (error) {
- } catch (error) {
+ //-- Timing data to collect
+ tf.engine().endScope();
+ tf.engine().disposeVariables();
+ console.log("Error while generating output: ", error)
- //-- Timing data to collect
- tf.engine().endScope();
- tf.engine().disposeVariables();
+ webix.alert("Failed while generating output due to limited browser memory available");
- webix.alert("Failed while generating output due to limited browser memory available");
+ statData["Inference_t"] = Inference_t;
+ statData["Postprocess_t"] = Infinity;
+ statData["Status"] = "Fail";
+ statData["Error_Type"] = error.message;
+ statData["Extra_Err_Info"] = "Failed while generating output";
- statData["Inference_t"] = Inference_t;
- statData["Postprocess_t"] = Infinity;
- statData["Status"] = "Fail";
- statData["Error_Type"] = error.message;
- statData["Extra_Err_Info"] = "Failed while generating output";
+ if(opts.telemetryFlag) {
+ submitTiming2GoogleSheet(statData);
+ }
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
- }
+ return 0;
+ }
- return 0;
- }
+ let Postprocess_t = ((performance.now() - startTime)/1000).toFixed(4);
- let Postprocess_t = ((performance.now() - startTime)/1000).toFixed(4);
+ document.getElementById("progressBar").style.width = 0;
+ //webix.message.hide("waitMessage");
- document.getElementById("progressBar").style.width = 0;
- //webix.message.hide("waitMessage");
+ $$("downloadBtn").enable();
+ $$("segmentBtn").enable();
+ // $$("imageUploader").enable();
+ tf.engine().endScope();
+ tf.engine().disposeVariables();
- $$("downloadBtn").enable();
- $$("segmentBtn").enable();
- // $$("imageUploader").enable();
- tf.engine().endScope();
- tf.engine().disposeVariables();
+ console.log("Processing the whole brain volume in tfjs tooks for multi-class output mask : ",
+ ((performance.now()-inferenceStartTime)/1000).toFixed(4) + " Seconds");
- console.log("Processing the whole brain volume in tfjs tooks for multi-class output mask : ",
- ((performance.now()-inferenceStartTime)/1000).toFixed(4) + " Seconds");
+ //-- Timing data to collect
+ statData["Inference_t"] = Inference_t;
+ statData["Postprocess_t"] = Postprocess_t;
+ statData["Status"] = "OK";
- //-- Timing data to collect
- statData["Inference_t"] = Inference_t;
- statData["Postprocess_t"] = Postprocess_t;
- statData["Status"] = "OK";
+ if(opts.telemetryFlag) {
+ submitTiming2GoogleSheet(statData);
+ }
- if(opts.telemetryFlag) {
- submitTiming2GoogleSheet(statData);
}
+ } else {
+
+ i++;
}
- i++;
}, delay);
@@ -6301,7 +5041,7 @@ checkInferenceModelList = () => {
//-- The mask is needed to remove the skull and set noise in background to 0, and get the brain bounding volume properly
let slices_3d_mask = null;
- // load pre-model for inference first, can be null if no pre-model such as GWM models
+ // load pre-model for inference first
if(modelEntry["preModelId"]) {
preModel = load_model(inferenceModelsList[ modelEntry["preModelId"] - 1]['path'] );
@@ -6394,18 +5134,7 @@ checkInferenceModelList = () => {
let timer = window.setInterval(function() {
try {
- if (res.layers[i].activation.getClassName() !== 'linear') {
- curTensor[i] = res.layers[i].apply( curTensor[i-1]);
- } else {
-
- curTensor[i] = convByOutputChannelAndInputSlicing(curTensor[i-1],
- res.layers[i].getWeights()[0],
- res.layers[i].getWeights()[1],
- res.layers[i].strides,
- res.layers[i].padding,
- res.layers[i].dilationRate,
- 3); // important for memory use
- }
+ curTensor[i] = res.layers[i].apply( curTensor[i-1]);
} catch(err) {
if( err.message === "Failed to compile fragment shader.") {
@@ -6469,7 +5198,7 @@ checkInferenceModelList = () => {
let axis = isPreModelChannelLast ? -1 : 1;
console.log(" find argmax ")
- console.log("last Tensor shape : ", curTensor[i].shape);
+ console.log("last Tenosr shape : ", curTensor[i].shape);
//-- curTensor[i].shape : [ 1, 256, 256, 256, 3 ]
let expected_Num_labels = isPreModelChannelLast ? curTensor[i].shape[4] : curTensor[i].shape[1];
let prediction_argmax;
@@ -6595,6 +5324,8 @@ checkInferenceModelList = () => {
tf.engine().endScope();
tf.engine().disposeVariables();
+ console.log("Error while generating Brain Mask: ", error)
+
webix.alert("Failed while generating pre-model output due to limited browser memory available");
statData["Inference_t"] = Inference_t;
@@ -6639,23 +5370,9 @@ checkInferenceModelList = () => {
console.log("--- pre-model done ---");
// --mask_3d = slices_3d_mask.greater([0]).asType('bool');
// --slices_3d_mask.dispose();
-
if(isModelFullVol) {
-
- if(modelEntry["enableSeqConv"]) {
- // Mask cropping & seq conv
- // Non-Atlas model (e.g. GWM) needs sequential convolution layer.
- // Sequential convolution layer to be used after cropping - slow but reliable on most machines
- console.log("------ Mask Cropping & Seq Convoluton ------");
- inferenceFullVolumeSeqCovLayerPhase2(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask);
- } else {
- // Mask cropping BUT no seq conv
- console.log("------ Mask Cropping - NO Seq Convoluton ------");
- inferenceFullVolumePhase2(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask);
- }
-
+ inferenceFullVolumePhase2(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask);
} else {
- // -- In version 3.0.0 this function not used
inferenceSubVolumes(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask);
}
@@ -6683,28 +5400,15 @@ checkInferenceModelList = () => {
});
//-- if(...) end
- } else { // No preModel
+ } else {
//--Phase-2, After remove the skull try to allocate brain volume and make inferece
console.log("--- No pre-model is selected ---");
- console.log("------ Run voxel cropping ------");
//-- mask_3d = slices_3d.greater([0]).asType('bool');
if(isModelFullVol) {
-
- if(modelEntry["enableSeqConv"]) {
- // Voxel cropping & seq conv
- // Non-Atlas model (e.g. GWM) needs sequential convolution layer.
- // Sequential convolution layer to be used after cropping - slow but reliable on most machines
- console.log("------ Seq Convoluton ------");
- inferenceFullVolumeSeqCovLayerPhase2(model, slices_3d, num_of_slices, slice_height, slice_width, null);
- } else {
- // Voxel cropping BUT no seq conv
- inferenceFullVolumePhase2(model, slices_3d, num_of_slices, slice_height, slice_width, null);
- }
-
+ inferenceFullVolumePhase2(model, slices_3d, num_of_slices, slice_height, slice_width, null);
} else {
- // -- In version 3.0.0 this function not used
inferenceSubVolumes(model, slices_3d, num_of_slices, slice_height, slice_width, null);
}
}
@@ -6764,23 +5468,23 @@ resetMainParameters = () => {
*/
runInference = async() => {
- let startTime = performance.now();
+ let startTime = performance.now();
- const batchSize = opts.batchSize;
- const numOfChan = opts.numOfChan;
+ const batchSize = opts.batchSize;
+ const numOfChan = opts.numOfChan;
- if (isNaN(batchSize) || batchSize != 1) {
+ if (isNaN(batchSize) || batchSize != 1) {
webix.alert("The batch Size for input shape must be 1");
return 0;
- }
+ }
- if (isNaN(numOfChan) || (numOfChan != 1)) {
+ if (isNaN(numOfChan) || (numOfChan != 1)) {
webix.alert("The number of channels for input shape must be 1");
return 0;
- }
+ }
tf.engine().startScope()
@@ -6800,7 +5504,7 @@ resetMainParameters = () => {
let modelObject = {};
// get model object data e.g. layers etc
- model.then(function(res) {
+ model.then(async function(res) {
modelObject = res;
let batchInputShape = [];
@@ -6883,23 +5587,18 @@ resetMainParameters = () => {
let allSlices_2D = getAllSlices2D(allSlices, slice_height, slice_width);
- // free array from mem
- allSlices = null;
-
// Get slices_3d tensor
let slices_3d = getSlices3D(allSlices_2D);
-
- // free tensor from mem
tf.dispose(allSlices_2D);
        // Normalize MRI data to be from 0 to 1
- slices_3d = normalizeVolumeData(slices_3d);
- // Another normalize function needs specific models to be used
- // -- slices_3d = await normalizeTensor(slices_3d);
-
+ //slices_3d = normalizeVolumeData(slices_3d);
+ slices_3d = await normalizeTensor(slices_3d);
let Preprocess_t = ((performance.now() - startTime)/1000).toFixed(4);
+
+
console.log(tf.getBackend());
//-- set this flag so that textures are deleted when tensors are disposed.
tf.env().set("WEBGL_DELETE_TEXTURE_THRESHOLD", 0);
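The hunk above switches the preprocessing from normalizeVolumeData to an awaited normalizeTensor. Neither implementation is shown in this diff, but the stated intent is scaling MRI intensities to the [0, 1] range; a minimal min-max sketch of that idea (an assumption about the actual functions, which may for example be quantile-based):

async function normalizeToUnitRange(volume) {
    return tf.tidy(() => {
        const minVal = volume.min();
        const maxVal = volume.max();
        // A small epsilon guards against a flat volume where max == min.
        return volume.sub(minVal).div(maxVal.sub(minVal).add(1e-6));
    });
}

Usage would mirror the diff: slices_3d = await normalizeToUnitRange(slices_3d);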
@@ -6909,7 +5608,9 @@ resetMainParameters = () => {
console.log("tf env total features: ", Object.keys(tf.env().features).length);
// tf.env().set('WEBGL_PACK', false);
- // enableProductionMode();
+
+ // enableProductionMode();
+
//-- Timing data to collect
let today = new Date();
@@ -7000,62 +5701,33 @@ resetMainParameters = () => {
let transpose = inferenceModelsList[$$("selectModel").getValue() - 1]["enableTranspose"];
let enableCrop = inferenceModelsList[$$("selectModel").getValue() - 1]["enableCrop"];
-
if (isModelFullVol) {
if( enableCrop) {
- // FullVolume with Crop option before inference ..
- // pre-model to mask the volume, can also be null and the cropping will be on the MRI.
+ //-- FullVolume with Crop option before inference ..
+ //--pre-model to mask the volume, can also be null and the cropping will be on the MRI.
inferenceFullVolumePhase1(model, slices_3d, num_of_slices, slice_height, slice_width, isModelFullVol);
} else {
                  // Transpose MRI data to match pytorch/keras input/output
- console.log("Cropping Disabled");
+ webix.message("Cropping disabled");
+ console.log("Cropping disabled");
if(transpose) {
slices_3d = slices_3d.transpose()
console.log("Input transposed");
} else {
- console.log("Transpose NOT Enabled");
+ console.log("Transpose not enabled");
}
- let enableSeqConv = inferenceModelsList[$$("selectModel").getValue() - 1]["enableSeqConv"];
-
- if(enableSeqConv) {
- console.log("Seq Convoluton Enabled");
- inferenceFullVolumeSeqCovLayer(model, slices_3d, input_shape, isChannelLast, num_of_slices, slice_height, slice_width);
- } else {
- console.log("Seq Convoluton Disabled");
- inferenceFullVolume(model, slices_3d, input_shape, isChannelLast, num_of_slices, slice_height, slice_width);
- }
-
-
+ inferenceFullVolume(model, slices_3d, input_shape, isChannelLast, num_of_slices, slice_height, slice_width);
}
-
} else {
- // // In version 3.0.0 this function not used
- //-- if(enableCrop) {
- // // FullVolume with Crop option before inference ..
- // // pre-model to mask the volume, can also be null and the cropping will be on the MRI.
- //-- inferenceFullVolumePhase1(model, slices_3d, num_of_slices, slice_height, slice_width, isModelFullVol);
- //-- } else {
- // // Transpose MRI data to be match pytorch/keras input output
- //-- if(transpose) {
- //-- slices_3d = slices_3d.transpose()
- //-- console.log("Input transposed");
- //-- } else {
- //-- console.log("Transpose not enabled");
- //-- }
-
- //-- inferenceSubVolumes(model, slices_3d, num_of_slices, slice_height, slice_width);
- //-- }
-
- console.log("This is not a full volume model");
- webix.alert({title: "", text: "This is not a full volume model", type:"alert-error"});
-
- }
+            console.log("This is not a full volume model");
+            webix.alert({title: "", text: "This is not a full volume model", type:"alert-error"});
- }) //-- End of model.then
+ }
+ })
} //-- End of runInference
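Condensing the dispatch in runInference above: the selected entry from inferenceModelsList decides whether a full-volume model goes through the cropping pipeline (phase 1) or is transposed and run on the whole volume directly, while non-full-volume models are rejected in this version. A hedged summary sketch that follows the structure of the diff (not a drop-in replacement; it reuses the surrounding variables):

const modelEntry = inferenceModelsList[$$("selectModel").getValue() - 1];
if (isModelFullVol) {
    if (modelEntry["enableCrop"]) {
        // Crop the brain from the background first, optionally via a pre-model.
        inferenceFullVolumePhase1(model, slices_3d, num_of_slices, slice_height, slice_width, isModelFullVol);
    } else {
        if (modelEntry["enableTranspose"]) {
            slices_3d = slices_3d.transpose(); // match pytorch/keras orientation
        }
        inferenceFullVolume(model, slices_3d, input_shape, isChannelLast, num_of_slices, slice_height, slice_width);
    }
} else {
    webix.alert({title: "", text: "This is not a full volume model", type: "alert-error"});
}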
diff --git a/js/brainchop/mainParameters.js b/js/brainchop/mainParameters.js
index 4ef6cd3..e04f4df 100644
--- a/js/brainchop/mainParameters.js
+++ b/js/brainchop/mainParameters.js
@@ -1,6 +1,6 @@
/*
=========================================================
-* Brainchop - v3.0.0
+* Brainchop - v2.2.0 TESTING
=========================================================
* Description: A user interface for whole brain segmentation
@@ -69,7 +69,7 @@
          bgLabelValue: 0, // Semantic segmentation background label value
          drawBoundingVolume: false, // plot bounding volume used to crop the brain
-         isBrainCropMaskBased: true, // If true, use the brain mask (rather than brain tissue) for cropping and optional display
+         isBrainCropMaskBased: false, // If true, use the brain mask (rather than brain tissue) for cropping and optional display
          showPhase1Output: false, // If true, load the phase-1 output (i.e. brain mask or brain tissue) into papaya
          isPostProcessEnable: true, // If true, the 3D connected components filter will be applied
@@ -101,6 +101,8 @@
// Inference Models, the ids must start from 1 in sequence
var inferenceModelsList = [
+
+
{
id: 1,
type: "Segmentation",
@@ -115,14 +117,13 @@
                  enableCrop: true, // To speed up inference and lower memory use, crop the brain from the background before feeding it to the model.
                  cropPadding: 2, // Padding size added to the cropped brain
                  filterOutWithPreMask: false, // Can be used to multiply the final output with the pre-model output mask to clean noisy areas
- enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
textureSize: 9159, // Requested Texture size for the model, if unknown can be 0.
                  warning: null, // Warning message to show when selecting the model.
                  inferenceDelay: 100, // Delay in ms between layer applications while looping over the layers.
description: "Gray and white matter segmentation model. Operates on full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the subvolume model."
- }
+ },
- ,{
+ {
id: 2,
type: "Segmentation",
path:"./models/model11_gw_ae/model.json",
@@ -136,141 +137,35 @@
                  enableCrop: true, // To speed up inference and lower memory use, crop the brain from the background before feeding it to the model.
                  cropPadding: 2, // Padding size added to the cropped brain
                  filterOutWithPreMask: false, // Can be used to multiply the final output with the pre-model output mask to clean noisy areas
- enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
textureSize: 13585, // Requested Texture size for the model, if unknown can be 0.
warning: "This model may need dedicated graphics card. For more info please check with Browser Resources .",
                  inferenceDelay: 100, // Delay in ms between layer applications while looping over the layers.
description: "Gray and white matter segmentation model. Operates on full T1 image in a single pass but needs a dedicated graphics card to operate. Provides the best accuracy among the provided models."
- }
-
- ,{
- id: 3,
- type: "Brain_Extraction",
- path: "./models/model5_gw_ae/model.json",
- modelName:"Extract the Brain (FAST)",
- labelsPath: null,
- colorsPath: null,
- preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. }
- isBatchOverlapEnable: false, //create extra overlap batches for inference
- numOverlapBatches: 0, //Number of extra overlap batches for inference
- enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
- enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
- cropPadding: 2, // Padding size add to cropped brain
- filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
- enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
- textureSize: 9159, // Requested Texture size for the model, if unknown can be 0.
- warning: null, // Warning message to show when select the model.
- inferenceDelay: 100, // Delay in ms time while looping layers applying.
- description: "Extract the brain fast model operates on full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the failsafe version."
- }
-
- ,{
- id: 4,
- type: "Brain_Extraction",
- path: "./models/model11_gw_ae/model.json",
- modelName:"Extract the Brain (High Acc)",
- labelsPath: null,
- colorsPath: null,
- preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. }
- isBatchOverlapEnable: false, //create extra overlap batches for inference
- numOverlapBatches: 0, //Number of extra overlap batches for inference
- enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
- enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
- cropPadding: 2, // Padding size add to cropped brain
- filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
- enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
- textureSize: 13585, // Requested Texture size for the model, if unknown can be 0.
- warning: "This model may need dedicated graphics card. For more info please check with Browser Resources .",
- inferenceDelay: 100, // Delay in ms time while looping layers applying.
- description: "Extract the brain high accuracy model operates on full T1 image in a single pass, but uses only 11 filters per layer. Can work on dedicated graphics cards. Still more accurate than the fast version."
- }
-
- ,{
- id: 5,
- type: "Brain_Masking",
- path: "./models/model5_gw_ae/model.json",
- modelName:"Compute Brain Mask (FAST)",
- labelsPath: null,
- colorsPath: null,
- preModelId: null,// Model run first e.g. crop the brain { null, 1, 2, .. }
- isBatchOverlapEnable: false, //create extra overlap batches for inference
- numOverlapBatches: 0, //Number of extra overlap batches for inference
- enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
- enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
- cropPadding: 2, // Padding size add to cropped brain
- filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
- enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
- textureSize: 9159, // Requested Texture size for the model, if unknown can be 0.
- warning: null, // Warning message to show when select the model.
- inferenceDelay: 100, // Delay in ms time while looping layers applying.
- description: "This fast masking model operates on full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than failsafe version."
- }
+ },
- ,{
- id: 6,
- type: "Brain_Masking",
- path: "./models/model11_gw_ae/model.json",
- modelName:"Compute Brain Mask (High Acc)",
- labelsPath: null,
- colorsPath: null,
- preModelId: null,// Model run first e.g. crop the brain { null, 1, 2, .. }
- isBatchOverlapEnable: false, //create extra overlap batches for inference
- numOverlapBatches: 0, //Number of extra overlap batches for inference
- enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
- enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
- cropPadding: 2, // Padding size add to cropped brain
- filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
- enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
- textureSize: 13585, // Requested Texture size for the model, if unknown can be 0.
- warning: "This model may need dedicated graphics card. For more info please check with Browser Resources .",
- inferenceDelay: 100, // Delay in ms time while looping layers applying.
- description: "This masking model operates on full T1 image in a single pass, but uses 11 filters per layer. Can work on dedicated graphics cards. Still more accurate than fast version."
- }
- ,{
- id: 7,
+
+ {
+ id: 3,
type: "Atlas",
path:"./models/model11_50class/model.json",
modelName:"Cortical Atlas 50",
labelsPath: "./models/model11_50class/labels.json",
colorsPath: "./models/model11_50class/colorLUT.json",
- preModelId: 5,// Model run first e.g. crop the brain { null, 1, 2, .. }
+ preModelId: 1,// Model run first e.g. crop the brain { null, 1, 2, .. }
isBatchOverlapEnable: false, //create extra overlap batches for inference
numOverlapBatches: 200, //Number of extra overlap batches for inference
                  enableTranspose : true, // Keras and tfjs input orientation may need a transposing step to be matched
                  enableCrop: true, // To speed up inference and lower memory use, crop the brain from the background before feeding it to the model.
                  cropPadding: 2, // Padding size added to the cropped brain
                  filterOutWithPreMask: false, // Can be used to multiply the final output with the pre-model output mask to clean noisy areas
-                 enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
                  textureSize: 0, // Requested Texture size for the model, if unknown can be 0.
                  warning: "This model may need dedicated graphics card. For more info please check with Browser Resources .", // Warning message to show when selecting the model.
                  inferenceDelay: 100, // Delay in ms between layer applications while looping over the layers.
description: "Parcellate cortical areas into 50 regions."
- }
-
- ,{
- id: 8,
- type: "Atlas",
- path:"./models/model11_50class/model.json",
- modelName:"Cortical Atlas 50 (failsafe)",
- labelsPath: "./models/model11_50class/labels.json",
- colorsPath: "./models/model11_50class/colorLUT.json",
- preModelId: 5,// Model run first e.g. crop the brain { null, 1, 2, .. }
- isBatchOverlapEnable: false, //create extra overlap batches for inference
- numOverlapBatches: 200, //Number of extra overlap batches for inference
- enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
- enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
- cropPadding: 2, // Padding size add to cropped brain
- filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
- enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer
- textureSize: 0, // Requested Texture size for the model, if unknown can be 0.
- warning: "This model may need dedicated graphics card. For more info please check with Browser Resources .", // Warning message to show when select the model.
- inferenceDelay: 100, // Delay in ms time while looping layers applying.
- description: "Parcellate cortical areas into 50 regions. The model use sequential convolution for inference to overcome browser memory limitations but leads to longer computation time."
- }
-
- ,{
- id: 9,
+ }
+ ,{
+ id: 4,
type: "Atlas",
path:"./models/model21_104class/model.json",
modelName:"FS aparc+aseg Atlas 104",
@@ -283,36 +178,13 @@
                  enableCrop: true, // To speed up inference and lower memory use, crop the brain from the background before feeding it to the model.
                  cropPadding: 2, // Padding size added to the cropped brain
                  filterOutWithPreMask: false, // Can be used to multiply the final output with the pre-model output mask to clean noisy areas
- enableSeqConv: false, // For low memory system and low configuration, enable sequential convolution instead of last layer
- textureSize: 18121, // Requested Texture size for the model, if unknown can be 0.
- warning: "This model may need dedicated graphics card. For more info please check with Browser Resources .", // Warning message to show when select the model.
- inferenceDelay: 100, // Delay in ms time while looping layers applying.
- description: "FreeSurfer aparc+aseg atlas 104 parcellate brain areas into 104 regions. It contains a combination of the Desikan-Killiany atlas for cortical area and also segmentation of subcortical regions."
- }
-
- ,{
- id: 10,
- type: "Atlas",
- path:"./models/model21_104class/model.json",
- modelName:"FS aparc+aseg Atlas 104 (failsafe)",
- labelsPath: "./models/model21_104class/labels.json",
- colorsPath: "./models/model21_104class/colorLUT.json",
- preModelId: 1, // model run first e.g. Brain_Extraction { null, 1, 2, .. }
- isBatchOverlapEnable: false, //create extra overlap batches for inference
- numOverlapBatches: 200, //Number of extra overlap batches for inference
- enableTranspose : true, // Keras and tfjs input orientation may need a tranposing step to be matched
- enableCrop: true, // For speed-up inference, crop brain from background before feeding to inference model to lower memory use.
- cropPadding: 2, // Padding size add to cropped brain
- filterOutWithPreMask: false, // Can be used to multiply final output with premodel output mask to crean noisy areas
- enableSeqConv: true, // For low memory system and low configuration, enable sequential convolution instead of last layer
textureSize: 0, // Requested Texture size for the model, if unknown can be 0.
warning: "This model may need dedicated graphics card. For more info please check with Browser Resources .", // Warning message to show when select the model.
inferenceDelay: 100, // Delay in ms time while looping layers applying.
- description: "FreeSurfer aparc+aseg atlas 104 parcellate brain areas into 104 regions. It contains a combination of the Desikan-Killiany atlas for cortical area and also segmentation of subcortical regions. The model use sequential convolution for inference to overcome browser memory limitations but leads to longer computation time. "
- }
-
-
-
+                 description: "FreeSurfer aparc+aseg atlas 104 parcellates the brain into 104 regions. It combines the Desikan-Killiany atlas for cortical areas with segmentation of subcortical regions."
+ }
+
+
];
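For reference, every entry in inferenceModelsList shares the same shape. A minimal annotated template built only from fields that appear above (all values are placeholders, not a new model):

const exampleModelEntry = {
    id: 1,                        // ids must start from 1 and stay in sequence
    type: "Segmentation",         // e.g. "Segmentation" or "Atlas"
    path: "./models/<model>/model.json", // tf.js model to load
    modelName: "Display name",
    labelsPath: null,             // labels JSON, or null
    colorsPath: null,             // color LUT JSON, or null
    preModelId: null,             // 1-based id of a model to run first, or null
    isBatchOverlapEnable: false,  // create extra overlapping batches for inference
    numOverlapBatches: 0,         // number of extra overlap batches
    enableTranspose: true,        // transpose input to match pytorch/keras orientation
    enableCrop: true,             // crop the brain from the background to lower memory use
    cropPadding: 2,               // padding added to the cropped brain
    filterOutWithPreMask: false,  // multiply the final output with the pre-model mask
    textureSize: 0,               // requested texture size; 0 if unknown
    warning: null,                // warning message shown when the model is selected
    inferenceDelay: 100,          // delay in ms while looping over layers
    description: "Short human-readable description."
};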