Improving normalization by reducing memory allocations
* using `np.ndarray.astype(..., copy=False)` to avoid allocating a new array when the dtype already matches
* using in-place NumPy array operations instead of expressions that build temporary arrays
* using `np.clip` with `out=` so clipping happens in place
ancestor-mithril committed Jan 11, 2024

1 parent 947eafb commit e5644ea
Showing 1 changed file with 14 additions and 11 deletions.
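The three techniques from the commit message can be sketched in isolation. This is an illustrative snippet, not code from the repository; the array shape and clipping bounds are made up:

import numpy as np

# hypothetical image; in practice this would be one channel of a case
image = np.random.rand(2, 64, 64).astype(np.float32)

# 1) astype(copy=False) returns the same buffer when the dtype already matches
same = image.astype(np.float32, copy=False)
assert np.shares_memory(image, same)           # no new allocation
copied = image.astype(np.float32)              # default copy=True always allocates
assert not np.shares_memory(image, copied)

# 2) in-place arithmetic writes into the existing buffer instead of
#    materialising intermediate arrays
image -= image.mean()
image /= max(image.std(), 1e-8)

# 3) np.clip can write its result into an existing array via out=
np.clip(image, -3.0, 3.0, out=image)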
@@ -32,7 +32,7 @@ def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
         here seg is used to store the zero valued region. The value for that region in the segmentation is -1 by
         default.
         """
-        image = image.astype(self.target_dtype)
+        image = image.astype(self.target_dtype, copy=False)
         if self.use_mask_for_norm is not None and self.use_mask_for_norm:
             # negative values in the segmentation encode the 'outside' region (think zero values around the brain as
             # in BraTS). We want to run the normalization only in the brain region, so we need to mask the image.
@@ -45,7 +45,8 @@ def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
         else:
             mean = image.mean()
             std = image.std()
-        image = (image - mean) / (max(std, 1e-8))
+        image -= mean
+        image /= (max(std, 1e-8))
         return image


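A quick standalone check (not part of the commit) that the in-place rewrite of the z-score branch above computes the same result as the original expression while reusing the input buffer:

import numpy as np

image = np.random.rand(64, 64).astype(np.float32)
mean, std = image.mean(), image.std()

reference = (image - mean) / max(std, 1e-8)    # old form: allocates temporaries

buffer_before = image
image -= mean                                  # new form: mutates image in place
image /= max(std, 1e-8)

assert image is buffer_before                  # same array object, no reallocation
assert np.allclose(image, reference)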
@@ -54,30 +55,32 @@ class CTNormalization(ImageNormalization):

     def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
         assert self.intensityproperties is not None, "CTNormalization requires intensity properties"
-        image = image.astype(self.target_dtype)
         mean_intensity = self.intensityproperties['mean']
         std_intensity = self.intensityproperties['std']
         lower_bound = self.intensityproperties['percentile_00_5']
         upper_bound = self.intensityproperties['percentile_99_5']
-        image = np.clip(image, lower_bound, upper_bound)
-        image = (image - mean_intensity) / max(std_intensity, 1e-8)
+
+        image = image.astype(self.target_dtype, copy=False)
+        np.clip(image, lower_bound, upper_bound, out=image)
+        image -= mean_intensity
+        image /= max(std_intensity, 1e-8)
         return image


 class NoNormalization(ImageNormalization):
     leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False

     def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
-        return image.astype(self.target_dtype)
+        return image.astype(self.target_dtype, copy=False)


 class RescaleTo01Normalization(ImageNormalization):
     leaves_pixels_outside_mask_at_zero_if_use_mask_for_norm_is_true = False

     def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
-        image = image.astype(self.target_dtype)
-        image = image - image.min()
-        image = image / np.clip(image.max(), a_min=1e-8, a_max=None)
+        image = image.astype(self.target_dtype, copy=False)
+        image -= image.min()
+        image /= np.clip(image.max(), a_min=1e-8, a_max=None)
         return image


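The CT path benefits the most from the in-place clip, since `np.clip` runs over the whole volume; with `out=` it writes into the existing buffer instead of returning a fresh array. A standalone sketch with made-up bounds, not taken from the repository:

import numpy as np

image = np.random.randn(128, 128).astype(np.float32) * 100.0
lower_bound, upper_bound = -57.0, 164.0                      # made-up percentile bounds

clipped = np.clip(image, lower_bound, upper_bound)           # old form: returns a new array
assert not np.shares_memory(image, clipped)

result = np.clip(image, lower_bound, upper_bound, out=image) # new form: clips in place
assert result is image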
@@ -89,7 +92,7 @@ def run(self, image: np.ndarray, seg: np.ndarray = None) -> np.ndarray:
                                 "Your images do not seem to be RGB images"
         assert image.max() <= 255, "RGB images are uint 8, for whatever reason I found pixel values greater than 255" \
                                    ". Your images do not seem to be RGB images"
-        image = image.astype(self.target_dtype)
-        image = image / 255.
+        image = image.astype(self.target_dtype, copy=False)
+        image /= 255.
         return image

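One caveat worth noting about the RGB change (an editorial note, not from the commit): `image /= 255.` only works in place because the array has already been cast to a floating-point `target_dtype`; on the raw uint8 data, NumPy refuses the in-place true division. A small sketch:

import numpy as np

rgb = np.array([[0, 128, 255]], dtype=np.uint8)

try:
    rgb /= 255.                                 # in-place true divide cannot keep the uint8 dtype
except TypeError as err:                        # NumPy raises a UFuncTypeError (a TypeError subclass)
    print("in-place divide on uint8 fails:", err)

image = rgb.astype(np.float32, copy=False)      # dtype differs here, so this cast still copies
image /= 255.                                   # in place once the array is floating point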