Commit

adding precommit hook (#132)
dkazanc authored Jul 10, 2024
1 parent 83b05be commit 053e780
Showing 19 changed files with 152 additions and 100 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/conda_upload.yml
@@ -29,7 +29,7 @@ jobs:

# install dependencies with conda
- name: Install dependencies with conda
run: |
$CONDA/bin/conda env create --name httomolibgpu --file conda/environment.yml
$CONDA/bin/conda run -n httomolibgpu pip install -e .
$CONDA/bin/conda list
2 changes: 1 addition & 1 deletion .github/workflows/tests_run.yml
@@ -41,5 +41,5 @@ jobs:
micromamba list
- name: Run tests
run: |
pytest tests/
34 changes: 34 additions & 0 deletions .pre-commit-config.yaml
@@ -0,0 +1,34 @@
repos:
- repo: https://github.com/psf/black
rev: 23.12.1
hooks:
- id: black
language_version: python3
exclude: ^docs/source/examples/
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.5.0
hooks:
- id: trailing-whitespace # This hook trims trailing whitespace.
- id: check-docstring-first # Checks a common error of defining a docstring after code.
- id: check-merge-conflict # Check for files that contain merge conflict strings.
- id: check-yaml # This hook checks yaml files for parseable syntax.
- id: detect-private-key # Detects the presence of private keys.
- id: check-symlinks
- id: check-toml
- repo: https://github.com/pre-commit/pygrep-hooks
rev: v1.10.0
hooks:
- id: python-use-type-annotations
- repo: https://github.com/kynan/nbstripout
rev: 0.6.1
hooks:
- id: nbstripout
exclude: ^docs/source/examples/
- repo: https://github.com/asottile/blacken-docs
rev: 1.16.0
hooks:
- id: blacken-docs
- repo: https://github.com/nbQA-dev/nbQA
rev: 1.7.1
hooks:
- id: nbqa-black
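For context (not part of the diff itself): hooks configured this way are typically enabled once per clone of the repository. A minimal sketch of the usual pre-commit workflow, assuming pre-commit is not already available in the active environment:

$ pip install pre-commit             # or: conda install -c conda-forge pre-commit
$ pre-commit install                 # register the git hook for this clone
$ pre-commit run --all-files         # optionally run every hook across the repository

After `pre-commit install`, the hooks above (black, trailing-whitespace, nbstripout, etc.) run automatically on the files staged for each commit.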
12 changes: 6 additions & 6 deletions README.rst
@@ -2,17 +2,17 @@ HTTomolibGPU is a library of GPU accelerated methods for tomography
--------------------------------------------------------------------

**HTTomolibGPU** is a collection of image processing methods in Python for computed tomography.
The methods are GPU-accelerated with the open-source Python library `CuPy <https://cupy.dev/>`_. Most of the
methods were migrated from the `TomoPy <https://tomopy.readthedocs.io/en/stable/>`_ and `Savu <https://savu.readthedocs.io/en/latest/>`_ software packages.
Some of the methods have also been optimised for higher computational efficiency before being ported to CuPy.

The purpose of HTTomolibGPU
===========================

**HTTomolibGPU** can be used as a stand-alone library; see the Examples section in the `Documentation <https://diamondlightsource.github.io/httomolibgpu/>`_.
However, it has been developed specifically to work together with the `HTTomo <https://diamondlightsource.github.io/httomo/>`_ package as
its backend for data processing. HTTomo is a user interface (UI) written in Python for fast processing of big tomographic data using
MPI.
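For a quick illustration of stand-alone use, a minimal sketch along the lines of the example notebooks shown further down in this diff; the import path for ``normalize`` is an assumption (it is not shown here), while the ``find_center_vo`` and ``FBP`` imports appear verbatim in the notebooks:

.. code-block:: python

    import numpy as np
    import cupy as cp

    from httomolibgpu.recon.rotation import find_center_vo
    from httomolibgpu.recon.algorithm import FBP
    # assumed import path; the normalize import is not shown in this diff
    from httomolibgpu.prep.normalize import normalize

    # synthetic stand-in data, shaped (projections, detector Y, detector X)
    projs = np.random.rand(180, 3, 160).astype(np.float32)
    flats = np.ones((10, 3, 160), dtype=np.float32)
    darks = np.zeros((10, 3, 160), dtype=np.float32)

    # move the data to the GPU and run the methods on CuPy arrays
    data = normalize(cp.asarray(projs), cp.asarray(flats), cp.asarray(darks))
    angles = np.linspace(0.0, np.pi, data.shape[0])
    cor = find_center_vo(data, ind=1)                   # centre-of-rotation estimate
    recon = FBP(data, angles=angles, center=cor).get()  # .get() copies back to NumPy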

Install HTTomolibGPU as a pre-built conda Python package
=========================================================
@@ -28,7 +28,7 @@ Setup the development environment:
==================================

.. code-block:: console

   $ git clone [email protected]:DiamondLightSource/httomolibgpu.git # clone the repo
   $ conda env create --name httomolibgpu --file conda/environment.yml # install dependencies
   $ conda activate httomolibgpu # activate the environment
26 changes: 16 additions & 10 deletions docs/source/examples/Cor_largesino.ipynb
@@ -43,10 +43,12 @@
"\n",
"# Load the sinogram data\n",
"path_lib = os.path.dirname(httomolibgpu.__file__)\n",
"in_file = os.path.abspath(os.path.join(path_lib, '..', \"tests/test_data/\",\"3600proj_sino.npz\"))\n",
"in_file = os.path.abspath(\n",
" os.path.join(path_lib, \"..\", \"tests/test_data/\", \"3600proj_sino.npz\")\n",
")\n",
"l_infile = np.load(in_file)\n",
"sinogram = l_infile['sinogram']\n",
"angles = l_infile['angles']\n",
"sinogram = l_infile[\"sinogram\"]\n",
"angles = l_infile[\"angles\"]\n",
"sinogram = cp.asarray(sinogram)\n",
"\n",
"print(\"The shape of the sinogram is {}\".format(cp.shape(sinogram)))"
@@ -95,7 +97,8 @@
],
"source": [
"from httomolibgpu.recon.rotation import find_center_vo\n",
"print (\"Finding the Center of Rotation for the reconstruction\")\n",
"\n",
"print(\"Finding the Center of Rotation for the reconstruction\")\n",
"cor = find_center_vo(sinogram)\n",
"print(\"The found Center of Rotation is {}\".format(cor))"
]
@@ -126,15 +129,18 @@
],
"source": [
"from httomolibgpu.prep.stripe import remove_all_stripe\n",
"print (\"Remove stripes using remove_all_stripe\")\n",
"\n",
"sino3d = cp.ascontiguousarray(cp.ones((sinogram.shape[0], 3, sinogram.shape[1])), dtype=cp.float32)\n",
"sino3d[:,1,:] = sinogram\n",
"print(\"Remove stripes using remove_all_stripe\")\n",
"\n",
"sino3d = cp.ascontiguousarray(\n",
" cp.ones((sinogram.shape[0], 3, sinogram.shape[1])), dtype=cp.float32\n",
")\n",
"sino3d[:, 1, :] = sinogram\n",
"\n",
"sino3d = remove_all_stripe(sino3d)\n",
"\n",
"plt.figure()\n",
"plt.imshow(sino3d[:,1,:].get())\n",
"plt.imshow(sino3d[:, 1, :].get())\n",
"plt.title(\"Sinogram after stripes removal\")\n",
"plt.show()"
]
@@ -164,14 +170,14 @@
}
],
"source": [
"print (\"Perform Reconstruction using FBP\")\n",
"print(\"Perform Reconstruction using FBP\")\n",
"from httomolibgpu.recon.algorithm import FBP\n",
"\n",
"reconFBP = FBP(sino3d, angles=angles, center=cor)\n",
"reconFBP_np = reconFBP.get()\n",
"\n",
"plt.figure()\n",
"plt.imshow(reconFBP_np[500:2000,1,500:2000],vmin=0,vmax=0.01)\n",
"plt.imshow(reconFBP_np[500:2000, 1, 500:2000], vmin=0, vmax=0.01)\n",
"plt.title(\"Reconstruction with FBP\")\n",
"plt.show()"
]
34 changes: 21 additions & 13 deletions docs/source/examples/DistortionCorr.ipynb

Large diffs are not rendered by default.

34 changes: 21 additions & 13 deletions docs/source/examples/pipeline1_FBP.ipynb
@@ -51,19 +51,25 @@
"\n",
"# Load the projection data\n",
"path_lib = os.path.dirname(httomolibgpu.__file__)\n",
"in_file = os.path.abspath(os.path.join(path_lib, '..', \"tests/test_data/\",'tomo_standard.npz'))\n",
"in_file = os.path.abspath(\n",
" os.path.join(path_lib, \"..\", \"tests/test_data/\", \"tomo_standard.npz\")\n",
")\n",
"datafile = np.load(in_file)\n",
"host_data = datafile['data']\n",
"host_flats = datafile['flats']\n",
"host_darks = datafile['darks']\n",
"host_data = datafile[\"data\"]\n",
"host_flats = datafile[\"flats\"]\n",
"host_darks = datafile[\"darks\"]\n",
"\n",
"print(\"The shape of the data is {} as (projections, detector Y, detector X)\".format(np.shape(host_data)))\n",
"print(\n",
" \"The shape of the data is {} as (projections, detector Y, detector X)\".format(\n",
" np.shape(host_data)\n",
" )\n",
")\n",
"\n",
"print(\"Normalising the data\")\n",
"data = cp.asarray(host_data)\n",
"flats = cp.asarray(host_flats)\n",
"darks = cp.asarray(host_darks)\n",
"data_normalised = normalize(data, flats, darks, cutoff = 10, minus_log = False)\n",
"data_normalised = normalize(data, flats, darks, cutoff=10, minus_log=False)\n",
"\n",
"sliceSel = 64\n",
"data_normalised_np = data_normalised.get()\n",
@@ -100,7 +106,7 @@
"source": [
"from httomolibgpu.recon.rotation import find_center_vo\n",
"\n",
"print (\"Finding the Center of Rotation for the reconstruction\")\n",
"print(\"Finding the Center of Rotation for the reconstruction\")\n",
"cor = find_center_vo(data_normalised, ind=64)\n",
"print(\"The found Center of Rotation is {}\".format(cor))"
]
@@ -131,8 +137,10 @@
"source": [
"from httomolibgpu.prep.phase import paganin_filter_tomopy\n",
"\n",
"print (\"Applying Paganin filter\")\n",
"phase_contrast_data = paganin_filter_tomopy(data_normalised, pixel_size=0.1, dist=50, energy=53, alpha=1e-5)\n",
"print(\"Applying Paganin filter\")\n",
"phase_contrast_data = paganin_filter_tomopy(\n",
" data_normalised, pixel_size=0.1, dist=50, energy=53, alpha=1e-5\n",
")\n",
"\n",
"sliceSel = 64\n",
"phase_contrast_data_np = phase_contrast_data.get()\n",
@@ -176,12 +184,12 @@
}
],
"source": [
"print (\"Perform Reconstruction using FBP\")\n",
"print(\"Perform Reconstruction using FBP\")\n",
"\n",
"from httomolibgpu.recon.algorithm import FBP\n",
"\n",
"angles = np.linspace(0.0 * np.pi / 180.0, 180.0 * np.pi / 180.0, data.shape[0])\n",
" \n",
"\n",
"reconFBP = FBP(phase_contrast_data, angles=angles, center=cor)\n",
"\n",
"reconFBP_np = reconFBP.get()\n",
@@ -226,12 +234,12 @@
}
],
"source": [
"print (\"Perform Reconstruction using Fourier (LPRec)\")\n",
"print(\"Perform Reconstruction using Fourier (LPRec)\")\n",
"\n",
"from httomolibgpu.recon.algorithm import LPRec\n",
"\n",
"angles = np.linspace(0.0 * np.pi / 180.0, 180.0 * np.pi / 180.0, data.shape[0])\n",
" \n",
"\n",
"reconLPRec = LPRec(phase_contrast_data, angles=angles, center=cor)\n",
"\n",
"reconLPRec_np = reconLPRec.get()\n",
42 changes: 26 additions & 16 deletions docs/source/examples/pipeline2_iterative.ipynb

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion docs/source/index.rst
@@ -19,7 +19,7 @@
examples/pipeline1_FBP
examples/pipeline2_iterative
examples/Cor_largesino
examples/DistortionCorr

.. _reference_content:

2 changes: 2 additions & 0 deletions httomolibgpu/cuda_kernels/__init__.py
@@ -2,8 +2,10 @@
from typing import List, Optional, Tuple

from httomolibgpu import cupywrapper

cp = cupywrapper.cp


def load_cuda_module(
file: str,
name_expressions: Optional[List[str]] = None,
30 changes: 15 additions & 15 deletions httomolibgpu/cuda_kernels/calc_metrics.cu
@@ -10,17 +10,17 @@
*
* The key is the formula to calculate the Pearson correlation coefficient.
* This is calculated manually for every shifted matrix position in the same kernel.
*
* The correlation coefficient between two vectors (we flatten the matrices) is:
*
* m1_norm = m1 - mean(m1)
* m2_norm = m2 - mean(m2)
* m1_sqr = dot(m1_norm, m1_norm)
* m2_sqr = dot(m2_norm, m2_norm)
* m1_m2 = dot(m1_norm, m2_norm)
* r = m1_m2 / sqrt(m1_sqr * m2_sqr)
*
* The kernels below compute these quantities directly, taking into
* consideration normalisation, overlaps, and position offsets. Also note that the
* version with overlap requires 3 correlation coefficients (between 3 matrices).
*/
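For reference, the correlation formula in the comment above can be checked against a small NumPy sketch (illustrative only, not part of this commit):

import numpy as np

def pearson_r(m1: np.ndarray, m2: np.ndarray) -> float:
    # flatten the matrices and subtract the means, as in the comment above
    m1_norm = m1.ravel() - m1.mean()
    m2_norm = m2.ravel() - m2.mean()
    m1_sqr = np.dot(m1_norm, m1_norm)
    m2_sqr = np.dot(m2_norm, m2_norm)
    m1_m2 = np.dot(m1_norm, m2_norm)
    return m1_m2 / np.sqrt(m1_sqr * m2_sqr)

# agrees with np.corrcoef(m1.ravel(), m2.ravel())[0, 1]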
@@ -73,7 +73,7 @@ float clip(float x, float min, float max) {
}

__device__ inline
float sum_abs_row(const float* row, int win_width)
{
float sum_abs = 0.0;
for (int x = 0; x < win_width; ++x) {
@@ -116,7 +116,7 @@ __device__ void _calc_metrics_no_overlap(const float *mat1, int mat1_nx,
{
float norm_factor = 1.0f;
if (norm) {
norm_factor = sum_abs_row(&mat2_roi[y * mat2_nx], win_width) /
sum_abs_row(&mat1_roi[y * mat1_nx], win_width);
}
for (int x = 0; x < win_width; ++x)
@@ -142,7 +142,7 @@ __device__ void _calc_metrics_no_overlap(const float *mat1, int mat1_nx,
{
float norm_factor = 1.0f;
if (norm) {
norm_factor = sum_abs_row(&mat2_roi[y * mat2_nx], win_width) /
sum_abs_row(&mat1_roi[y * mat1_nx], win_width);
}
for (int x = 0; x < win_width; ++x)
@@ -200,10 +200,10 @@ __device__ void _calc_metrics_overlap(const float *mat1, int mat1_nx,
extern __shared__ float smem[];

// we need space for 6 sum reductions for calculating the correlation coefficient
float v[6];

float d_ramp = 1.0f / (win_width - 1);

////////////////////////
// 1. We need the mean of the 3 matrices (flattened)
v[0] = 0.0f;
@@ -213,7 +213,7 @@
{
float norm_factor = 1.0f;
if (norm) {
norm_factor = sum_abs_row(&mat2_roi[y * mat2_nx], win_width) /
norm_factor = sum_abs_row(&mat2_roi[y * mat2_nx], win_width) /
sum_abs_row(&mat1_roi[y * mat1_nx], win_width);
}
for (int x = 0; x < win_width; ++x)
Expand All @@ -222,8 +222,8 @@ __device__ void _calc_metrics_overlap(const float *mat1, int mat1_nx,
float ramp_up = 1.0f - ramp_down;
float mat1_roi_val = mat1_roi[y * mat1_nx + x] * norm_factor;
float mat2_roi_val = mat2_roi[y * mat2_nx + x];
float mat_comb_val = side == 1 ?
(mat1_roi_val * ramp_down + mat2_roi_val * ramp_up) :
(mat1_roi_val * ramp_up + mat2_roi_val * ramp_down);

v[0] += mat1_roi_val;
@@ -251,7 +251,7 @@
{
float norm_factor = 1.0f;
if (norm) {
norm_factor = sum_abs_row(&mat2_roi[y * mat2_nx], win_width) /
sum_abs_row(&mat1_roi[y * mat1_nx], win_width);
}
for (int x = 0; x < win_width; ++x)
@@ -260,8 +260,8 @@ __device__ void _calc_metrics_overlap(const float *mat1, int mat1_nx,
float ramp_up = 1.0f - ramp_down;
float mat1_roi_val = mat1_roi[y * mat1_nx + x] * norm_factor;
float mat2_roi_val = mat2_roi[y * mat2_nx + x];
float mat_comb_val = side == 1 ?
(mat1_roi_val * ramp_down + mat2_roi_val * ramp_up) :
float mat_comb_val = side == 1 ?
(mat1_roi_val * ramp_down + mat2_roi_val * ramp_up) :
(mat1_roi_val * ramp_up + mat2_roi_val * ramp_down);

// for covariance matrix, we need to remove the mean first
Expand All @@ -279,7 +279,7 @@ __device__ void _calc_metrics_overlap(const float *mat1, int mat1_nx,
}

}

// 6 smem reductions
sum_reduction_n<6>(smem, v);

1 change: 0 additions & 1 deletion httomolibgpu/misc/corr.py
@@ -79,7 +79,6 @@ def __median_filter(
axis: int = 0,
dif: float = 0.0,
) -> cp.ndarray:

try:
from cucim.skimage.filters import median
from cucim.skimage.morphology import disk
3 changes: 0 additions & 3 deletions httomolibgpu/misc/morph.py
@@ -67,7 +67,6 @@ def sino_360_to_180(
def __sino_360_to_180(
data: cp.ndarray, overlap: int = 0, rotation: Literal["left", "right"] = "left"
) -> cp.ndarray:

if data.ndim != 3:
raise ValueError("only 3D data is supported")

@@ -136,7 +135,6 @@ def data_resampler(
def __data_resampler(
data: cp.ndarray, newshape: list, axis: int = 1, interpolation: str = "linear"
) -> cp.ndarray:

from cupyx.scipy.interpolate import interpn

if data.ndim != 3:
@@ -206,7 +204,6 @@ def __data_resampler(
res, [newshape[0], newshape[1]], order="C"
)
elif axis == 1:

for j in range(M):
res = interpn(
points,
