From 19e117bc81f03943e0850c1d90a882c6c6a9fe27 Mon Sep 17 00:00:00 2001 From: Pierre-Antoine Comby Date: Fri, 1 Nov 2024 14:20:32 +0100 Subject: [PATCH 1/8] Paper for JOSS (#200) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * add draft paper.md * update * update * ci: build the pdf please * update affiliations * typo * Add model * update * format * format table * update * unindent table * update with bibliography * move paper in doc * update * update * Apply suggestions from code review Co-authored-by: Chaithya G R * add lnk to PR * update action * Fill-in affiliation and fix name * fix name * Apply suggestions from code review Co-authored-by: Guillaume Daval-Frérot * update authors order * smaller figure * Correct names * add orcid Co-authored-by: Chaithya G R * apply suggestion from guillaume Co-authored-by: Guillaume Daval-Frérot * Update paper.md * Update paper.md * Update paper.bib add pysap-mri paper * Update paper.md * Update paper.bib * Update paper.md --------- Co-authored-by: Chaithya G R Co-authored-by: Matteo Cencini <83717049+mcencini@users.noreply.github.com> Co-authored-by: LenaOudjman Co-authored-by: Guillaume Daval-Frérot Co-authored-by: Asma TANABENE <121893894+AsmaTANABEN@users.noreply.github.com> --- .github/workflows/draft-pdf.yml | 42 +++++++++ docs/paper-joss/paper.bib | 154 ++++++++++++++++++++++++++++++++ docs/paper-joss/paper.md | 137 ++++++++++++++++++++++++++++ 3 files changed, 333 insertions(+) create mode 100644 .github/workflows/draft-pdf.yml create mode 100644 docs/paper-joss/paper.bib create mode 100644 docs/paper-joss/paper.md diff --git a/.github/workflows/draft-pdf.yml b/.github/workflows/draft-pdf.yml new file mode 100644 index 00000000..b6649aad --- /dev/null +++ b/.github/workflows/draft-pdf.yml @@ -0,0 +1,42 @@ +name: Draft PDF +on: + push: + paths: + - docs/paper-joss/* + - .github/workflows/draft-pdf.yml* + +permissions: + pull-requests: write + +jobs: + paper: + runs-on: ubuntu-latest + name: Paper Draft + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Build draft PDF + uses: openjournals/openjournals-draft-action@master + with: + journal: joss + # This should be the path to the paper within your repo. + paper-path: docs/paper-joss/paper.md + - name: Upload + uses: actions/upload-artifact@v4 + with: + name: paper + # This is the output path where Pandoc will write the compiled + # PDF. 
Note, this should be the same directory as the input + # paper.md + path: docs/paper-joss/paper.pdf + + link: + needs: paper # make sure the artifacts are uploaded first + runs-on: ubuntu-latest + permissions: + contents: write # for commenting on your commit + pull-requests: write # for commenting on your pr + steps: + - uses: beni69/artifact-link@v1 + with: + token: ${{ github.token }} diff --git a/docs/paper-joss/paper.bib b/docs/paper-joss/paper.bib new file mode 100644 index 00000000..a31ab269 --- /dev/null +++ b/docs/paper-joss/paper.bib @@ -0,0 +1,154 @@ +@unpublished{shih_cufinufft_2021, + title = {{{cuFINUFFT}}: A Load-Balanced {{GPU}} Library for General-Purpose Nonuniform {{FFTs}}}, + shorttitle = {{{cuFINUFFT}}}, + author = {Shih, Yu-hsuan and Wright, Garrett and Andén, Joakim and Blaschke, Johannes and Barnett, Alex H.}, + date = {2021-03-25}, + eprint = {2102.08463}, + eprinttype = {arXiv}, + eprintclass = {cs, eess, math}, + url = {http://arxiv.org/abs/2102.08463}, + urldate = {2021-12-10}, + abstract = {Nonuniform fast Fourier transforms dominate the computational cost in many applications including image reconstruction and signal processing. We thus present a generalpurpose GPU-based CUDA library for type 1 (nonuniform to uniform) and type 2 (uniform to nonuniform) transforms in dimensions 2 and 3, in single or double precision. It achieves high performance for a given user-requested accuracy, regardless of the distribution of nonuniform points, via cache-aware point reordering, and load-balanced blocked spreading in shared memory. At low accuracies, this gives on-GPU throughputs around 109 nonuniform points per second, and (even including hostdevice transfer) is typically 4–10× faster than the latest parallel CPU code FINUFFT (at 28 threads). It is competitive with two established GPU codes, being up to 90× faster at high accuracy and/or type 1 clustered point distributions. Finally we demonstrate a 5–12× speedup versus CPU in an X-ray diffraction 3D iterative reconstruction task at 10−12 accuracy, observing excellent multi-GPU weak scaling up to one rank per GPU.}, + langid = {english}, + keywords = {{Computer Science - Distributed, Parallel, and Cluster Computing},Computer Science - Mathematical Software,Electrical Engineering and Systems Science - Signal Processing,Mathematics - Numerical Analysis,No DOI found}, + file = {/volatile/home/pc266769/Zotero/storage/K5LLWXZE/shih_cufinufft_2021.pdf} +} + +@inproceedings{uecker_berkley_2015, + title = {Berkley Advanced Reconstruction Toolbox}, + shorttitle = {Mrirecon/Bart}, + booktitle = {Proc. {{Intl}}. {{Soc}}. {{Mag}}. {{Reson}}. {{Med}}. 23}, + author = {Uecker, Martin and Ong, Frank and Tamir, J}, + date = {2015}, + location = {Toronto}, + url = {https://zenodo.org/records/10277939}, + urldate = {2023-12-19}, + keywords = {No DOI found}, + file = {/volatile/home/pc266769/Zotero/storage/LIMD2P5S/10277939.html} +} + +@inproceedings{ong_frank_sigpy_2019, + title = {{{SigPy}}: {{A Python Package}} for {{High Performance Iterative Reconstruction}}}, + booktitle = {{{ISMRM}} 2019}, + author = {{Ong Frank} and {Lustig Michael}}, + date = {2019}, + abstract = {We present SigPy, a Python package designed for high performance iterative reconstruction. Its main features include: - A unified CPU and GPU Python interface to signal processing functions, including convolution, FFT, NUFFT, wavelet transform, and thresholding functions. 
- Convenient classes (Linop, Prox, Alg, App) to build more complicated iterative reconstruction algorithms. - Commonly used MRI reconstruction methods as Apps, including SENSE, L1-wavelet regularized reconstruction, total-variation regularized reconstruction, and JSENSE. - MRI-specific functions, including poisson-disc sampling, ESPIRiT calibration, and non-Cartesian preconditioners. - Simple installation via pip and conda.}, + eventtitle = {{{ISMRM}}}, + keywords = {No DOI found} +} + +@article{sutton_fast_2003, + title = {Fast, Iterative Image Reconstruction for {{MRI}} in the Presence of Field Inhomogeneities}, + author = {Sutton, B.P. and Noll, D.C. and Fessler, J.A.}, + date = {2003-02}, + journaltitle = {IEEE Transactions on Medical Imaging}, + volume = {22}, + number = {2}, + pages = {178--188}, + issn = {1558-254X}, + doi = {10.1109/TMI.2002.808360}, + abstract = {In magnetic resonance imaging, magnetic field inhomogeneities cause distortions in images that are reconstructed by conventional fast Fourier transform (FFT) methods. Several noniterative image reconstruction methods are used currently to compensate for field inhomogeneities, but these methods assume that the field map that characterizes the off-resonance frequencies is spatially smooth. Recently, iterative methods have been proposed that can circumvent this assumption and provide improved compensation for off-resonance effects. However, straightforward implementations of such iterative methods suffer from inconveniently long computation times. This paper describes a tool for accelerating iterative reconstruction of field-corrected MR images: a novel time-segmented approximation to the MR signal equation. We use a min-max formulation to derive the temporal interpolator. Speedups of around 60 were achieved by combining this temporal interpolator with a nonuniform fast Fourier transform with normalized root mean squared approximation errors of 0.07\%. The proposed method provides fast, accurate, field-corrected image reconstruction even when the field map is not smooth.}, + eventtitle = {{{IEEE Transactions}} on {{Medical Imaging}}}, + keywords = {Biomedical engineering,Frequency,Image reconstruction,Image segmentation,Iterative methods,Magnetic fields,Magnetic resonance imaging,Optical imaging,Reconstruction algorithms,Spirals}, + file = {/volatile/home/pc266769/Zotero/storage/8XA5ZU44/sutton_fast_2003.pdf} +} + +@article{fessler_nonuniform_2003, + title = {Nonuniform Fast Fourier Transforms Using Min-Max Interpolation}, + author = {Fessler, J.A. and Sutton, B.P.}, + date = {2003-02}, + journaltitle = {IEEE Transactions on Signal Processing}, + shortjournal = {IEEE Trans. Signal Process.}, + volume = {51}, + number = {2}, + pages = {560--574}, + issn = {1053-587X}, + doi = {10.1109/tsp.2002.807005}, + url = {http://ieeexplore.ieee.org/document/1166689/}, + urldate = {2021-05-03}, + abstract = {The FFT is used widely in signal processing for efficient computation of the Fourier transform (FT) of finitelength signals over a set of uniformly-spaced frequency locations. However, in many applications, one requires nonuniform sampling in the frequency domain, i.e., a nonuniform FT. Several papers have described fast approximations for the nonuniform FT based on interpolating an oversampled FFT. This paper presents an interpolation method for the nonuniform FT that is optimal in the min-max sense of minimizing the worst-case approximation error over all signals of unit norm. 
The proposed method easily generalizes to multidimensional signals. Numerical results show that the min-max approach provides substantially lower approximation errors than conventional interpolation methods. The min-max criterion is also useful for optimizing the parameters of interpolation kernels such as the Kaiser-Bessel function.}, + langid = {english}, + file = {/volatile/home/pc266769/Zotero/storage/4NDF5834/fessler_nonuniform_2003.pdf} +} + +@article{wang_efficient_2023, + title = {Efficient Approximation of {{Jacobian}} Matrices Involving a Non-Uniform Fast {{Fourier}} Transform ({{NUFFT}})}, + author = {Wang, Guanhua and Fessler, Jeffrey A.}, + date = {2023}, + journaltitle = {IEEE Transactions on Computational Imaging}, + shortjournal = {IEEE Trans. Comput. Imaging}, + volume = {9}, + eprint = {2111.02912}, + eprinttype = {arXiv}, + eprintclass = {eess}, + pages = {43--54}, + issn = {2333-9403, 2334-0118, 2573-0436}, + doi = {10.1109/TCI.2023.3240081}, + url = {http://arxiv.org/abs/2111.02912}, + urldate = {2024-04-11}, + abstract = {There is growing interest in learning k-space sampling patterns for MRI using optimization approaches [1], [2], [3], [4]. For non-Cartesian sampling patterns, reconstruction methods typically involve non-uniform FFT (NUFFT) operations. A typical NUFFT method contains frequency domain interpolation using Kaiser-Bessel kernel values that are retrieved by nearest neighbor look-up in a finely tabulated kernel [5]. That look-up operation is not differentiable with respect to the sampling pattern, complicating auto-differentiation routines for backpropagation (stochastic gradient descent) for sampling pattern optimization. This paper describes an efficient and accurate approach for computing approximate gradients with respect to the sampling pattern for learning k-space sampling. Various numerical experiments validate the accuracy of the proposed approximation. We also showcase the trajectories optimized for different iterative reconstruction algorithms, including smooth convex regularized reconstruction and compressed sensing-based reconstruction.}, + langid = {english}, + keywords = {Electrical Engineering and Systems Science - Image and Video Processing,Electrical Engineering and Systems Science - Signal Processing}, + file = {/volatile/home/pc266769/Zotero/storage/HU6FNVQU/Wang et Fessler - 2023 - Efficient approximation of Jacobian matrices invol.pdf} +} +@inproceedings{knoll_gpunufft_2014, + title={gpuNUFFT - An Open Source GPU Library for 3D Regridding with Direct Matlab Interface}, + author={Florian Knoll and Andreas Schwarzl and Clemens Diwoky and Daniel K. Sodickson}, + year={2014}, + url={https://api.semanticscholar.org/CorpusID:53652346} +} +@inproceedings{muckley_torchkbnufft_2020, + author = {M. J. Muckley and R. Stern and T. Murrell and F. Knoll}, + title = {{TorchKbNufft}: A High-Level, Hardware-Agnostic Non-Uniform Fast {Fourier} Transform}, + booktitle = {ISMRM Workshop on Data Sampling \& Image Reconstruction}, + year = 2020, + note = {Source code available at https://github.com/mmuckley/torchkbnufft}, +} +@inproceedings{comby_snake-fmri_2024, + ids = {Comby_Vignaud_Ciuciu_2024}, + title = {{{SNAKE-fMRI}}: {{A}} Modular {{fMRI}} Simulator from the Space-Time Domain to k-Space Data and Back}, + booktitle = {{{ISMRM}} Annual Meeting, (in Press)}, + author = {Comby, P.-A. and Vignaud, A. 
and Ciuciu, P.}, + date = {2024}, + location = {Singapore}, + keywords = {No DOI found} +} + +@article{farrens_pysap_2020, + title = {{{PySAP}}: {{Python Sparse Data Analysis Package}} for Multidisciplinary Image Processing}, + shorttitle = {{{PySAP}}}, + author = {Farrens, S. and Grigis, A. and El Gueddari, L. and Ramzi, Z. and G.r., Chaithya and Starck, S. and Sarthou, B. and Cherkaoui, H. and Ciuciu, P. and Starck, J. -L.}, + date = {2020-07-01}, + journaltitle = {Astronomy and Computing}, + shortjournal = {Astronomy and Computing}, + volume = {32}, + pages = {100402}, + issn = {2213-1337}, + doi = {10.1016/j.ascom.2020.100402}, + url = {https://www.sciencedirect.com/science/article/pii/S2213133720300561}, + urldate = {2024-09-27}, + abstract = {We present the open-source image processing software package PySAP (Python Sparse data Analysis Package) developed for the COmpressed Sensing for Magnetic resonance Imaging and Cosmology (COSMIC) project. This package provides a set of flexible tools that can be applied to a variety of compressed sensing and image reconstruction problems in various research domains. In particular, PySAP offers fast wavelet transforms and a range of integrated optimisation algorithms. In this paper we present the features available in PySAP and provide practical demonstrations on astrophysical and magnetic resonance imaging data.}, + keywords = {Convex optimisation,Image processing,Open-source software,Reconstruction}, + file = {/volatile/home/pc266769/Zotero/storage/X4725MSA/Farrens et al. - 2020 - PySAP Python Sparse Data Analysis Package for multidisciplinary image processing.pdf} +} + +@software{tachella_deepinverse_2023, + title = {{{DeepInverse}}: {{A}} Deep Learning Framework for Inverse Problems in Imaging}, + shorttitle = {{{DeepInverse}}}, + author = {Tachella, Julian and Chen, Dongdong and Hurault, Samuel and Terris, Matthieu and Wang, Andrew}, + date = {2023-06}, + doi = {10.5281/zenodo.7982256}, + url = {https://github.com/deepinv/deepinv}, + urldate = {2024-09-27}, + abstract = {PyTorch library for solving imaging inverse problems using deep learning}, + version = {latest} +} + +@inproceedings{gueddari_pysap-mri_2020, + ids = {gueddari_pysap-mri_2020-1,gueddari_pysap-mri_2020-2}, + title = {{{PySAP-MRI}}: A Python Package for {{MR}} Image Reconstruction}, + booktitle = {{{ISMRM}} Workshop on Data Sampling and Image Reconstruction}, + author = {Gueddari, Loubna and Gr, Chaithya and Ramzi, Zaccharie and Farrens, Samuel and Starck, Sophie and Grigis, Antoine and Starck, Jean-Luc and Ciuciu, Philippe}, + year = {2020}, +} + diff --git a/docs/paper-joss/paper.md b/docs/paper-joss/paper.md new file mode 100644 index 00000000..b5304241 --- /dev/null +++ b/docs/paper-joss/paper.md @@ -0,0 +1,137 @@ +--- +title: 'MRI-NUFFT: Doing non-Cartesian MRI has never been easier' +tags: + - Python + - MRI + - NUFFT + - Numpy + - CUDA + - Torch +authors: + - name: Pierre-Antoine Comby + orcid: 0000-0001-6998-232X + corresponding: true + affiliation: "1, 2" + - name: Guillaume Daval-Frérot + orcid: 0000-0002-5317-2522 + affiliation: 3 + - name: Caini Pan + affiliation: "1, 2" + - name: Asma Tanabene + affiliation: "1,2,5" + - name: Léna Oudjman + affiliation: "1, 2" + - name: Matteo Cencini + affiliation: 4 + - name: Philippe Ciuciu + orcid: 0000-0001-5374-962X + affiliation: "1,2" + - name: Chaithya GR + orcid: 0000-0001-9859-6006 + corresponding: true + affiliation: "1,2" + +affiliations: + - name: MIND, Inria + index: 1 + - name: Université Paris-Saclay / CEA + 
index: 2
+ - name: Chipiron
+   index: 3
+ - name: INFN, Pisa Division
+   index: 4
+ - name: Siemens Healthineers
+   index: 5
+
+date: 20 September 2024
+bibliography: paper.bib
+---
+
+
+# Summary
+MRI-NUFFT is a Python package that provides a universal interface to various non-uniform fast Fourier transform (NUFFT) libraries running on CPU or GPU (gpuNUFFT, FINUFFT, CUFINUFFT, pyNFFT), adding compatibility with standard array libraries (NumPy, CuPy, PyTorch, TensorFlow, etc.). On top of these libraries, it extends the existing NUFFT operations to provide a physical model of the MRI acquisition process (e.g. multi-coil acquisition and static-field inhomogeneities). It also provides a wide variety of customizable implementations of non-Cartesian sampling trajectories, as well as density compensation methods. Finally, it offers optimized auto-differentiation with respect to the data and sampling locations for machine learning. With MRI-NUFFT one can experiment with non-Cartesian sampling in MRI and get access to the latest advances in the field, including state-of-the-art sampling patterns.
+
+
+# Statement of need
+MRI is a non-invasive biomedical imaging technique, where raw data is sampled in the spatial frequency domain (k-space) and final images are obtained by applying an inverse (fast) Fourier transform on this data.
+Traditionally, the data is sampled on a Cartesian grid (often partially, by skipping lines to accelerate the acquisition) and reconstructed using FFT-based algorithms.
+However, the Cartesian approach is not always the best choice for data collection, and non-Cartesian sampling schemes have been proposed to improve image quality, reduce acquisition time or enable new imaging modalities. But the reconstruction of non-Cartesian data is more challenging and requires non-uniform fast Fourier transform (NUFFT) algorithms.
+Several NUFFT libraries have been developed over the past few years, but they are not always easy to use or do not account for the specificities of MRI data acquisition (e.g. multi-coil acquisition, static-field inhomogeneities, density compensation, etc.). Their performance can also vary widely depending on the use case (2D vs 3D data, number of coils, etc.).
+
+Moreover, non-Cartesian acquisitions remain an active research field, with new sampling patterns being proposed regularly. With MRI-NUFFT one can easily experiment with these new patterns and compare them to existing ones.
+Furthermore, there has been a growing interest in using deep learning to jointly learn MRI acquisition and reconstruction, which requires computing the gradients of the reconstruction with respect to the raw data and/or the sampling locations.
+
+# Features
+
+![MRI-NUFFT as an interface for non-Cartesian MRI](../_static/mri-nufft-scheme.svg){width=10cm}
+
+## NUFFT Library compatibility
+MRI-NUFFT is compatible with the following NUFFT libraries: FINUFFT[@barnett_parallel_2019], CUFINUFFT[@shih_cufinufft_2021], gpuNUFFT[@knoll_gpunufft_2014], TorchKbNufft[@muckley_torchkbnufft_2020], pyNFFT, SigPy[@ong_frank_sigpy_2019] and BART[@uecker_berkley_2015].
+Using our [benchmark](https://github.com/mind-inria/mri-nufft-benchmark/), we can also determine which NUFFT implementation provides the best performance, both in terms of computation time and memory footprint. At the time of writing, cufinufft and gpunufft provide the best performance by leveraging CUDA acceleration. MRI-NUFFT also supports standard array libraries (NumPy, CuPy, PyTorch, TensorFlow, etc.) and optimizes data copies by relying on the array-API standard.
+It also provides several enhancements on top of these backends, notably an optimized 2.5D NUFFT (for stacks of 2D non-uniform trajectories, commonly used in MRI), and a data-consistency term for iterative reconstruction ($\mathcal{F}_\Omega^*(\mathcal{F}_\Omega x - y)$).
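+
+As a minimal sketch of this unified interface (assuming the `finufft` backend is installed; the trajectory and image here are toy placeholders), one can write:
+
+```python
+import numpy as np
+from mrinufft import get_operator
+from mrinufft.trajectories import initialize_2D_cones
+
+# A 2D cones trajectory (32 shots of 256 samples), normalized to [-0.5, 0.5].
+samples = initialize_2D_cones(32, 256).reshape(-1, 2).astype(np.float32)
+nufft = get_operator("finufft")(samples, shape=(256, 256))
+
+image = np.random.randn(256, 256).astype(np.complex64)
+kspace = nufft.op(image)        # forward NUFFT: image -> non-Cartesian k-space
+adjoint = nufft.adj_op(kspace)  # adjoint NUFFT: k-space -> image
+
+# Data-consistency term F*(F(x) - y) used by iterative solvers:
+x0 = np.zeros((256, 256), dtype=np.complex64)
+grad = nufft.adj_op(nufft.op(x0) - kspace)
+```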
+
+
+## Extended Fourier Model
+MRI-NUFFT provides a physical model of the MRI acquisition process, including multi-coil acquisition and static-field inhomogeneities. This model is compatible with the NUFFT libraries, and can be used to simulate the acquisition of MRI data, or to reconstruct images from a given set of measurements. Namely, we provide a linear operator that encapsulates the forward and adjoint NUFFT operators, the coil sensitivity maps and (optionally) the static-field inhomogeneities. The forward model is described by the following equation:
+$$y(\boldsymbol{\nu}_i) = \sum_{j=1}^N x(\boldsymbol{u}_j) e^{-2\imath\pi\boldsymbol{u}_j\cdot\boldsymbol{\nu}_i} + n_i, \quad i=1,\dots,M$$
+where
+$x(\boldsymbol{u})$ is the spatially varying image contrast acquired; $y_1, \dots, y_M$ are the samples acquired at the frequency locations $\Omega=\lbrace \boldsymbol{\nu}_1, \dots, \boldsymbol{\nu}_M \in [-1/2, 1/2]^d\rbrace$; $\boldsymbol{u}_j$ are the $N$ spatial locations of the image voxels; and $n_i$ is zero-mean complex-valued Gaussian noise, modeling the thermal noise of the scanner.
+
+This can also be written using the operator notation $\boldsymbol{y} = \mathcal{F}_\Omega (\boldsymbol{x}) + \boldsymbol{n}$.
+
+As the sampling locations $\Omega$ are non-uniform and the image locations $\boldsymbol{u}_j$ are uniform, $\mathcal{F}_\Omega$ is a NUDFT operator, and the equation above describes a Type 2 NUDFT.
+Similarly, the adjoint operator is a Type 1 NUDFT:
+
+: Correspondence table between NUFFT types and the MRI acquisition model.
+
+| NUFFT Type | Operation | MRI Transform       | Operator               |
+|:-----------|:----------|:--------------------|:-----------------------|
+| Type 1     | Adjoint   | k-space $\to$ image | $\mathcal{F}_\Omega^*$ |
+| Type 2     | Forward   | image $\to$ k-space | $\mathcal{F}_\Omega$   |
+
+
+### Parallel Imaging Model
+In MRI the acquired signal can be received by multiple antennas ("coils").
+Each coil possesses a specific sensitivity profile (i.e. each sees the object differently due to its physical layout), so the acquisition model becomes:
+
+$$\begin{aligned}
+\tilde{\boldsymbol{y}} = \begin{bmatrix}
+    \mathcal{F}_\Omega S_1 \\
+    \vdots  \\
+    \mathcal{F}_\Omega S_L \\
+  \end{bmatrix}
+  \boldsymbol{x} + \tilde{\boldsymbol{n}} = \mathcal{F}_\Omega (S \otimes \boldsymbol{x}) + \tilde{\boldsymbol{n}}
+\end{aligned}$$
+
+where $S_1, \dots, S_L$ are the sensitivity maps of each coil.
+Such maps can be acquired separately by sampling the k-space low frequencies, or estimated from the data itself.
+
+### Off-resonance correction model
+The constant magnetic field $B_0$ applied in an MRI machine (typically 1.5, 3 or 7 teslas) is inherently disturbed by metal implants or even simply by differences in the magnetic susceptibilities of tissues (such as at air-tissue interfaces close to the nose and ear canals).
+Those field perturbations introduce a spatially varying phase shift in the acquired frequencies (noted $\Delta\omega_0$), causing the physical model to deviate from the ideal Fourier model.
+Fortunately, this inhomogeneity map can be acquired separately or estimated, and then integrated into the model as:
+
+$$y(t_i) = \int_{\mathbb{R}^d} x(\boldsymbol{u}) e^{-2\imath\pi \boldsymbol{u} \cdot\boldsymbol{\nu}_i + \Delta\omega(\boldsymbol{u}) t_i} d\boldsymbol{u}$$
+
+where $t_i$ is the time at which the frequency $\boldsymbol{\nu}_i$ is acquired.
+With these mixed-domain field perturbations, the Fourier model no longer holds, and the FFT algorithm can no longer be used directly.
+The main solution [@sutton_fast_2003] is to interpolate the mixed-domain exponential term by splitting it into single-domain weights $b_{m, \ell}$ and $c_{\ell, n}$, where $L \ll M, N$, so that only $L$ regular Fourier transforms are needed to approximate the non-Fourier transform:
+
+$$x(\boldsymbol{u}_n) = \sum_{\ell=1}^L c_{\ell, n} \sum_{m=1}^M y(t_m) b_{m, \ell} e^{2\imath\pi \boldsymbol{u}_n \cdot \boldsymbol{\nu}_m}$$
+
+The coefficients $B=(b_{m, \ell}) \in \mathbb{C}^{M\times L}$ and $C=(c_{\ell, n}) \in \mathbb{C}^{L\times N}$ can be estimated within MRI-NUFFT.
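+
+As a toy illustration of the structure of this approximation, the following sketch applies the corrected adjoint with random stand-ins for $B$ and $C$ (in practice these interpolators are estimated from the field map, and each inner transform is a NUFFT rather than a dense matrix product):
+
+```python
+import numpy as np
+
+rng = np.random.default_rng(0)
+M, N, L = 500, 128, 6  # k-space samples, voxels, interpolation segments (toy sizes)
+nu = rng.uniform(-0.5, 0.5, M)  # 1D non-uniform frequency locations
+u = np.arange(N)  # uniform voxel locations
+y = rng.standard_normal(M) + 1j * rng.standard_normal(M)  # k-space data
+B = rng.standard_normal((M, L)) + 1j * rng.standard_normal((M, L))  # stand-in for B
+C = rng.standard_normal((L, N)) + 1j * rng.standard_normal((L, N))  # stand-in for C
+
+E = np.exp(2j * np.pi * np.outer(u, nu))  # dense (N, M) adjoint NUDFT matrix
+# x_n = sum_l c_{l,n} * sum_m b_{m,l} y_m exp(2i*pi*u_n*nu_m): L adjoint NUDFTs
+x = sum(C[l] * (E @ (B[:, l] * y)) for l in range(L))
+```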
+
+## Trajectories generation and expansions
+MRI-NUFFT comes with a wide variety of non-Cartesian trajectory generation routines gathered from the literature. It also provides ways to extend existing trajectories and to export them to specific formats, for use in other toolkits and on MRI hardware.
+
+## Auto-differentiation for data and sampling pattern
+
+Following the formulation of [@wang_efficient_2023], MRI-NUFFT provides automatic differentiation for all NUFFT backends, with respect to both the data (image or k-space) and the sampling locations. This enables efficient backpropagation through NUFFT operators and supports research on learned sampling patterns and image reconstruction networks.
+
+# MRI-NUFFT utilization
+MRI-NUFFT is already used in conjunction with other software such as SNAKE-fMRI [@comby_snake-fmri_2024], deepinv [@tachella_deepinverse_2023] and PySAP-MRI [@farrens_pysap_2020; @gueddari_pysap-mri_2020].
+
+# References
+

From 14c2738a9a677b3b7bb0b5575f07a881929b4e5a Mon Sep 17 00:00:00 2001
From: LenaOudjman
Date: Wed, 13 Nov 2024 10:30:45 +0100
Subject: [PATCH 2/8] Simple unet training (#199)

* add cg function in base
* add test cg
* fix cg function 1
* some changes
* fix minor: import and style
* first draft simple unet
* new draft
* reduce artifacts of radial traj
* minor
* minor
* style
* change traj and norm
* clean in a jupyter, fix error
* styles
* rm cg stuff, fix more
* add fastmri import
* \!docs_build
* fix ref
* rename and move
* syntax fix
* try different lr
* \!docs_build
* Updates to trajectory
* \!docs_build finalizing changes

---------

Co-authored-by: Lena OUDJMAN
Co-authored-by: Chaithya G R
---
 .github/workflows/test-ci.yml        |   2 +-
 examples/GPU/example_fastMRI_UNet.py | 226 +++++++++++++++++++++++++++
 2 files changed, 227 insertions(+), 1 deletion(-)
 create mode 100644 examples/GPU/example_fastMRI_UNet.py

diff --git a/.github/workflows/test-ci.yml b/.github/workflows/test-ci.yml
index e0ea2afe..dcb920bf 100644
--- a/.github/workflows/test-ci.yml
+++ b/.github/workflows/test-ci.yml
@@ -207,7 +207,7 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         python -m pip install -e .[test,dev]
-        python -m pip install finufft pooch brainweb-dl torch
+        python -m pip install finufft pooch brainweb-dl torch fastmri
 
     - name: Install GPU related interfaces
       run: |

diff --git a/examples/GPU/example_fastMRI_UNet.py b/examples/GPU/example_fastMRI_UNet.py
new file mode 100644
index 00000000..2b46ecab
--- /dev/null
+++ b/examples/GPU/example_fastMRI_UNet.py
@@ -0,0 +1,226 @@
+# %%
+r"""
+==================
+Simple UNet model.
+==================
+
+This model is a simplified version of the U-Net architecture,
+which is widely used for image segmentation tasks.
+It is implemented in the open-source fastMRI package [fastmri]_.
+
+The U-Net model consists of an encoder (downsampling path) and
+a decoder (upsampling path) with skip connections between corresponding
+layers in the encoder and decoder.
+These skip connections help in retaining spatial information
+that is lost during the downsampling process.
+
+The primary purpose of this model is to perform image reconstruction tasks,
+specifically for MRI images.
+It takes an input MRI image and reconstructs it to improve the image quality
+or to recover missing parts of the image.
+
+This implementation of the U-Net model was pulled from the fastMRI Facebook
+repository, which is a collaborative research project aimed at advancing
+the field of medical imaging using machine learning techniques.
+
+.. math::
+
+    \mathbf{\hat{x}} = \mathrm{arg} \min_{\mathbf{x}} || \mathcal{U}_\mathbf{\theta}(\mathbf{y}) - \mathbf{x} ||_2^2
+
+where:
+- \( \mathbf{\hat{x}} \) is the reconstructed MRI image,
+- \( \mathbf{x} \) is the ground truth image,
+- \( \mathbf{y} \) is the input MRI image (e.g., k-space data),
+- \( \mathcal{U}_\mathbf{\theta} \) is the U-Net model parameterized by \( \theta \).
+
+.. warning::
+    We train on a single image here. In practice, this should be done on a database like fastMRI [fastmri]_.
+"""
+
+# %%
+# Imports
+import os
+from pathlib import Path
+import shutil
+import brainweb_dl as bwdl
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+from tqdm import tqdm
+import time
+import joblib
+from PIL import Image
+import tempfile as tmp
+
+from fastmri.models import Unet
+from mrinufft import get_operator
+from mrinufft.trajectories import initialize_2D_cones

+# %%
+# Set up a simple class for the U-Net model
+
+
+class Model(torch.nn.Module):
+    """Model for MRI reconstruction using a U-Net."""
+
+    def __init__(self, initial_trajectory):
+        super().__init__()
+        self.operator = get_operator("gpunufft", wrt_data=True)(
+            initial_trajectory,
+            shape=(256, 256),
+            density=True,
+            squeeze_dims=False,
+        )
+        self.unet = Unet(in_chans=1, out_chans=1, chans=32, num_pool_layers=4)
+
+    def forward(self, kspace):
+        """Forward pass of the model."""
+        image = self.operator.adj_op(kspace)
+        recon = self.unet(image.float()).abs()
+        recon /= torch.mean(recon)
+        return recon
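+
+# %%
+# Note: with ``density=True`` above, the operator also estimates density
+# compensation weights, so ``adj_op`` directly returns a density-compensated
+# adjoint, i.e. a reasonable first reconstruction that the U-Net only needs
+# to refine. With ``squeeze_dims=False``, batch and coil axes are kept, so
+# the adjoint output has shape ``(n_batch, n_coils, 256, 256)``, matching
+# the single-channel input expected by ``Unet(in_chans=1, ...)``.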
+
+
+# %%
+# Utility function to plot the state of the model
+def plot_state(axs, mri_2D, traj, recon, loss=None, save_name=None):
+    """Image plotting function.
+
+    Plot the original MRI image, the trajectory, the reconstructed image,
+    and the loss curve (if provided). Saves the plot if a filename is provided.
+
+    Parameters
+    ----------
+    axs : numpy.ndarray
+        Array of matplotlib axes to plot on.
+    mri_2D : torch.Tensor
+        Original MRI image.
+    traj : numpy.ndarray
+        Trajectory.
+    recon : torch.Tensor
+        Reconstructed image after training.
+    loss : list, optional
+        List of loss values to plot. Defaults to None.
+    save_name : str, optional
+        Filename to save the plot. Defaults to None.
+    """
+    axs = axs.flatten()
+    axs[0].imshow(np.abs(mri_2D[0]), cmap="gray")
+    axs[0].axis("off")
+    axs[0].set_title("MR Image")
+    axs[1].scatter(*traj.T, s=0.5)
+    axs[1].set_title("Trajectory")
+    axs[2].imshow(np.abs(recon[0][0].detach().cpu().numpy()), cmap="gray")
+    axs[2].axis("off")
+    axs[2].set_title("Reconstruction")
+    if loss is not None:
+        axs[3].plot(loss)
+        axs[3].grid("on")
+        axs[3].set_title("Loss")
+    if save_name is not None:
+        plt.savefig(save_name, bbox_inches="tight")
+        plt.close()
+    else:
+        plt.show()
+
+
+# %%
+# Setup Inputs (models, trajectory and image)
+init_traj = initialize_2D_cones(32, 256).reshape(-1, 2).astype(np.float32)
+model = Model(init_traj)
+model.eval()
+
+# %%
+# Get the image on which we will train our U-Net Model
+mri_2D = torch.Tensor(np.flipud(bwdl.get_mri(4, "T1")[80, ...]).astype(np.complex64))[
+    None
+]
+mri_2D = mri_2D / torch.mean(mri_2D)
+kspace_mri_2D = model.operator.op(mri_2D)
+
+# Before training, here is the simple reconstruction we have using a
+# density compensated adjoint.
+dc_adjoint = model.operator.adj_op(kspace_mri_2D)
+fig, axs = plt.subplots(1, 3, figsize=(15, 5))
+plot_state(axs, mri_2D, init_traj, dc_adjoint)
+
+
+# %%
+# Start training loop
+epoch = 100
+optimizer = torch.optim.RAdam(model.parameters(), lr=1e-3)
+losses = []  # Store the loss values and create an animation
+image_files = []  # Store the images to create a gif
+model.train()
+
+with tqdm(range(epoch), unit="steps") as tqdms:
+    for i in tqdms:
+        out = model(kspace_mri_2D)  # Forward pass
+
+        loss = torch.nn.functional.l1_loss(out, mri_2D[None])  # Compute loss
+        tqdms.set_postfix({"loss": loss.item()})  # Update progress bar
+        losses.append(loss.item())  # Store loss value
+
+        optimizer.zero_grad()  # Zero gradients
+        loss.backward()  # Backward pass
+        optimizer.step()  # Update weights
+
+        # Generate images for gif
+        hashed = joblib.hash((i, "learn_traj", time.time()))
+        filename = f"{tmp.NamedTemporaryFile().name}.png"
+        fig, axs = plt.subplots(2, 2, figsize=(10, 10))
+        plot_state(
+            axs,
+            mri_2D,
+            init_traj,
+            out,
+            losses,
+            save_name=filename,
+        )
+        image_files.append(filename)
+
+
+# Make a GIF of all images.
+imgs = [Image.open(img) for img in image_files]
+imgs[0].save(
+    "mrinufft_learn_unet.gif",
+    save_all=True,
+    append_images=imgs[1:],
+    optimize=False,
+    duration=2,
+    loop=0,
+)
+# sphinx_gallery_start_ignore
+# Cleanup
+for f in image_files:
+    try:
+        os.remove(f)
+    except OSError:
+        continue
+# don't raise errors from pytest.
+# This will only be executed for the sphinx gallery stuff
+
+try:
+    final_dir = (
+        Path(os.getcwd()).parent.parent
+        / "docs"
+        / "generated"
+        / "autoexamples"
+        / "GPU"
+        / "images"
+    )
+    shutil.copyfile("mrinufft_learn_Unet.gif", final_dir / "mrinufft_learn_Unet.gif")
+except FileNotFoundError:
+    pass
+# sphinx_gallery_end_ignore
+# %%
+# Reconstruction from partially trained U-Net model
+model.eval()
+new_recon = model(kspace_mri_2D)
+fig, axs = plt.subplots(2, 2, figsize=(10, 10))
+plot_state(axs, mri_2D, init_traj, new_recon, losses)
+plt.show()
+
+# %%
+# References
+# ==========
+#
+# .. [fastmri] O. Ronneberger, P. Fischer, and Thomas Brox. U-net: Convolutional networks
+#    for biomedical image segmentation. In International Conference on Medical
+#    image computing and computer-assisted intervention, pages 234-241.
+#    Springer, 2015.
+# https://github.com/facebookresearch/fastMRI/blob/main/fastmri/models/unet.py From a6bd869b14fe53327b6c6ae52dfbbcd8e1a8d64d Mon Sep 17 00:00:00 2001 From: Chaithya G R Date: Thu, 14 Nov 2024 09:26:48 +0100 Subject: [PATCH 3/8] HotFix for docs (#207) * HotFix for docs * !docs_build --- .github/workflows/master-cd.yml | 1 + .github/workflows/test-ci.yml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/master-cd.yml b/.github/workflows/master-cd.yml index 6dad6755..47166fe0 100644 --- a/.github/workflows/master-cd.yml +++ b/.github/workflows/master-cd.yml @@ -42,6 +42,7 @@ jobs: destination_dir: . # Ensure you deploy to the root of the gh-pages branch publish_branch: gh-pages keep_files: false + # build-n-publish: # name: Build and publish Python 🐍 distributions 📦 to TestPyPI diff --git a/.github/workflows/test-ci.yml b/.github/workflows/test-ci.yml index dcb920bf..e53549de 100644 --- a/.github/workflows/test-ci.yml +++ b/.github/workflows/test-ci.yml @@ -215,7 +215,7 @@ jobs: export PATH=/usr/local/cuda-12.1/bin/:${PATH} export LD_LIBRARY_PATH=/usr/local/cuda-12.1/lib64/:${LD_LIBRARY_PATH} pip install cupy-cuda12x torch - python -m pip install gpuNUFFT "cufinufft<2.3" sigpy scikit-image + python -m pip install gpuNUFFT "cufinufft<2.3" sigpy scikit-image fastmri - name: Run examples shell: bash From 2d05f419842b41eec3fd00d0e466d1defbcc3bf9 Mon Sep 17 00:00:00 2001 From: Chaithya G R Date: Mon, 18 Nov 2024 10:26:30 +0100 Subject: [PATCH 4/8] Fix example U Net learning (#209) * Added * Fix * Remove bymistake add * Fix * Fixed lint * Lint * Added refbackend * Fix NDFT * feat: use finufft as ref backend. * feat(tests): move ndft vs nufft tests to own file. * \docs_build [docs] * Added some changes for and some helopful gitignore --------- Co-authored-by: chaithyagr Co-authored-by: Pierre-antoine Comby --- .github/workflows/test-ci.yml | 8 ++++---- .gitignore | 2 ++ examples/GPU/example_fastMRI_UNet.py | 23 +++++++++++++++-------- 3 files changed, 21 insertions(+), 12 deletions(-) diff --git a/.github/workflows/test-ci.yml b/.github/workflows/test-ci.yml index e53549de..5ae8cd2d 100644 --- a/.github/workflows/test-ci.yml +++ b/.github/workflows/test-ci.yml @@ -20,7 +20,7 @@ env: jobs: test-cpu: runs-on: cpu - if: ${{ !contains(github.event.head_commit.message, '!style') || github.ref == 'refs/heads/master' }} + if: ${{ !contains(github.event.head_commit.message, '[style]') || github.ref == 'refs/heads/master' }} strategy: matrix: backend: [finufft, pynfft, pynufft-cpu, bart, sigpy, torchkbnufft-cpu] @@ -97,7 +97,7 @@ jobs: test-gpu: runs-on: gpu - if: ${{ !contains(github.event.head_commit.message, '!style') || github.ref == 'refs/heads/master' }} + if: ${{ !contains(github.event.head_commit.message, '[style]') || github.ref == 'refs/heads/master' }} strategy: matrix: backend: [cufinufft, gpunufft, torchkbnufft-gpu, tensorflow] @@ -186,7 +186,7 @@ jobs: test-examples: runs-on: gpu needs: get-commit-message - if: ${{ !contains(needs.get-commit-message.outputs.message, '!style') || github.ref == 'refs/heads/master' }} + if: ${{ !contains(needs.get-commit-message.outputs.message, '[style]') || github.ref == 'refs/heads/master' }} steps: - uses: actions/checkout@v4 @@ -298,7 +298,7 @@ jobs: name: Build API Documentation runs-on: gpu needs: get-commit-message - if: ${{ contains(needs.get-commit-message.outputs.message, '!docs_build') || github.ref == 'refs/heads/master' }} + if: ${{ contains(needs.get-commit-message.outputs.message, '[docs]') || github.ref == 
'refs/heads/master' }} steps: - name: Checkout uses: actions/checkout@v4 diff --git a/.gitignore b/.gitignore index c3f2bdd3..113867bd 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,6 @@ *.npy +*.gif +docs/sg_execution_times.rst build/ dist/ *.egg-info/ diff --git a/examples/GPU/example_fastMRI_UNet.py b/examples/GPU/example_fastMRI_UNet.py index 2b46ecab..036914f0 100644 --- a/examples/GPU/example_fastMRI_UNet.py +++ b/examples/GPU/example_fastMRI_UNet.py @@ -27,11 +27,8 @@ \mathbf{\hat{x}} = \mathrm{arg} \min_{\mathbf{x}} || \mathcal{U}_\mathbf{\theta}(\mathbf{y}) - \mathbf{x} ||_2^2 -where: -- \( \mathbf{\hat{x}} \) is the reconstructed MRI image, -- \( \mathbf{x} \) is the ground truth image, -- \( \mathbf{y} \) is the input MRI image (e.g., k-space data), -- \( \mathcal{U}_\mathbf{\theta} \) is the U-Net model parameterized by \( \theta \). +where :math:`\mathbf{\hat{x}}` is the reconstructed MRI image, :math:`\mathbf{x}` is the ground truth image, +:math:`\mathbf{y}` is the input MRI image (e.g., k-space data), and :math:`\mathcal{U}_\mathbf{\theta}` is the U-Net model parameterized by :math:`\theta`. .. warning:: We train on a single image here. In practice, this should be done on a database like fastMRI [fastmri]_. @@ -141,13 +138,13 @@ def plot_state(axs, mri_2D, traj, recon, loss=None, save_name=None): # %% # Start training loop -epoch = 100 +num_epochs = 2 optimizer = torch.optim.RAdam(model.parameters(), lr=1e-3) losses = [] # Store the loss values and create an animation image_files = [] # Store the images to create a gif model.train() -with tqdm(range(epoch), unit="steps") as tqdms: +with tqdm(range(num_epochs), unit="steps") as tqdms: for i in tqdms: out = model(kspace_mri_2D) # Forward pass @@ -203,10 +200,20 @@ def plot_state(axs, mri_2D, traj, recon, loss=None, save_name=None): / "GPU" / "images" ) - shutil.copyfile("mrinufft_learn_Unet.gif", final_dir / "mrinufft_learn_Unet.gif") + shutil.copyfile("mrinufft_learn_unet.gif", final_dir / "mrinufft_learn_unet.gif") except FileNotFoundError: pass + # sphinx_gallery_end_ignore + +# sphinx_gallery_thumbnail_path = 'generated/autoexamples/GPU/images/mrinufft_learn_unet.gif' + +# %% +# .. 
image-sg:: /generated/autoexamples/GPU/images/mrinufft_learn_unet.gif +# :alt: example learn_samples +# :srcset: /generated/autoexamples/GPU/images/mrinufft_learn_unet.gif +# :class: sphx-glr-single-img + # %% # Reconstruction from partially trained U-Net model model.eval() From a21777fe44e02efdd6b3a93afef00c5c9a9d6418 Mon Sep 17 00:00:00 2001 From: Chaithya G R Date: Mon, 18 Nov 2024 11:35:15 +0100 Subject: [PATCH 5/8] [hotfix] Fix back the num_epochs --- examples/GPU/example_fastMRI_UNet.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/GPU/example_fastMRI_UNet.py b/examples/GPU/example_fastMRI_UNet.py index 036914f0..230a5472 100644 --- a/examples/GPU/example_fastMRI_UNet.py +++ b/examples/GPU/example_fastMRI_UNet.py @@ -138,7 +138,7 @@ def plot_state(axs, mri_2D, traj, recon, loss=None, save_name=None): # %% # Start training loop -num_epochs = 2 +num_epochs = 100 optimizer = torch.optim.RAdam(model.parameters(), lr=1e-3) losses = [] # Store the loss values and create an animation image_files = [] # Store the images to create a gif From 175d752a37d5284045f3f7ab8809023f5bcc7d84 Mon Sep 17 00:00:00 2001 From: Pierre-antoine Comby Date: Tue, 19 Nov 2024 13:28:55 +0100 Subject: [PATCH 6/8] feat: tidy bib --- docs/paper-joss/paper.bib | 219 +++++++++++++++++--------------------- 1 file changed, 95 insertions(+), 124 deletions(-) diff --git a/docs/paper-joss/paper.bib b/docs/paper-joss/paper.bib index a31ab269..2cf9fb4b 100644 --- a/docs/paper-joss/paper.bib +++ b/docs/paper-joss/paper.bib @@ -1,154 +1,125 @@ @unpublished{shih_cufinufft_2021, - title = {{{cuFINUFFT}}: A Load-Balanced {{GPU}} Library for General-Purpose Nonuniform {{FFTs}}}, - shorttitle = {{{cuFINUFFT}}}, - author = {Shih, Yu-hsuan and Wright, Garrett and Andén, Joakim and Blaschke, Johannes and Barnett, Alex H.}, - date = {2021-03-25}, - eprint = {2102.08463}, - eprinttype = {arXiv}, - eprintclass = {cs, eess, math}, - url = {http://arxiv.org/abs/2102.08463}, - urldate = {2021-12-10}, - abstract = {Nonuniform fast Fourier transforms dominate the computational cost in many applications including image reconstruction and signal processing. We thus present a generalpurpose GPU-based CUDA library for type 1 (nonuniform to uniform) and type 2 (uniform to nonuniform) transforms in dimensions 2 and 3, in single or double precision. It achieves high performance for a given user-requested accuracy, regardless of the distribution of nonuniform points, via cache-aware point reordering, and load-balanced blocked spreading in shared memory. At low accuracies, this gives on-GPU throughputs around 109 nonuniform points per second, and (even including hostdevice transfer) is typically 4–10× faster than the latest parallel CPU code FINUFFT (at 28 threads). It is competitive with two established GPU codes, being up to 90× faster at high accuracy and/or type 1 clustered point distributions. 
Finally we demonstrate a 5–12× speedup versus CPU in an X-ray diffraction 3D iterative reconstruction task at 10−12 accuracy, observing excellent multi-GPU weak scaling up to one rank per GPU.}, - langid = {english}, - keywords = {{Computer Science - Distributed, Parallel, and Cluster Computing},Computer Science - Mathematical Software,Electrical Engineering and Systems Science - Signal Processing,Mathematics - Numerical Analysis,No DOI found}, - file = {/volatile/home/pc266769/Zotero/storage/K5LLWXZE/shih_cufinufft_2021.pdf} + title = {{{cuFINUFFT}}: A Load-Balanced {{GPU}} Library for General-Purpose Nonuniform {{FFTs}}}, + shorttitle = {{cuFINUFFT}}, + author = {Shih, Yu-hsuan and Wright, Garrett and Andén, Joakim and Blaschke, Johannes and Barnett, Alex H.}, + url = {http://arxiv.org/abs/2102.08463}, + urldate = {2021-12-10}, + date = {2021-03-25}, + eprint = {2102.08463}, + doi = {10.48550/arXiv.2102.08463}, + eprinttype = {arXiv}, + eprintclass = {cs, eess, math}, + langid = {english} } - @inproceedings{uecker_berkley_2015, - title = {Berkley Advanced Reconstruction Toolbox}, - shorttitle = {Mrirecon/Bart}, - booktitle = {Proc. {{Intl}}. {{Soc}}. {{Mag}}. {{Reson}}. {{Med}}. 23}, - author = {Uecker, Martin and Ong, Frank and Tamir, J}, - date = {2015}, - location = {Toronto}, - url = {https://zenodo.org/records/10277939}, - urldate = {2023-12-19}, - keywords = {No DOI found}, - file = {/volatile/home/pc266769/Zotero/storage/LIMD2P5S/10277939.html} + title = {Berkley Advanced Reconstruction Toolbox}, + shorttitle = {Mrirecon/Bart}, + author = {Uecker, Martin and Ong, Frank and Tamir, J}, + booktitle = {Proc. {{Intl}}. {{Soc}}. {{Mag}}. {{Reson}}. {{Med}}. 23}, + location = {Toronto}, + url = {https://zenodo.org/records/10277939}, + doi = {10.5281/zenodo.592960}, + urldate = {2023-12-19}, + date = 2015 } - @inproceedings{ong_frank_sigpy_2019, - title = {{{SigPy}}: {{A Python Package}} for {{High Performance Iterative Reconstruction}}}, - booktitle = {{{ISMRM}} 2019}, - author = {{Ong Frank} and {Lustig Michael}}, - date = {2019}, - abstract = {We present SigPy, a Python package designed for high performance iterative reconstruction. Its main features include: - A unified CPU and GPU Python interface to signal processing functions, including convolution, FFT, NUFFT, wavelet transform, and thresholding functions. - Convenient classes (Linop, Prox, Alg, App) to build more complicated iterative reconstruction algorithms. - Commonly used MRI reconstruction methods as Apps, including SENSE, L1-wavelet regularized reconstruction, total-variation regularized reconstruction, and JSENSE. - MRI-specific functions, including poisson-disc sampling, ESPIRiT calibration, and non-Cartesian preconditioners. - Simple installation via pip and conda.}, - eventtitle = {{{ISMRM}}}, - keywords = {No DOI found} + title = {{{SigPy}}: {{A Python Package}} for {{High Performance Iterative Reconstruction}}}, + author = {{Ong Frank} and {Lustig Michael}}, + booktitle = {{{ISMRM}} 2019}, + date = 2019, + eventtitle = {{ISMRM}} } - @article{sutton_fast_2003, - title = {Fast, Iterative Image Reconstruction for {{MRI}} in the Presence of Field Inhomogeneities}, - author = {Sutton, B.P. and Noll, D.C. and Fessler, J.A.}, - date = {2003-02}, + title = {Fast, Iterative Image Reconstruction for {{MRI}} in the Presence of Field Inhomogeneities}, + author = {Sutton, B.P. and Noll, D.C. 
and Fessler, J.A.}, + volume = 22, + number = 2, + pages = {178--188}, + doi = {10.1109/TMI.2002.808360}, + issn = {1558-254X}, + date = {2003-02}, journaltitle = {IEEE Transactions on Medical Imaging}, - volume = {22}, - number = {2}, - pages = {178--188}, - issn = {1558-254X}, - doi = {10.1109/TMI.2002.808360}, - abstract = {In magnetic resonance imaging, magnetic field inhomogeneities cause distortions in images that are reconstructed by conventional fast Fourier transform (FFT) methods. Several noniterative image reconstruction methods are used currently to compensate for field inhomogeneities, but these methods assume that the field map that characterizes the off-resonance frequencies is spatially smooth. Recently, iterative methods have been proposed that can circumvent this assumption and provide improved compensation for off-resonance effects. However, straightforward implementations of such iterative methods suffer from inconveniently long computation times. This paper describes a tool for accelerating iterative reconstruction of field-corrected MR images: a novel time-segmented approximation to the MR signal equation. We use a min-max formulation to derive the temporal interpolator. Speedups of around 60 were achieved by combining this temporal interpolator with a nonuniform fast Fourier transform with normalized root mean squared approximation errors of 0.07\%. The proposed method provides fast, accurate, field-corrected image reconstruction even when the field map is not smooth.}, - eventtitle = {{{IEEE Transactions}} on {{Medical Imaging}}}, - keywords = {Biomedical engineering,Frequency,Image reconstruction,Image segmentation,Iterative methods,Magnetic fields,Magnetic resonance imaging,Optical imaging,Reconstruction algorithms,Spirals}, - file = {/volatile/home/pc266769/Zotero/storage/8XA5ZU44/sutton_fast_2003.pdf} + eventtitle = {{{IEEE Transactions}} on {{Medical Imaging}}} } - @article{fessler_nonuniform_2003, - title = {Nonuniform Fast Fourier Transforms Using Min-Max Interpolation}, - author = {Fessler, J.A. and Sutton, B.P.}, - date = {2003-02}, + title = {Nonuniform Fast Fourier Transforms Using Min-Max Interpolation}, + author = {Fessler, J.A. and Sutton, B.P.}, + volume = 51, + number = 2, + pages = {560--574}, + doi = {10.1109/tsp.2002.807005}, + issn = {1053-587X}, + url = {http://ieeexplore.ieee.org/document/1166689/}, + urldate = {2021-05-03}, + date = {2003-02}, journaltitle = {IEEE Transactions on Signal Processing}, shortjournal = {IEEE Trans. Signal Process.}, - volume = {51}, - number = {2}, - pages = {560--574}, - issn = {1053-587X}, - doi = {10.1109/tsp.2002.807005}, - url = {http://ieeexplore.ieee.org/document/1166689/}, - urldate = {2021-05-03}, - abstract = {The FFT is used widely in signal processing for efficient computation of the Fourier transform (FT) of finitelength signals over a set of uniformly-spaced frequency locations. However, in many applications, one requires nonuniform sampling in the frequency domain, i.e., a nonuniform FT. Several papers have described fast approximations for the nonuniform FT based on interpolating an oversampled FFT. This paper presents an interpolation method for the nonuniform FT that is optimal in the min-max sense of minimizing the worst-case approximation error over all signals of unit norm. The proposed method easily generalizes to multidimensional signals. Numerical results show that the min-max approach provides substantially lower approximation errors than conventional interpolation methods. 
The min-max criterion is also useful for optimizing the parameters of interpolation kernels such as the Kaiser-Bessel function.}, - langid = {english}, - file = {/volatile/home/pc266769/Zotero/storage/4NDF5834/fessler_nonuniform_2003.pdf} + langid = {english} } - @article{wang_efficient_2023, - title = {Efficient Approximation of {{Jacobian}} Matrices Involving a Non-Uniform Fast {{Fourier}} Transform ({{NUFFT}})}, - author = {Wang, Guanhua and Fessler, Jeffrey A.}, - date = {2023}, + title = {Efficient Approximation of {{Jacobian}} Matrices Involving a Non-Uniform Fast {{Fourier}} Transform ({{NUFFT}})}, + author = {Wang, Guanhua and Fessler, Jeffrey A.}, + volume = 9, + pages = {43--54}, + doi = {10.1109/TCI.2023.3240081}, + issn = {2333-9403, 2334-0118, 2573-0436}, + url = {http://arxiv.org/abs/2111.02912}, + urldate = {2024-04-11}, + date = 2023, journaltitle = {IEEE Transactions on Computational Imaging}, shortjournal = {IEEE Trans. Comput. Imaging}, - volume = {9}, - eprint = {2111.02912}, - eprinttype = {arXiv}, - eprintclass = {eess}, - pages = {43--54}, - issn = {2333-9403, 2334-0118, 2573-0436}, - doi = {10.1109/TCI.2023.3240081}, - url = {http://arxiv.org/abs/2111.02912}, - urldate = {2024-04-11}, - abstract = {There is growing interest in learning k-space sampling patterns for MRI using optimization approaches [1], [2], [3], [4]. For non-Cartesian sampling patterns, reconstruction methods typically involve non-uniform FFT (NUFFT) operations. A typical NUFFT method contains frequency domain interpolation using Kaiser-Bessel kernel values that are retrieved by nearest neighbor look-up in a finely tabulated kernel [5]. That look-up operation is not differentiable with respect to the sampling pattern, complicating auto-differentiation routines for backpropagation (stochastic gradient descent) for sampling pattern optimization. This paper describes an efficient and accurate approach for computing approximate gradients with respect to the sampling pattern for learning k-space sampling. Various numerical experiments validate the accuracy of the proposed approximation. We also showcase the trajectories optimized for different iterative reconstruction algorithms, including smooth convex regularized reconstruction and compressed sensing-based reconstruction.}, - langid = {english}, - keywords = {Electrical Engineering and Systems Science - Image and Video Processing,Electrical Engineering and Systems Science - Signal Processing}, - file = {/volatile/home/pc266769/Zotero/storage/HU6FNVQU/Wang et Fessler - 2023 - Efficient approximation of Jacobian matrices invol.pdf} + eprint = {2111.02912}, + eprinttype = {arXiv}, + eprintclass = {eess}, + langid = {english} } @inproceedings{knoll_gpunufft_2014, - title={gpuNUFFT - An Open Source GPU Library for 3D Regridding with Direct Matlab Interface}, - author={Florian Knoll and Andreas Schwarzl and Clemens Diwoky and Daniel K. Sodickson}, - year={2014}, - url={https://api.semanticscholar.org/CorpusID:53652346} + title = {gpuNUFFT - An Open Source GPU Library for 3D Regridding with Direct Matlab Interface}, + author = {Florian Knoll and Andreas Schwarzl and Clemens Diwoky and Daniel K. Sodickson}, + year = 2014, + url = {https://api.semanticscholar.org/CorpusID:53652346} } @inproceedings{muckley_torchkbnufft_2020, - author = {M. J. Muckley and R. Stern and T. Murrell and F. 
Knoll}, - title = {{TorchKbNufft}: A High-Level, Hardware-Agnostic Non-Uniform Fast {Fourier} Transform}, - booktitle = {ISMRM Workshop on Data Sampling \& Image Reconstruction}, - year = 2020, - note = {Source code available at https://github.com/mmuckley/torchkbnufft}, + title = {{TorchKbNufft}: A High-Level, Hardware-Agnostic Non-Uniform Fast {Fourier} Transform}, + author = {M. J. Muckley and R. Stern and T. Murrell and F. Knoll}, + year = 2020, + booktitle = {ISMRM Workshop on Data Sampling \& Image Reconstruction}, + note = {Source code available at https://github.com/mmuckley/torchkbnufft} } @inproceedings{comby_snake-fmri_2024, - ids = {Comby_Vignaud_Ciuciu_2024}, - title = {{{SNAKE-fMRI}}: {{A}} Modular {{fMRI}} Simulator from the Space-Time Domain to k-Space Data and Back}, - booktitle = {{{ISMRM}} Annual Meeting, (in Press)}, - author = {Comby, P.-A. and Vignaud, A. and Ciuciu, P.}, - date = {2024}, - location = {Singapore}, - keywords = {No DOI found} + title = {{{SNAKE-fMRI}}: {{A}} Modular {{fMRI}} Simulator from the Space-Time Domain to k-Space Data and Back}, + author = {Comby, P.-A. and Vignaud, A. and Ciuciu, P.}, + booktitle = {{{ISMRM}} Annual Meeting, (in Press)}, + location = {Singapore}, + date = 2024 } - @article{farrens_pysap_2020, - title = {{{PySAP}}: {{Python Sparse Data Analysis Package}} for Multidisciplinary Image Processing}, - shorttitle = {{{PySAP}}}, - author = {Farrens, S. and Grigis, A. and El Gueddari, L. and Ramzi, Z. and G.r., Chaithya and Starck, S. and Sarthou, B. and Cherkaoui, H. and Ciuciu, P. and Starck, J. -L.}, - date = {2020-07-01}, + title = {{{PySAP}}: {{Python Sparse Data Analysis Package}} for Multidisciplinary Image Processing}, + shorttitle = {{PySAP}}, + author = {Farrens, S. and Grigis, A. and El Gueddari, L. and Ramzi, Z. and G.r., Chaithya and Starck, S. and Sarthou, B. and Cherkaoui, H. and Ciuciu, P. and Starck, J. -L.}, + volume = 32, + pages = 100402, + doi = {10.1016/j.ascom.2020.100402}, + issn = {2213-1337}, + url = {https://www.sciencedirect.com/science/article/pii/S2213133720300561}, + urldate = {2024-09-27}, + date = {2020-07-01}, journaltitle = {Astronomy and Computing}, - shortjournal = {Astronomy and Computing}, - volume = {32}, - pages = {100402}, - issn = {2213-1337}, - doi = {10.1016/j.ascom.2020.100402}, - url = {https://www.sciencedirect.com/science/article/pii/S2213133720300561}, - urldate = {2024-09-27}, - abstract = {We present the open-source image processing software package PySAP (Python Sparse data Analysis Package) developed for the COmpressed Sensing for Magnetic resonance Imaging and Cosmology (COSMIC) project. This package provides a set of flexible tools that can be applied to a variety of compressed sensing and image reconstruction problems in various research domains. In particular, PySAP offers fast wavelet transforms and a range of integrated optimisation algorithms. In this paper we present the features available in PySAP and provide practical demonstrations on astrophysical and magnetic resonance imaging data.}, - keywords = {Convex optimisation,Image processing,Open-source software,Reconstruction}, - file = {/volatile/home/pc266769/Zotero/storage/X4725MSA/Farrens et al. 
- 2020 - PySAP Python Sparse Data Analysis Package for multidisciplinary image processing.pdf} + shortjournal = {Astronomy and Computing} } - @software{tachella_deepinverse_2023, - title = {{{DeepInverse}}: {{A}} Deep Learning Framework for Inverse Problems in Imaging}, - shorttitle = {{{DeepInverse}}}, - author = {Tachella, Julian and Chen, Dongdong and Hurault, Samuel and Terris, Matthieu and Wang, Andrew}, - date = {2023-06}, - doi = {10.5281/zenodo.7982256}, - url = {https://github.com/deepinv/deepinv}, - urldate = {2024-09-27}, - abstract = {PyTorch library for solving imaging inverse problems using deep learning}, - version = {latest} + title = {{{DeepInverse}}: {{A}} Deep Learning Framework for Inverse Problems in Imaging}, + shorttitle = {{DeepInverse}}, + author = {Tachella, Julian and Chen, Dongdong and Hurault, Samuel and Terris, Matthieu and Wang, Andrew}, + doi = {10.5281/zenodo.7982256}, + url = {https://github.com/deepinv/deepinv}, + urldate = {2024-09-27}, + date = {2023-06}, + version = {latest} } - @inproceedings{gueddari_pysap-mri_2020, - ids = {gueddari_pysap-mri_2020-1,gueddari_pysap-mri_2020-2}, - title = {{{PySAP-MRI}}: A Python Package for {{MR}} Image Reconstruction}, - booktitle = {{{ISMRM}} Workshop on Data Sampling and Image Reconstruction}, - author = {Gueddari, Loubna and Gr, Chaithya and Ramzi, Zaccharie and Farrens, Samuel and Starck, Sophie and Grigis, Antoine and Starck, Jean-Luc and Ciuciu, Philippe}, - year = {2020}, + title = {{{PySAP-MRI}}: A Python Package for {{MR}} Image Reconstruction}, + author = {Gueddari, Loubna and Gr, Chaithya and Ramzi, Zaccharie and Farrens, Samuel and Starck, Sophie and Grigis, Antoine and Starck, Jean-Luc and Ciuciu, Philippe}, + year = 2020, + booktitle = {{{ISMRM}} Workshop on Data Sampling and Image Reconstruction} } - From 5a72436700bbdaebe4b3e0e2daea7eb8cbd68da5 Mon Sep 17 00:00:00 2001 From: Pierre-antoine Comby Date: Tue, 19 Nov 2024 13:33:21 +0100 Subject: [PATCH 7/8] feat: add doi --- docs/paper-joss/paper.bib | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/docs/paper-joss/paper.bib b/docs/paper-joss/paper.bib index 2cf9fb4b..d9e70f9b 100644 --- a/docs/paper-joss/paper.bib +++ b/docs/paper-joss/paper.bib @@ -86,12 +86,15 @@ @inproceedings{muckley_torchkbnufft_2020 booktitle = {ISMRM Workshop on Data Sampling \& Image Reconstruction}, note = {Source code available at https://github.com/mmuckley/torchkbnufft} } -@inproceedings{comby_snake-fmri_2024, - title = {{{SNAKE-fMRI}}: {{A}} Modular {{fMRI}} Simulator from the Space-Time Domain to k-Space Data and Back}, - author = {Comby, P.-A. and Vignaud, A. 
-  booktitle = {{{ISMRM}} Annual Meeting, (in Press)},
-  location = {Singapore},
-  date = 2024
+@misc{comby_snake-fmri_2024,
+  title={SNAKE-fMRI: A modular fMRI data simulator from the space-time domain to k-space and back},
+  author={Pierre-Antoine Comby and Alexandre Vignaud and Philippe Ciuciu},
+  year={2024},
+  eprint={2404.08282},
+  archivePrefix={arXiv},
+  primaryClass={eess.SP},
+  url={https://arxiv.org/abs/2404.08282},
+  doi={10.48550/arXiv.2404.08282}
 }
 @article{farrens_pysap_2020,
   title = {{{PySAP}}: {{Python Sparse Data Analysis Package}} for Multidisciplinary Image Processing},
@@ -121,5 +124,6 @@ @inproceedings{gueddari_pysap-mri_2020
   title = {{{PySAP-MRI}}: A Python Package for {{MR}} Image Reconstruction},
   author = {Gueddari, Loubna and Gr, Chaithya and Ramzi, Zaccharie and Farrens, Samuel and Starck, Sophie and Grigis, Antoine and Starck, Jean-Luc and Ciuciu, Philippe},
   year = 2020,
+  url = {https://inria.hal.science/hal-02399267},
   booktitle = {{{ISMRM}} Workshop on Data Sampling and Image Reconstruction}
 }

From 6e04a369d5db47119760c5b243a50849f83a9d16 Mon Sep 17 00:00:00 2001
From: Pierre-antoine Comby
Date: Tue, 19 Nov 2024 13:52:59 +0100
Subject: [PATCH 8/8] feat: add CONTRIBUTING.md

---
 CONTRIBUTING.md | 165 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 165 insertions(+)
 create mode 100644 CONTRIBUTING.md

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 00000000..ff44b1c3
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,165 @@
+
+# Contributing to MRI-NUFFT
+
+First off, thanks for taking the time to contribute! ❤️
+
+All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
+
+> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
+> - Star the project
+> - Tweet about it
+> - Refer this project in your project's readme
+> - Mention the project at local meetups and tell your friends/colleagues
+
+
+## Table of Contents
+
+- [Code of Conduct](#code-of-conduct)
+- [I Have a Question](#i-have-a-question)
+- [I Want To Contribute](#i-want-to-contribute)
+  - [Reporting Bugs](#reporting-bugs)
+  - [Suggesting Enhancements](#suggesting-enhancements)
+  - [Your First Code Contribution](#your-first-code-contribution)
+  - [Improving The Documentation](#improving-the-documentation)
+- [Styleguides](#styleguides)
+  - [Commit Messages](#commit-messages)
+
+
+## Code of Conduct
+
+This project and everyone participating in it are governed by the
+[MRI-NUFFT Code of Conduct](https://github.com/mind-inria/mri-nufft/blob/master/CODE_OF_CONDUCT.md).
+By participating, you are expected to uphold this code. Please report unacceptable behavior
+to the project maintainers.
+
+
+## I Have a Question
+
+> If you want to ask a question, we assume that you have read the available [Documentation](https://mind-inria.github.io/mri-nufft/).
+
+Before you ask a question, it is best to search for existing [Issues](https://github.com/mind-inria/mri-nufft/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
+
+If you then still feel the need to ask a question and need clarification, we recommend the following:
+
+- Open an [Issue](https://github.com/mind-inria/mri-nufft/issues/new).
+- Provide as much context as you can about what you're running into.
+- Provide project and platform versions (python, CUDA, torch versions notably), depending on what seems relevant; the sketch below shows one way to collect them.
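+
+As a convenience, here is a small sketch (our suggestion, not a tool shipped with the library) that gathers this information; the package names in the list are the ones most often relevant and can be adapted:
+
+```python
+# collect_env.py -- paste the output into your issue
+import platform
+import sys
+
+print("python  :", sys.version.replace("\n", " "))
+print("platform:", platform.platform())
+
+# report the versions of commonly relevant packages, if installed
+for name in ("numpy", "torch", "cupy", "finufft", "mrinufft"):
+    try:
+        module = __import__(name)
+        print(f"{name:8}:", getattr(module, "__version__", "unknown"))
+    except ImportError:
+        print(f"{name:8}: not installed")
+
+# CUDA details, when torch is available
+try:
+    import torch
+    print("CUDA    :", torch.version.cuda, "| available:", torch.cuda.is_available())
+except ImportError:
+    pass
+```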
+
+We will then take care of the issue as soon as possible.
+
+
+## I Want To Contribute
+
+> ### Legal Notice
+> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project licence (BSD-3-Clause).
+
+### Reporting Bugs
+
+#### Before Submitting a Bug Report
+
+A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
+
+- Make sure that you are using the latest version.
+- Determine if your bug is really a bug and not an error on your side, e.g. one caused by incompatible environment components/versions (make sure that you have read the [documentation](https://mind-inria.github.io/mri-nufft/); if you are looking for support, you might want to check [this section](#i-have-a-question)).
+- To see if other users have experienced (and potentially already solved) the same issue you are having, check whether a bug report for it already exists in the [bug tracker](https://github.com/mind-inria/mri-nufft/issues?q=label%3Abug).
+- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
+- Collect information about the bug:
+  - Stack trace (Traceback)
+  - OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
+  - Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
+  - Possibly your input and the output
+  - Can you reliably reproduce the issue? And can you also reproduce it with older versions?
+
+
+#### How Do I Submit a Good Bug Report?
+
+> You must never report security related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead, sensitive bugs must be reported privately by email to the maintainers.
+
+We use GitHub issues to track bugs and errors. If you run into an issue with the project:
+
+- Open an [Issue](https://github.com/mind-inria/mri-nufft/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
+- Explain the behavior you would expect and the actual behavior.
+- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case; a skeleton for one is sketched at the end of this section.
+- Provide the information you collected in the previous section.
+
+Once it's filed:
+
+- The project team will label the issue accordingly.
+- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
+- If the team is able to reproduce the issue, it will be marked `bug`, as well as possibly other tags (such as `critical`), and the issue will be left to be [implemented by someone](#your-first-code-contribution).
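+
+To make that concrete, a reduced test case can often be as small as the following sketch. The `get_operator` call follows the factory pattern shown in the project documentation and is only an illustration; substitute whatever call actually misbehaves for you:
+
+```python
+"""Minimal reproducible example for a bug report."""
+import numpy as np
+from mrinufft import get_operator
+
+# the smallest input that still triggers the problem
+rng = np.random.default_rng(0)
+kspace_locs = rng.uniform(-0.5, 0.5, size=(1000, 2)).astype(np.float32)
+image = rng.standard_normal((64, 64)).astype(np.complex64)
+
+# the call that misbehaves; note the expected vs. observed behavior here
+nufft = get_operator("finufft")(kspace_locs, shape=(64, 64))
+kspace_data = nufft.op(image)
+print(kspace_data.shape)  # expected: ..., observed: ...
+```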
+
+
+### Suggesting Enhancements
+
+This section guides you through submitting an enhancement suggestion for MRI-NUFFT, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
+
+
+#### Before Submitting an Enhancement
+
+- Make sure that you are using the latest version.
+- Read the [documentation](https://mind-inria.github.io/mri-nufft/) carefully and find out if the functionality is already covered, maybe by an individual configuration.
+- Perform a [search](https://github.com/mind-inria/mri-nufft/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
+- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
+
+
+#### How Do I Submit a Good Enhancement Suggestion?
+
+Enhancement suggestions are tracked as [GitHub issues](https://github.com/mind-inria/mri-nufft/issues).
+
+- Use a **clear and descriptive title** for the issue to identify the suggestion.
+- Provide a **step-by-step description of the suggested enhancement** in as much detail as possible.
+- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
+- **Explain why this enhancement would be useful** to most MRI-NUFFT users. You may also want to point out other projects that solved it better and which could serve as inspiration.
+
+
+### Your First Code Contribution
+
+The project team welcomes your code contributions. Before you start working with the project, please follow these steps:
+
+```bash
+git clone https://github.com/mind-inria/mri-nufft
+pip install -e "./mri-nufft[dev]"
+```
+Then proceed to implement your changes and, if possible, add tests and/or example code. When you are done, submit a pull request.
+
+
+### Improving The Documentation
+
+The documentation of MRI-NUFFT can be found at [https://mind-inria.github.io/mri-nufft/](https://mind-inria.github.io/mri-nufft/). If you want to help improve the documentation, you can do so by:
+- Adding new example scripts to the documentation (in the `examples` folder); a minimal skeleton is sketched after the build instructions below.
+- Improving the existing documentation (in the `docs` folder).
+
+The documentation is written in the [ReStructuredText](https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html) format and built using Sphinx.
+To build the documentation locally, you can run the following commands:
+```bash
+git clone https://github.com/mind-inria/mri-nufft
+pip install -e "./mri-nufft[dev,doc]"
+cd mri-nufft/docs
+# do your changes
+cd ..
+python -m sphinx docs docs_build
+# view the documentation in your browser
+python -m http.server --directory docs_build 8080
+# open localhost:8080 in your browser
+```
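+
+If the gallery is built with sphinx-gallery (which the `examples` folder layout suggests), a new example script starts with a reST docstring that becomes the page header, followed by plain Python; `# %%` comment blocks become paragraphs between code cells. A minimal skeleton (ours; the filename and title are placeholders) could look like:
+
+```python
+"""
+My new example
+==============
+
+One or two sentences describing what this example demonstrates.
+"""
+import matplotlib.pyplot as plt
+import numpy as np
+
+# %%
+# Each commented section like this one becomes a paragraph in the rendered page.
+samples = np.random.default_rng(42).uniform(-0.5, 0.5, (500, 2))
+
+plt.scatter(samples[:, 0], samples[:, 1], s=2)
+plt.title("Random 2D k-space sampling locations")
+plt.show()
+```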
+
+
+## Styleguides
+### Docstrings
+We use NumPy-style docstrings. You can find more information about them [here](https://numpydoc.readthedocs.io/en/latest/format.html); a short sketch closes this guide.
+
+### Commit Messages
+We recommend (but do not enforce) using the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) format for commit messages, e.g. `feat: add doi` or `feat: add CONTRIBUTING.md` as used in this repository's history. This format allows for better readability and automatic changelog generation.
+
+## Attribution
+This guide is based on the template from [contributing.md](https://contributing.md/generator)!
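+
+For quick reference, a function documented in the NumPy style described above might look like this (a generic sketch, not taken from the MRI-NUFFT codebase):
+
+```python
+import numpy as np
+
+
+def normalize(samples: np.ndarray, k_max: float = 0.5) -> np.ndarray:
+    """Rescale k-space sample locations to the [-k_max, k_max] range.
+
+    Parameters
+    ----------
+    samples : np.ndarray
+        Array of sampling locations, of shape (n_samples, dim).
+    k_max : float, optional
+        Target half-width of the sampling domain, by default 0.5.
+
+    Returns
+    -------
+    np.ndarray
+        The rescaled sampling locations.
+    """
+    return samples * (k_max / np.max(np.abs(samples)))
+```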