probe from numpy array will not be rescaled to photon count #596

Open · wants to merge 12 commits into base: dev
32 changes: 13 additions & 19 deletions .github/workflows/test.yml
@@ -20,54 +20,48 @@ on:
jobs:
build-linux:
runs-on: ubuntu-latest
defaults:
run:
shell: bash -el {0}
strategy:
max-parallel: 10
fail-fast: false
matrix:
python-version:
- "3.8"
- "3.9"
- "3.10"
- "3.11"
- "3.12"
- "3.13"
conda-env:
- "core"
- "full"
name: Python ${{ matrix.python-version }} and ${{ matrix.conda-env }} dependencies
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
- uses: conda-incubator/setup-miniconda@v3
with:
python-version: ${{ matrix.python-version }}
- name: Add conda to system path
run: |
# $CONDA is an environment variable pointing to the root of the miniconda directory
echo $CONDA/bin >> $GITHUB_PATH
conda --version
conda info
- name: Make sure conda is updated
run: |
conda update conda
conda --version
auto-update-conda: true
auto-activate-base: false
conda-remove-defaults: true
channels: conda-forge
activate-environment: ptypy_env
python-version: ${{ matrix.python-version }}
- name: Install ${{ matrix.conda-env }} dependencies
run: |
# replace python version in dependencies
sed -i 's/python/python=${{ matrix.python-version }}/' dependencies_${{ matrix.conda-env }}.yml
if [ ${{ matrix.conda-env }} == 'full' ] && [ ${{ matrix.python-version }} == '3.12' ]; then
sed -i '/- pyfftw/d' dependencies_${{ matrix.conda-env }}.yml
fi
# if [ ${{ matrix.conda-env }} == 'full' ] && [ ${{ matrix.python-version }} == '3.8' ]; then
# sed -i '/- mpi4py/d' dependencies_${{ matrix.conda-env }}.yml
# fi
# if [ ${{ matrix.conda-env }} == 'full' ] && [ ${{ matrix.python-version }} == '3.9' ]; then
# sed -i '/- mpi4py/d' dependencies_${{ matrix.conda-env }}.yml
# fi
conda install --solver=classic mpich
conda env update --file dependencies_${{ matrix.conda-env }}.yml --name base
conda env update --file dependencies_${{ matrix.conda-env }}.yml --name ptypy_env
conda install --solver=classic flake8 pytest pytest-cov
conda list
conda list
- name: Prepare ptypy
run: |
# Install ptypy
5 changes: 3 additions & 2 deletions cufft/extensions.py
@@ -4,8 +4,8 @@
import os, re
import subprocess
import sysconfig
from distutils.unixccompiler import UnixCCompiler
from distutils.command.build_ext import build_ext
from setuptools._distutils.unixccompiler import UnixCCompiler
from setuptools.command.build_ext import build_ext


def find_in_path(name, path):
@@ -116,6 +116,7 @@ def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
compiler_command = [self.CUDA["nvcc"]] + self.NVCC_FLAGS + self.OPTFLAGS + ["-Xcompiler"] + self.CXXFLAGS + CPPFLAGS
compiler_exec = " ".join(compiler_command)
self.set_executable('compiler_so', compiler_exec)
self.set_executable('compiler_so_cxx', compiler_exec)
postargs = [] # we don't actually have any postargs
super(NvccCompiler, self)._compile(obj, src, ext, cc_args, postargs, pp_opts) # the _compile method
# reset the default compiler_so, which we might have changed for cuda
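For reviewers, a minimal sketch of the override pattern this hunk relies on, assuming a recent setuptools whose vendored distutils dispatches C++ sources through a separate `compiler_so_cxx` executable; the nvcc flags and class body below are simplified placeholders, not the actual cufft build code.

```python
# Sketch only: override both compile executables so that C and C++ sources
# are routed through nvcc, mirroring the change in _compile() above.
from setuptools._distutils.unixccompiler import UnixCCompiler

class NvccCompilerSketch(UnixCCompiler):
    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        nvcc_cmd = "nvcc -O3 -Xcompiler -fPIC"  # hypothetical flag set
        self.set_executable('compiler_so', nvcc_cmd)
        # Newer vendored distutils compiles C++ via compiler_so_cxx, so it
        # must be overridden as well or C++ sources would bypass nvcc.
        self.set_executable('compiler_so_cxx', nvcc_cmd)
        super()._compile(obj, src, ext, cc_args, [], pp_opts)
```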
2 changes: 1 addition & 1 deletion cufft/setup.py
@@ -2,7 +2,7 @@

# we should aim to remove the distutils dependency
import setuptools
from distutils.core import setup, Extension
from setuptools import setup, Extension
import os

ext_modules = []
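For context, the supported replacement import is sketched below; the extension name and sources are hypothetical, not the real cufft module.

```python
# Sketch: setup/Extension now come from setuptools itself, replacing the
# removed distutils.core entry point.
from setuptools import setup, Extension

ext = Extension('filtered_cufft_sketch',                    # hypothetical name
                sources=['module.cpp', 'filtered_fft.cu'])  # hypothetical sources
# setup(name='cufft-sketch', ext_modules=[ext])             # kept inert here
```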
1 change: 1 addition & 0 deletions dependencies_dev.yml
@@ -10,6 +10,7 @@ dependencies:
- pyzmq
- pep8
- mpi4py
- packaging
- pillow
- pyfftw
- pip
1 change: 1 addition & 0 deletions dependencies_full.yml
@@ -9,6 +9,7 @@ dependencies:
- h5py
- pyzmq
- mpi4py
- packaging
- pillow
- pyfftw
- pyyaml
30 changes: 15 additions & 15 deletions doc/script2rst.py
@@ -1,5 +1,6 @@
import sys
import io
from importlib.resources import files
import contextlib
import os

@@ -9,13 +10,12 @@
'simupod.py',
'ownengine.py',
'subclassptyscan.py']
_ptypy_dir = files('ptypy')

if len(sys.argv) == 1:
import pkg_resources

for script in scripts:
scr = pkg_resources.resource_filename('ptypy', tutorial_dir+script)
if not os.path.exists(scr):
scr = _ptypy_dir / (tutorial_dir + script)
if not scr.exists():
print('Using backup tutorial for %s' % script)
scr = '../tutorial/'+script
#subprocess.call(['python',sys.argv[0]+' '+scr]) # doesn't work
@@ -50,13 +50,13 @@ def stdoutIO(stdout=None):
frst.write("""
.. note::
This tutorial was generated from the python source
:file:`[ptypy_root]/tutorial/%(fname)s` using :file:`ptypy/doc/%(this)s`.
:file:`[ptypy_root]/tutorial/%(fname)s` using :file:`ptypy/doc/%(this)s`.
You are encouraged to modify the parameters and rerun the tutorial with::

$ python [ptypy_root]/tutorial/%(fname)s

""" % {'fname': os.path.split(script_name)[-1], 'this': sys.argv[0]})

was_comment = True

while True:
@@ -86,7 +86,7 @@ def stdoutIO(stdout=None):
frst.write(' '+line2[1:].strip()+'\n')
frst.write('\n')
continue

if line.startswith('"""'):
frst.write('.. parsed-literal::\n\n')
while True:
@@ -95,11 +95,11 @@
break
frst.write(' ' + line2)
continue

decorator = False
indent = False
for key in indent_keys:
if line.startswith(key):
if line.startswith(key):
indent = True
break

@@ -125,12 +125,12 @@
pt = fpy.tell()
exec(func+'\n')
continue

wline = line.strip()
if not wline:
frst.write('\n')
continue

with stdoutIO() as sout:
exec(wline)
out = sout.getvalue()
@@ -150,15 +150,15 @@
if was_comment:
wline = '\n::\n\n'+wline
was_comment = False

frst.write(wline+'\n')

#print out
if out.strip():
print(out)
for l in out.split('\n'):
frst.write(' '*3+l+'\n')
out = ''
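
A minimal sketch of the resource lookup this file migrates to, assuming only that ptypy is importable; the tutorial path below is a hypothetical stand-in for `tutorial_dir + script`.

```python
# Sketch: importlib.resources.files() replaces the deprecated
# pkg_resources.resource_filename() for locating files inside a package.
from importlib.resources import files

_ptypy_dir = files('ptypy')                         # Traversable package root
scr = _ptypy_dir / 'resources/tutorial/simupod.py'  # hypothetical path
if not scr.exists():
    scr = '../tutorial/simupod.py'                  # fall back to the repo copy
print(scr)
```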



8 changes: 4 additions & 4 deletions ptypy/accelerate/cuda_cupy/multi_gpu.py
@@ -10,12 +10,12 @@
- OpenMPI in a conda install needs to have the environment variable
--> if cuda support isn't enabled, the application simply crashes with a seg fault

2) For NCCL peer-to-peer transfers, the EXCLUSIVE compute mode cannot be used.
2) For NCCL peer-to-peer transfers, the EXCLUSIVE compute mode cannot be used.
It should be in DEFAULT mode.

"""

from pkg_resources import parse_version
from packaging.version import parse
import numpy as np
import cupy as cp
from ptypy.utils import parallel
@@ -44,7 +44,7 @@
have_cuda_mpi = (mpi4py is not None) and \
"OMPI_MCA_opal_cuda_support" in os.environ and \
os.environ["OMPI_MCA_opal_cuda_support"] == "true" and \
parse_version(parse_version(mpi4py.__version__).base_version) >= parse_version("3.1.0") and \
parse(parse(mpi4py.__version__).base_version) >= parse("3.1.0") and \
not ('PTYPY_USE_MPI' in os.environ)


@@ -114,7 +114,7 @@ def allReduceSum(self, arr):

count, datatype = self.__get_NCCL_count_dtype(arr)

self.com.allReduce(arr.data.ptr, arr.data.ptr, count, datatype, nccl.NCCL_SUM,
self.com.allReduce(arr.data.ptr, arr.data.ptr, count, datatype, nccl.NCCL_SUM,
cp.cuda.get_current_stream().ptr)

def __get_NCCL_count_dtype(self, arr):
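A minimal sketch of the migrated version check, assuming mpi4py is installed; `packaging.version.parse` replaces `pkg_resources.parse_version` with the same comparison semantics.

```python
# Sketch: base_version strips pre/post/dev suffixes ("3.1.0rc1" -> "3.1.0"),
# so only the release segment is compared against the minimum requirement.
from packaging.version import parse
import mpi4py

new_enough = parse(parse(mpi4py.__version__).base_version) >= parse("3.1.0")
print(new_enough)
```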
10 changes: 5 additions & 5 deletions ptypy/accelerate/cuda_pycuda/multi_gpu.py
@@ -5,7 +5,7 @@

Findings:

1) NCCL works with unit tests, but not in the engines. It seems to
1) NCCL works with unit tests, but not in the engines. It seems to
add something to the existing pycuda Context or create a new one,
as a later event recording on an exit wave transfer fails with
'invalid resource handle' Cuda Error. This error typically happens if for example
@@ -22,14 +22,14 @@
- OpenMPI in a conda install needs to have the environment variable
--> if cuda support isn't enabled, the application simply crashes with a seg fault

4) For NCCL peer-to-peer transfers, the EXCLUSIVE compute mode cannot be used.
4) For NCCL peer-to-peer transfers, the EXCLUSIVE compute mode cannot be used.
It should be in DEFAULT mode.

5) NCCL support has been dropped from PyCUDA module, but can be used with CuPy module instead

"""

from pkg_resources import parse_version
from packaging.version import parse
import numpy as np
from pycuda import gpuarray
import pycuda.driver as cuda
@@ -54,7 +54,7 @@
have_cuda_mpi = (mpi4py is not None) and \
"OMPI_MCA_opal_cuda_support" in os.environ and \
os.environ["OMPI_MCA_opal_cuda_support"] == "true" and \
parse_version(parse_version(mpi4py.__version__).base_version) >= parse_version("3.1.0") and \
parse(parse(mpi4py.__version__).base_version) >= parse("3.1.0") and \
hasattr(gpuarray.GPUArray, '__cuda_array_interface__') and \
not ('PTYPY_USE_MPI' in os.environ)

@@ -97,7 +97,7 @@ def allReduceSum(self, arr):
if parallel.MPIenabled:
comm = parallel.comm
comm.Allreduce(parallel.MPI.IN_PLACE, arr)


# pick the appropriate communicator depending on installed packages
def get_multi_gpu_communicator(use_cuda_mpi=True):
18 changes: 10 additions & 8 deletions ptypy/core/illumination.py
@@ -134,11 +134,13 @@
userlevel = 0

[photons]
type = int, float, None
default = None
type = int, float, str, None
default = 'maxdiff'
help = Number of photons in the incident illumination
doc = A value specified here will take precedence over calculated statistics from the loaded data.
lowlim = 0
doc = A value specified here will take precedence over calculated statistics from the loaded data. Choices:
- ``None`` : modeled or loaded probe remains unscaled in intensity
- ``int`` or ``float`` : modeled or loaded probe intensity is scaled to that number of photons
- ``'maxdiff'`` : modeled or loaded probe intensity is scaled to match the brightest diffraction pattern
userlevel = 2

[propagation]
@@ -311,7 +313,7 @@ def init_storage(storage, pars, energy=None, **kwargs):

p = DEFAULT.copy(depth=3)
model = None
if hasattr(pars, 'items') or hasattr(pars, 'items'):

if hasattr(pars, 'items'):
# This is a dict
p.update(pars, in_place_depth=3)

@@ -362,7 +365,7 @@ def init_storage(storage, pars, energy=None, **kwargs):

if p.model is None:
model = np.ones(s.shape, s.dtype)
if p.photons is not None:
if (type(p.photons) is int) or (type(p.photons) is float):
model *= np.sqrt(p.photons) / np.prod(s.shape)
elif type(p.model) is np.ndarray:
model = p.model
@@ -377,7 +380,6 @@
'Attempt to load layer `%s` of probe storage with ID `%s` from `%s`'
% (str(layer), str(ID), p.recon.rfile))
model = u.load_from_ptyr(p.recon.rfile, 'probe', ID, layer)
p.photons = None
# This could be more sophisticated,
# i.e. matching the real space grids etc.
elif str(p.model) == 'stxm':
Expand Down Expand Up @@ -475,7 +477,7 @@ def _process(model, aperture_pars=None, prop_pars=None, photons=1e7,
model = prop(model)

# apply photon count
if photons is not None:
if (type(photons) is int) or (type(photons) is float):
model *= np.sqrt(photons / u.norm2(model))

return model
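A usage sketch of the three accepted `photons` values after this change, using the standard `ptypy.utils.Param` tree; the probe shape and photon numbers are placeholders.

```python
# Sketch: how the new illumination.photons choices are meant to be used.
import numpy as np
from ptypy import utils as u

illumination = u.Param()
illumination.model = np.ones((256, 256), dtype=complex)  # user-supplied probe

illumination.photons = 'maxdiff'   # default: match the brightest pattern
# illumination.photons = 1e9       # scale the probe to an explicit count
# illumination.photons = None      # leave the numpy-array probe unscaled,
#                                  # which is the behaviour this PR fixes
```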
24 changes: 16 additions & 8 deletions ptypy/core/manager.py
@@ -1130,18 +1130,26 @@ def _initialize_probe(self, probe_ids):
# Bypass additional tests if input is a string (previous reconstruction)
if illu_pars != str(illu_pars):

# if photon count is None, assign a number from the stats.
phot = illu_pars.get('photons')
phot_max = self.diff.max_power

if phot is None:
if phot == 'maxdiff':
# probe intensity to be scaled to the brightest diffraction pattern
logger.info(
'Found no photon count for probe in parameters.\nUsing photon count %.2e from photon report' % phot_max)
'Probe intensity is being rescaled to match the brightest diffraction pattern.\nUsing photon count %.2e from photon report' % phot_max)
illu_pars['photons'] = phot_max
elif np.abs(np.log10(phot) - np.log10(phot_max)) > 1:
logger.warning(
'Photon count from input parameters (%.2e) differs from statistics (%.2e) by more than a magnitude' % (
phot, phot_max))
elif phot is None:
# probe intensity to remain untouched
pass
elif (type(phot) is int) or (type(phot) is float):
# probe intensity to be scaled to a specific value
if phot < 0:
logger.warning(
f'Given photon count is negative. Using its absolute value instead: {-1 * phot:.2e}')
phot = -1 * phot
if np.abs(np.log10(phot) - np.log10(phot_max)) > 1:
logger.warning(
'Photon count from input parameters (%.2e) differs from statistics (%.2e) by more than a magnitude' % (
phot, phot_max))

if (self.p.coherence.num_probe_modes > 1) and (type(illu_pars) is not np.ndarray):

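A standalone sketch of the branching added to `_initialize_probe`, reimplemented here for illustration only; `phot_max` stands in for `self.diff.max_power` and the logger is replaced by `print`.

```python
import numpy as np

# Sketch: map the photons setting onto the value the probe is scaled to,
# mirroring the maxdiff / None / numeric branches above.
def resolve_photons(phot, phot_max):
    if phot == 'maxdiff':
        return phot_max              # scale to the brightest pattern
    if phot is None:
        return None                  # leave probe intensity untouched
    if type(phot) in (int, float):
        if phot < 0:                 # warn-and-flip, as in the diff
            phot = -phot
        if np.abs(np.log10(phot) - np.log10(phot_max)) > 1:
            print('photon count differs from statistics by over a magnitude')
        return phot
    return None                      # anything else stays unscaled

print(resolve_photons('maxdiff', 2.5e8))  # -> 250000000.0
```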