diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml
index 0c68f183..edd8614f 100644
--- a/.github/workflows/testing.yml
+++ b/.github/workflows/testing.yml
@@ -18,6 +18,13 @@ jobs:
         python-version: ["3.8", "3.9", "3.10"]
         numpy-version: ["1.21"]
         pyqt-version: ["5.12"]
+        include:
+          - host-os: "ubuntu-latest"
+            conda-mamba: "mamba"
+          - host-os: "macos-latest"
+            conda-mamba: "mamba"
+          - host-os: "windows-latest"
+            conda-mamba: "conda"
       fail-fast: false
 
     runs-on: ${{ matrix.host-os }}
@@ -76,7 +83,9 @@
       run: |
         set -vxeo pipefail
 
-        conda install -y \
+        conda install mamba -n base -c conda-forge
+
+        ${{ matrix.conda-mamba }} install -y \
           python=${{ matrix.python-version }} \
           numpy=${{ matrix.numpy-version }} \
           pyqt=${{ matrix.pyqt-version }} \
@@ -94,7 +103,7 @@
        set -vxeo pipefail
 
        # There are issues in building wheels for pystackreg>0.2.2 on OSX, so install pystackreg from CF
-        conda install -y pystackreg -c conda-forge
+        ${{ matrix.conda-mamba }} install -y pystackreg -c conda-forge
 
        conda list
        pip list
@@ -103,7 +112,7 @@
      run: |
        set -vxeuo pipefail
 
-        conda install -y pystackreg -c conda-forge  # Some issue with pystackreg from PyPI
+        ${{ matrix.conda-mamba }} install -y pystackreg -c conda-forge  # Some issue with pystackreg from PyPI
 
        # pip install --upgrade pip setuptools
        pip install codecov
diff --git a/pyxrf/core/map_processing.py b/pyxrf/core/map_processing.py
index de56bb65..4c2bb3a1 100644
--- a/pyxrf/core/map_processing.py
+++ b/pyxrf/core/map_processing.py
@@ -468,6 +468,12 @@ def _prepare_xrf_mask(data, mask=None, selection=None):
     return mask
 
 
+def _masked_sum(data, mask):
+    mask = np.broadcast_to(np.expand_dims(mask, axis=2), data.shape)
+    sm = np.sum(np.sum(data * mask, axis=0), axis=0)
+    return np.array([[sm]])
+
+
 def compute_total_spectrum(
     data, *, selection=None, mask=None, chunk_pixels=5000, n_chunks_min=4, progress_bar=None, client=None
 ):
@@ -519,12 +525,6 @@ def compute_total_spectrum(
     if mask is None:
         result_fut = da.sum(da.sum(data, axis=0), axis=0).persist(scheduler=client)
     else:
-
-        def _masked_sum(data, mask):
-            mask = np.broadcast_to(np.expand_dims(mask, axis=2), data.shape)
-            sm = np.sum(np.sum(data * mask, axis=0), axis=0)
-            return np.array([[sm]])
-
         result_fut = da.blockwise(_masked_sum, "ijk", data, "ijk", mask, "ij", dtype="float").persist(
             scheduler=client
         )
@@ -550,6 +550,22 @@ def _masked_sum(data, mask):
     return result
 
 
+def _process_block(data):
+    data = data[0]  # Data is passed as a list of ndarrays
+    _spectrum = np.sum(np.sum(data, axis=0), axis=0)
+    _count_total = np.sum(data, axis=2)
+    return np.array([[{"spectrum": _spectrum, "count_total": _count_total}]])
+
+
+def _process_block_with_mask(data, mask):
+    data = data[0]  # Data is passed as a list of ndarrays
+    mask = np.broadcast_to(np.expand_dims(mask, axis=2), data.shape)
+    masked_data = data * mask
+    _spectrum = np.sum(np.sum(masked_data, axis=0), axis=0)
+    _count_total = np.sum(masked_data, axis=2)
+    return np.array([[{"spectrum": _spectrum, "count_total": _count_total}]])
+
+
 def compute_total_spectrum_and_count(
     data, *, selection=None, mask=None, chunk_pixels=5000, n_chunks_min=4, progress_bar=None, client=None
 ):
@@ -602,25 +618,9 @@ def compute_total_spectrum_and_count(
         logger.info(f"Dask distributed client: {n_workers} workers")
 
     if mask is None:
-
-        def _process_block(data):
-            data = data[0]  # Data is passed as a list of ndarrays
-            _spectrum = np.sum(np.sum(data, axis=0), axis=0)
-            _count_total = np.sum(data, axis=2)
-            return np.array([[{"spectrum": _spectrum, "count_total": _count_total}]])
-
         result_fut = da.blockwise(_process_block, "ij", data, "ijk", dtype=float).persist(scheduler=client)
     else:
-
-        def _process_block(data, mask):
-            data = data[0]  # Data is passed as a list of ndarrays
-            mask = np.broadcast_to(np.expand_dims(mask, axis=2), data.shape)
-            masked_data = data * mask
-            _spectrum = np.sum(np.sum(masked_data, axis=0), axis=0)
-            _count_total = np.sum(masked_data, axis=2)
-            return np.array([[{"spectrum": _spectrum, "count_total": _count_total}]])
-
-        result_fut = da.blockwise(_process_block, "ij", data, "ijk", mask, "ij", dtype=float).persist(
+        result_fut = da.blockwise(_process_block_with_mask, "ij", data, "ijk", mask, "ij", dtype=float).persist(
             scheduler=client
         )
 
diff --git a/pyxrf/db_config/tes_db_config.py b/pyxrf/db_config/tes_db_config.py
index ab73c5a8..a7804c38 100644
--- a/pyxrf/db_config/tes_db_config.py
+++ b/pyxrf/db_config/tes_db_config.py
@@ -23,7 +23,10 @@
 
 
 class BulkXSPRESS(HandlerBase):
-    HANDLER_NAME = "XPS3_FLY"
+    specs = {
+        "XPS3_FLY",  # Old incorrect name
+        "XSP3_FLY",
+    }
 
     def __init__(self, resource_fn):
         self._handle = h5py.File(resource_fn, "r")
@@ -32,4 +35,5 @@ def __call__(self):
         return self._handle["entry/instrument/detector/data"][:]
 
 
-db.reg.register_handler(BulkXSPRESS.HANDLER_NAME, BulkXSPRESS, overwrite=True)
+for spec in BulkXSPRESS.specs:
+    db.reg.register_handler(spec, BulkXSPRESS, overwrite=True)