diff --git a/.github/workflows/test-build-push.yml b/.github/workflows/test-build-push.yml
index 219fec61..03f475b0 100644
--- a/.github/workflows/test-build-push.yml
+++ b/.github/workflows/test-build-push.yml
@@ -29,7 +29,6 @@ jobs:
           gdal=3.5
           h5py=3.6
           h5netcdf=1.0
-          isce3=0.8.0
           numpy=1.20
           numba=0.54
           pillow==7.0
@@ -69,7 +68,7 @@ jobs:
         echo "NUMBA_BOUNDSCHECK=1" >> $GITHUB_ENV
       - name: Test (with numba boundscheck on)
         run: |
-          pytest
+          pytest -n0
       # https://community.codecov.com/t/numba-jitted-methods-are-not-captured-by-codecov/2649
       # - name: Coverage report
       #   uses: codecov/codecov-action@v2
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 664a4e2d..e1741f36 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,9 @@
+# Unreleased
+
+**Added**
+
+- Save a multilooked version of the PS mask for output inspection
+
 # [0.2.0](https://github.com/opera-adt/dolphin/compare/v0.1.0...v0.2.0) - 2023-07-25
 
 **Added**
diff --git a/README.md b/README.md
index 7816126a..34e1ccd2 100644
--- a/README.md
+++ b/README.md
@@ -12,9 +12,11 @@ High resolution wrapped phase estimation for InSAR using combined PS/DS processi
 `dolphin` is available on conda:
 
 ```bash
-conda install -c conda-forge dolphin
+mamba install -c conda-forge dolphin
 ```
+
+(Note: [using `mamba`](https://mamba.readthedocs.io/en/latest/mamba-installation.html#mamba-install) is recommended for conda-forge packages, but miniconda can also be used.)
+
 To install locally:
 
 1. Download source code:
@@ -23,12 +25,12 @@ git clone https://github.com/opera-adt/dolphin.git && cd dolphin
 ```
 2. Install dependencies:
 ```bash
-conda env create --file conda-env.yml
+mamba env create --file conda-env.yml
 ```
 
 or if you have an existing environment:
 ```bash
-conda env update --name my-existing-env --file conda-env.yml
+mamba env update --name my-existing-env --file conda-env.yml
 ```
 
 3. Install `dolphin` via pip:
diff --git a/conda-env.yml b/conda-env.yml
index 211ac91d..438a84b7 100644
--- a/conda-env.yml
+++ b/conda-env.yml
@@ -7,9 +7,9 @@ dependencies:
   - git # for pip install, due to setuptools_scm
   - gdal>=3.3
   - h5py>=3.6
-  - hdf5<1.12.2 # https://github.com/SciTools/iris/issues/5187 and https://github.com/pydata/xarray/issues/7549
+  - hdf5!=1.12.2 # https://github.com/SciTools/iris/issues/5187 and https://github.com/pydata/xarray/issues/7549
   - h5netcdf>=1.0
-  - isce3>=0.8.0
+  - isce3 # >=0.14.0 # Right now, isce3 messes up conda's solvers. Should move to optional.
   - numba>=0.54
   - numpy>=1.20
   - pillow>=7.0
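A side note on the loosened `hdf5` pin above: `!=1.12.2` excludes only the one build that triggered the linked iris/xarray issues, whereas the old `<1.12.2` also blocked every newer, fixed release. Conda match specs are not PEP 440 specifiers, but the effect is the same as in this standalone `packaging` sketch (illustration only, not part of the repo):

```python
from packaging.specifiers import SpecifierSet

old_pin, new_pin = SpecifierSet("<1.12.2"), SpecifierSet("!=1.12.2")
for version in ["1.12.1", "1.12.2", "1.12.3", "1.14.0"]:
    # the broken 1.12.2 stays excluded either way; only the new pin
    # lets the solver pick the fixed builds that came after it
    print(version, version in old_pin, version in new_pin)
```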
diff --git a/docs/getting-started.md b/docs/getting-started.md
index 870b1dc1..ee333eeb 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -1,33 +1,13 @@
 ## Install
 
-The following will install `dolphin` into a conda environment.
-1. Download source code:
-```bash
-git clone https://github.com/opera-adt/dolphin.git && cd dolphin
-```
-2. Install dependencies:
-```bash
-conda env create --file conda-env.yml
-```
+`dolphin` is available on conda-forge:
 
-or if you have an existing environment:
 ```bash
-conda env update --name my-existing-env --file conda-env.yml
-```
-
-3. Install `dolphin` via pip:
-```bash
-conda activate dolphin-env
-python -m pip install .
+mamba install -c conda-forge dolphin
 ```
 
-If you have access to a GPU, you can install the extra requirements from running the GPU accelerated algorithms:
-```bash
-conda env update --name dolphin-env --file conda-env-gpu-extras.yml
-```
-
 ## Usage
 
 The main entry point for running the phase estimation/stitching and unwrapping workflows is named `dolphin`, which has two subcommands:
@@ -66,16 +46,42 @@ The full set of options is written to the configuration file; you can edit this
 To contribute to the development of `dolphin`, you can fork the repository and install the package in development mode.
 We encourage new features to be developed on a new branch of your fork, and then submitted as a pull request to the main repository.
 
-Once you're ready to write new code, you can use the following additional steps to add to your development environment:
+To install locally:
+
+1. Download source code:
+```bash
+git clone https://github.com/opera-adt/dolphin.git && cd dolphin
+```
+2. Install dependencies:
+```bash
+mamba env create --file conda-env.yml
+```
+
+or if you have an existing environment:
+```bash
+mamba env update --name my-existing-env --file conda-env.yml
+```
+
+3. Install `dolphin` via pip:
+```bash
+mamba activate dolphin-env
+python -m pip install -e .
+```
+
+
+If you have access to a GPU, you can install the extra requirements for running the GPU-accelerated algorithms:
+```bash
+mamba env update --name dolphin-env --file conda-env-gpu-extras.yml
+```
+The extra packages required for testing and building the documentation can be installed with:
 ```bash
 # Run "pip install -e" to install with extra development requirements
 python -m pip install -e ".[docs,test]"
 ```
 
-This will install the `dolphin` package in development mode, and install the additional dependencies for documentation and testing.
-After changing code, we use [`pre-commit`](https://pre-commit.com/) to automatically run linting and formatting:
+We use [`pre-commit`](https://pre-commit.com/) to automatically run linting and formatting:
 ```bash
 # Get pre-commit hooks so that linting/formatting is done automatically
 pre-commit install
diff --git a/pyproject.toml b/pyproject.toml
index ca533299..6fe7f884 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -65,7 +65,7 @@ ignore = "D100,D102,D104,D105,D106,D107,D203,D204,D213,D413"
 
 [tool.pytest.ini_options]
 doctest_optionflags = "NORMALIZE_WHITESPACE NUMBER"
-addopts = " --cov=dolphin -n auto --maxprocesses=8 --doctest-modules --ignore=scripts --ignore=docs --ignore=data"
+addopts = " --cov=dolphin -n auto --maxprocesses=8 --doctest-modules --randomly-seed=1234 --ignore=scripts --ignore=docs --ignore=data"
 filterwarnings = [
   "error",
   # DeprecationWarning thrown in pkg_resources for older numba verions and llvmlite
diff --git a/src/dolphin/_background.py b/src/dolphin/_background.py
index be143a85..a3577db9 100644
--- a/src/dolphin/_background.py
+++ b/src/dolphin/_background.py
@@ -4,8 +4,7 @@
 from collections.abc import Callable
 from concurrent.futures import Executor, Future
 from queue import Empty, Full, Queue
-from threading import Event, Thread
-from threading import enumerate as threading_enumerate
+from threading import Event, Thread, main_thread
 from typing import Any, Optional
 
 from dolphin._log import get_log
@@ -16,17 +15,6 @@
 _DEFAULT_TIMEOUT = 0.5
 
 
-def is_main_thread_active() -> bool:
-    """Check if the main thread is still active.
-
-    Used to check if the writing thread should exit if there was
-    some exception in the main thread.
-
-    Source: https://stackoverflow.com/a/23443397/4174466
-    """
-    return any((i.name == "MainThread") and i.is_alive() for i in threading_enumerate())
-
-
 class BackgroundWorker(abc.ABC):
     """Base class for doing work in a background thread.
 
@@ -78,7 +66,7 @@ def __init__(
 
     def _consume_work_queue(self):
         while True:
-            if not is_main_thread_active():
+            if not main_thread().is_alive():
                 break
 
             logger.debug(f"{self.name} getting work")
@@ -312,7 +300,7 @@ def run(self):
             # Write the header
             f.write("time(s),memory(GB)\n")
 
-            while not self._finished_event.is_set() and is_main_thread_active():
+            while not self._finished_event.is_set() and main_thread().is_alive():
                 mem = self._get_gpu_memory()
                 t_cur = time.time() - self.t0
                 with open(self.log_file, "a") as f:
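For context on the `_background.py` change: since Python 3.4, `threading.main_thread()` returns the main `Thread` object directly, so `main_thread().is_alive()` is an exact stdlib replacement for the removed enumerate-and-match helper. A minimal runnable sketch of the polling pattern used by the workers (names here are illustrative, not from the repo):

```python
import threading
import time

def _poll_loop(finished: threading.Event, interval: float = 0.5) -> None:
    # Stop when asked to, or when the main thread has died (e.g. from an
    # uncaught exception), so the background loop cannot hang forever.
    while not finished.is_set() and threading.main_thread().is_alive():
        time.sleep(interval)

finished = threading.Event()
worker = threading.Thread(target=_poll_loop, args=(finished,))
worker.start()
finished.set()  # signal shutdown from the main thread
worker.join()
```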
diff --git a/src/dolphin/ps.py b/src/dolphin/ps.py
index efa81205..6eaa4870 100644
--- a/src/dolphin/ps.py
+++ b/src/dolphin/ps.py
@@ -257,7 +257,7 @@ def multilook_ps_mask(
     strides: dict[str, int],
     ps_mask_file: Filename,
     output_file: Optional[Filename] = None,
-):
+) -> Path:
     """Create a multilooked version of the full-res PS mask.
 
     Parameters
@@ -269,17 +269,24 @@ def multilook_ps_mask(
     output_file : Optional[Filename], optional
         Name of file to save result to.
         Defaults to same as `ps_mask_file`, but with "_looked" added before suffix.
+
+    Returns
+    -------
+    output_file : Path
     """
     if strides == {"x": 1, "y": 1}:
         logger.info("No striding request, skipping multilook.")
-        return
+        return Path(ps_mask_file)
     if output_file is None:
         ps_suffix = Path(ps_mask_file).suffix
-        output_file = Path(str(ps_mask_file).replace(ps_suffix, f"_looked{ps_suffix}"))
-    logger.info(f"Saving a looked PS mask to {output_file}")
-    if Path(output_file).exists():
-        logger.info(f"{output_file} exists, skipping.")
-        return
+        out_path = Path(str(ps_mask_file).replace(ps_suffix, f"_looked{ps_suffix}"))
+        logger.info(f"Saving a looked PS mask to {out_path}")
+    else:
+        out_path = Path(output_file)
+
+    if Path(out_path).exists():
+        logger.info(f"{out_path} exists, skipping.")
+        return out_path
 
     ps_mask = io.load_gdal(ps_mask_file, masked=True)
     full_rows, full_cols = ps_mask.shape
@@ -289,11 +296,12 @@ def multilook_ps_mask(
     # make sure it's the same size as the MLE result/temp_coh after padding
     out_rows, out_cols = full_rows // strides["y"], full_cols // strides["x"]
     ps_mask_looked = ps_mask_looked[:out_rows, :out_cols]
-    ps_mask_looked = ps_mask_looked.astype("uint8").fill(255)
+    ps_mask_looked = ps_mask_looked.astype("uint8").filled(255)
     io.write_arr(
         arr=ps_mask_looked,
         like_filename=ps_mask_file,
-        output_name=output_file,
+        output_name=out_path,
         strides=strides,
         nodata=255,
     )
+    return out_path
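The one-character change from `.fill(255)` to `.filled(255)` is the actual bug fix in `multilook_ps_mask`: `ndarray.fill` mutates every element in place and returns `None` (so the old code assigned `None` to `ps_mask_looked`), while `MaskedArray.filled` returns a plain array with only the masked (nodata) pixels replaced. A standalone demonstration:

```python
import numpy as np

mask = np.ma.MaskedArray([[1, 0], [0, 1]], mask=[[False, True], [False, False]])

# .fill(255) overwrites every element in place and returns None
assert mask.astype("uint8").fill(255) is None

# .filled(255) returns an ndarray with only the masked pixel set to nodata
print(mask.astype("uint8").filled(255))
# [[  1 255]
#  [  0   1]]
```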
diff --git a/src/dolphin/utils.py b/src/dolphin/utils.py
index f77015d2..5a4832e1 100644
--- a/src/dolphin/utils.py
+++ b/src/dolphin/utils.py
@@ -331,11 +331,6 @@ def full_suffix(filename: Filename):
     return "".join(fpath.suffixes)
 
 
-def half_window_to_full(half_window: Union[list, tuple]) -> tuple[int, int]:
-    """Convert a half window size to a full window size."""
-    return (2 * half_window[0] + 1, 2 * half_window[1] + 1)
-
-
 def gpu_is_available() -> bool:
     """Check if a GPU is available."""
     try:
diff --git a/src/dolphin/workflows/_utils.py b/src/dolphin/workflows/_utils.py
index 11b3a7cc..aad99543 100644
--- a/src/dolphin/workflows/_utils.py
+++ b/src/dolphin/workflows/_utils.py
@@ -19,8 +19,6 @@
 logger = get_log(__name__)
 
 
-__all__ = ["group_by_burst", "setup_output_folder"]
-
 
 def group_by_burst(
     file_list: Sequence[Filename],
diff --git a/src/dolphin/workflows/s1_disp.py b/src/dolphin/workflows/s1_disp.py
index b2182823..88502580 100755
--- a/src/dolphin/workflows/s1_disp.py
+++ b/src/dolphin/workflows/s1_disp.py
@@ -97,6 +97,7 @@ def run(
 
     ifg_file_list: list[Path] = []
     tcorr_file_list: list[Path] = []
+    ps_file_list: list[Path] = []
    # The comp_slc tracking object is a dict, since we'll need to organize
    # multiple comp slcs by burst (they'll have the same filename)
    comp_slc_dict: dict[str, Path] = {}
@@ -119,10 +120,11 @@ def run(
 
         for fut in fut_to_burst:
             burst = fut_to_burst[fut]
-            cur_ifg_list, comp_slc, tcorr = fut.result()
+            cur_ifg_list, comp_slc, tcorr, ps_file = fut.result()
             ifg_file_list.extend(cur_ifg_list)
             comp_slc_dict[burst] = comp_slc
             tcorr_file_list.append(tcorr)
+            ps_file_list.append(ps_file)
 
     # ###################################
     # 2. Stitch and unwrap interferograms
@@ -131,6 +133,7 @@ def run(
     stitch_and_unwrap.run(
         ifg_file_list=ifg_file_list,
         tcorr_file_list=tcorr_file_list,
+        ps_file_list=ps_file_list,
         cfg=cfg,
         debug=debug,
     )
diff --git a/src/dolphin/workflows/stitch_and_unwrap.py b/src/dolphin/workflows/stitch_and_unwrap.py
index 48f83601..c00c11e1 100644
--- a/src/dolphin/workflows/stitch_and_unwrap.py
+++ b/src/dolphin/workflows/stitch_and_unwrap.py
@@ -15,6 +15,7 @@
 def run(
     ifg_file_list: Sequence[Path],
     tcorr_file_list: Sequence[Path],
+    ps_file_list: Sequence[Path],
     cfg: Workflow,
     debug: bool = False,
     unwrap_jobs: int = 1,
@@ -23,11 +24,13 @@
 
     Parameters
     ----------
-    ifg_file_list : Sequence[VRTInterferogram]
-        Sequence of [`VRTInterferogram`][dolphin.interferogram.VRTInterferogram] objects
-        to stitch together
+    ifg_file_list : Sequence[Path]
+        Sequence of interferogram files.
+        Separate bursts (if any) will be stitched together before unwrapping.
     tcorr_file_list : Sequence[Path]
-        Sequence of paths to the correlation files for each interferogram
+        Sequence of paths to the burst-wise temporal coherence files.
+    ps_file_list : Sequence[Path]
+        Sequence of paths to the (looked) burst-wise PS mask files.
     cfg : Workflow
         [`Workflow`][dolphin.workflows.config.Workflow] object with workflow parameters
     debug : bool, optional
@@ -77,7 +80,18 @@
         tcorr_file_list,
         outfile=stitched_tcorr_file,
         driver="GTiff",
-        overwrite=False,
+        out_bounds=cfg.output_options.bounds,
+        out_bounds_epsg=cfg.output_options.bounds_epsg,
+    )
+
+    # Stitch the looked PS files
+    stitched_ps_file = stitched_ifg_dir / "ps_mask_looked.tif"
+    stitching.merge_images(
+        ps_file_list,
+        outfile=stitched_ps_file,
+        out_nodata=255,
+        driver="GTiff",
+        resample_alg="nearest",
         out_bounds=cfg.output_options.bounds,
         out_bounds_epsg=cfg.output_options.bounds_epsg,
     )
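A note on the new PS-mask merge above: `resample_alg="nearest"` paired with `out_nodata=255` is the safe combination for a categorical byte mask, because an interpolating resampler could blend the 0/1 PS flags with the 255 nodata fill at burst seams and produce values that belong to no class. A toy illustration of that failure mode in plain numpy (independent of `stitching.merge_images` internals):

```python
import numpy as np

seam = np.array([1, 255], dtype="uint8")  # a PS pixel next to nodata fill
# An averaging resampler would invent a value outside {0, 1, 255}:
print(seam.astype(float).mean())  # 128.0
# Nearest-neighbor only ever copies existing pixels, so the merged
# mask stays within the original value set.
```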
diff --git a/src/dolphin/workflows/wrapped_phase.py b/src/dolphin/workflows/wrapped_phase.py
index a7f7350d..308ec0a9 100644
--- a/src/dolphin/workflows/wrapped_phase.py
+++ b/src/dolphin/workflows/wrapped_phase.py
@@ -13,7 +13,7 @@
 
 
 @log_runtime
-def run(cfg: Workflow, debug: bool = False) -> tuple[list[Path], Path, Path]:
+def run(cfg: Workflow, debug: bool = False) -> tuple[list[Path], Path, Path, Path]:
     """Run the displacement workflow on a stack of SLCs.
 
     Parameters
@@ -88,7 +88,9 @@ def run(
     # Save a looked version of the PS mask too
     strides = cfg.output_options.strides
-    ps.multilook_ps_mask(strides=strides, ps_mask_file=cfg.ps_options._output_file)
+    ps_looked_file = ps.multilook_ps_mask(
+        strides=strides, ps_mask_file=cfg.ps_options._output_file
+    )
 
     # #########################
     # phase linking/EVD step
@@ -183,4 +185,4 @@ def run(
     else:
         ifg_file_list = [ifg.path for ifg in network.ifg_list]  # type: ignore
 
-    return ifg_file_list, comp_slc_file, tcorr_file
+    return ifg_file_list, comp_slc_file, tcorr_file, ps_looked_file
diff --git a/tests/requirements.txt b/tests/requirements.txt
index 1254ebc3..0481faf6 100644
--- a/tests/requirements.txt
+++ b/tests/requirements.txt
@@ -4,4 +4,5 @@ pooch
 pre-commit
 pytest
 pytest-cov
+pytest-randomly # control random seed
 pytest-xdist # parallel tests: https://pytest-xdist.readthedocs.io/en/latest/
diff --git a/tests/test_shp.py b/tests/test_shp.py
index 932e7233..3fa84ded 100644
--- a/tests/test_shp.py
+++ b/tests/test_shp.py
@@ -110,7 +110,7 @@ def test_shp_half_mean_different(slcs, method):
     halfwin_rowcol = (5, 5)
     # make the top half different amplitude
     mean2 = mean.copy()
-    mean2[:5, :] += 500
+    mean2[:5, :] += 2000
 
     # For this test, make all variances equal (just checking mean)
     var[:] = var[5, 5]
@@ -140,7 +140,7 @@ def test_shp_half_var_different(slcs, method):
     halfwin_rowcol = (5, 5)
     # make the top half different amplitude
     var2 = var.copy()
-    var2[:5, :] += 500
+    var2[:5, :] += 5000
 
     # For this test, make all means equal (just checking var)
     mean[:] = mean[5, 5]
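Finally, on the bumped offsets in `test_shp.py` (`+500` to `+2000` and `+5000`): they push the simulated "different" half from borderline to unambiguous for the SHP similarity tests. As rough intuition only (a standalone scipy sketch with arbitrary loc/scale, not dolphin's internal statistics), a mean shift well above the sample spread is separated far more decisively:

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(1234)
base = rng.normal(loc=2000, scale=500, size=1000)  # arbitrary amplitude scale
for shift in (500, 2000):
    result = stats.ks_2samp(base, base + shift)
    print(f"shift={shift}: KS statistic ~ {result.statistic:.2f}")
# shift=500 -> ~0.38 (borderline); shift=2000 -> ~0.95 (unambiguous)
```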