Fuse Coreg point functions and add consistent Raster-Point logic (G…
rhugonnet authored Mar 29, 2024
1 parent 48bbf17 commit 663a702
Showing 27 changed files with 1,804 additions and 1,010 deletions.
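
The headline change, visible throughout the examples and tests below, is that the raster-only fit()/apply() and the point-specific fit_pts()/apply_pts() methods are fused into single fit()/apply() entry points: keyword arguments are renamed from reference_dem/dem_to_be_aligned to reference_elev/to_be_aligned_elev, point clouds are passed as GeoDataFrames, and to_pointcloud() replaces to_points() for building them. A rough sketch of the resulting calling convention, pieced together from the diffs below rather than from released documentation, so treat argument names and defaults as indicative only:

import xdem

# Example DEMs used throughout the changed tests.
ref = xdem.DEM(xdem.examples.get_path("longyearbyen_ref_dem"))
tba = xdem.DEM(xdem.examples.get_path("longyearbyen_tba_dem"))

# Raster-raster: same workflow as before, but with the renamed keywords.
nuth_kaab = xdem.coreg.NuthKaab()
nuth_kaab.fit(reference_elev=ref, to_be_aligned_elev=tba)
aligned = nuth_kaab.apply(elev=tba)

# Raster-point: fit_pts() is gone; fit() also accepts a point GeoDataFrame,
# here built with to_pointcloud() (the replacement for to_points()).
ref_points = ref.to_pointcloud(data_column_name="z", subsample=10000, random_state=42).ds
nuth_kaab_pts = xdem.coreg.NuthKaab()
nuth_kaab_pts.fit(ref_points, tba, random_state=42)

# apply_pts() is gone too: apply() on a GeoDataFrame returns a shifted GeoDataFrame.
shifted_points = nuth_kaab_pts.apply(ref_points)
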
7 changes: 6 additions & 1 deletion .github/workflows/python-tests.yml
@@ -61,7 +61,7 @@ jobs:
if: steps.cache.outputs.cache-hit != 'true'
run: |
mamba install pyyaml python=${{ matrix.python-version }}
python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,opencv,pytransform3d" "environment.yml"
python .github/scripts/generate_yml_env_fixed_py.py --pyv ${{ matrix.python-version }} --add "graphviz,pytransform3d" "environment.yml"
mamba env update -n xdem-dev -f environment-ci-py${{ matrix.python-version }}.yml
- name: Install project
@@ -99,6 +99,11 @@ jobs:
- name: Setup pip dependencies
run: pip install pytest-cov coveralls coveragepy-lcov

- name: Print conda environment (for debugging)
run: |
conda info
conda list
- name: Test with pytest
run: |
# We unset the PROJ_DATA environment variable to make PROJ work on Windows
10 changes: 5 additions & 5 deletions dev-environment.yml
@@ -13,16 +13,14 @@ dependencies:
- tqdm
- scikit-image=0.*
- scikit-gstat>=1.0
- geoutils=0.1.*
- geoutils>=0.1.2

# Development-specific, to mirror manually in setup.cfg [options.extras_require].
- pip

# Optional dependencies
- opencv
- openh264
- pytransform3d
# - richdem
- richdem

# Test dependencies
- pytest
@@ -49,6 +47,8 @@ dependencies:

# Optional dependencies
- noisyopt
# "Headless" needed for opencv to install without requiring system dependencies
- opencv-contrib-python-headless

# To run CI against latest GeoUtils
# - git+https://github.com/GlacioHack/geoutils.git
# - git+https://github.com/rhugonnet/geoutils.git@fix_to_points
8 changes: 4 additions & 4 deletions environment.yml
@@ -13,9 +13,9 @@ dependencies:
- tqdm
- scikit-image=0.*
- scikit-gstat>=1.0
- geoutils=0.1.*
- geoutils>=0.1.2
- pip

# To run CI against latest GeoUtils
# - pip:
# - git+https://github.com/GlacioHack/geoutils.git
# To run CI against latest GeoUtils
# - pip:
# - git+https://github.com/rhugonnet/geoutils.git@fix_to_points
9 changes: 4 additions & 5 deletions examples/basic/plot_icp_coregistration.py
@@ -44,8 +44,7 @@
)

# This will apply the matrix along the center of the DEM
rotated_dem_data = xdem.coreg.apply_matrix(dem.data.squeeze(), transform=dem.transform, matrix=rotation_matrix)
rotated_dem = xdem.DEM.from_array(rotated_dem_data, transform=dem.transform, crs=dem.crs, nodata=-9999)
rotated_dem = xdem.coreg.apply_matrix(dem, matrix=rotation_matrix)

# %%
# We can plot the difference between the original and rotated DEM.
@@ -73,11 +72,11 @@

for i, (approach, name) in enumerate(approaches):
approach.fit(
reference_dem=dem,
dem_to_be_aligned=rotated_dem,
reference_elev=dem,
to_be_aligned_elev=rotated_dem,
)

corrected_dem = approach.apply(dem=rotated_dem)
corrected_dem = approach.apply(elev=rotated_dem)

diff = dem - corrected_dem

2 changes: 1 addition & 1 deletion pyproject.toml
@@ -16,7 +16,7 @@ fallback_version = "0.0.1"
target_version = ['py36']

[tool.pytest.ini_options]
addopts = "--doctest-modules"
addopts = "--doctest-modules -W error::UserWarning"
testpaths = [
"tests",
"xdem"
2 changes: 1 addition & 1 deletion requirements.txt
@@ -11,5 +11,5 @@ scipy==1.*
tqdm
scikit-image==0.*
scikit-gstat>=1.0
geoutils==0.1.*
geoutils>=0.1.2
pip
74 changes: 37 additions & 37 deletions tests/test_coreg/test_affine.py
@@ -2,8 +2,8 @@
from __future__ import annotations

import copy
import warnings

import geopandas as gpd
import numpy as np
import pytest
import rasterio as rio
@@ -17,11 +17,10 @@

def load_examples() -> tuple[RasterType, RasterType, Vector]:
"""Load example files to try coregistration methods with."""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
reference_raster = Raster(examples.get_path("longyearbyen_ref_dem"))
to_be_aligned_raster = Raster(examples.get_path("longyearbyen_tba_dem"))
glacier_mask = Vector(examples.get_path("longyearbyen_glacier_outlines"))

reference_raster = Raster(examples.get_path("longyearbyen_ref_dem"))
to_be_aligned_raster = Raster(examples.get_path("longyearbyen_tba_dem"))
glacier_mask = Vector(examples.get_path("longyearbyen_glacier_outlines"))

return reference_raster, to_be_aligned_raster, glacier_mask

@@ -32,32 +31,34 @@ class TestAffineCoreg:
inlier_mask = ~outlines.create_mask(ref)

fit_params = dict(
reference_dem=ref.data,
dem_to_be_aligned=tba.data,
reference_elev=ref.data,
to_be_aligned_elev=tba.data,
inlier_mask=inlier_mask,
transform=ref.transform,
crs=ref.crs,
verbose=False,
)
# Create some 3D coordinates with Z coordinates being 0 to try the apply_pts functions.
points = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T
# Create some 3D coordinates with Z coordinates being 0 to try the apply functions.
points_arr = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [0, 0, 0, 0]], dtype="float64").T
points = gpd.GeoDataFrame(
geometry=gpd.points_from_xy(x=points_arr[:, 0], y=points_arr[:, 1], crs=ref.crs), data={"z": points_arr[:, 2]}
)

def test_from_classmethods(self) -> None:
warnings.simplefilter("error")

# Check that the from_matrix function works as expected.
vshift = 5
matrix = np.diag(np.ones(4, dtype=float))
matrix[2, 3] = vshift
coreg_obj = AffineCoreg.from_matrix(matrix)
transformed_points = coreg_obj.apply_pts(self.points)
assert transformed_points[0, 2] == vshift
transformed_points = coreg_obj.apply(self.points)
assert all(transformed_points["z"].values == vshift)

# Check that the from_translation function works as expected.
x_offset = 5
coreg_obj2 = AffineCoreg.from_translation(x_off=x_offset)
transformed_points2 = coreg_obj2.apply_pts(self.points)
assert np.array_equal(self.points[:, 0] + x_offset, transformed_points2[:, 0])
transformed_points2 = coreg_obj2.apply(self.points)
assert np.array_equal(self.points.geometry.x.values + x_offset, transformed_points2.geometry.x.values)

# Try to make a Coreg object from a nan translation (should fail).
try:
@@ -67,7 +68,6 @@ def test_from_classmethods(self) -> None:
raise exception

def test_vertical_shift(self) -> None:
warnings.simplefilter("error")

# Create a vertical shift correction instance
vshiftcorr = coreg.VerticalShift()
@@ -86,19 +86,19 @@ def test_vertical_shift(self) -> None:
assert matrix[2, 3] == vshift, matrix

# Check that the first z coordinate is now the vertical shift
assert vshiftcorr.apply_pts(self.points)[0, 2] == vshiftcorr._meta["vshift"]
assert all(vshiftcorr.apply(self.points)["z"].values == vshiftcorr._meta["vshift"])

# Apply the model to correct the DEM
tba_unshifted, _ = vshiftcorr.apply(self.tba.data, self.ref.transform, self.ref.crs)
tba_unshifted, _ = vshiftcorr.apply(self.tba.data, transform=self.ref.transform, crs=self.ref.crs)

# Create a new vertical shift correction model
vshiftcorr2 = coreg.VerticalShift()
# Check that this is indeed a new object
assert vshiftcorr is not vshiftcorr2
# Fit the corrected DEM to see if the vertical shift will be close to or at zero
vshiftcorr2.fit(
reference_dem=self.ref.data,
dem_to_be_aligned=tba_unshifted,
reference_elev=self.ref.data,
to_be_aligned_elev=tba_unshifted,
transform=self.ref.transform,
crs=self.ref.crs,
inlier_mask=self.inlier_mask,
@@ -157,17 +157,16 @@ def test_gradientdescending(self, subsample: int = 10000, inlier_mask: bool = Tr

# Run co-registration
gds = xdem.coreg.GradientDescending(subsample=subsample)
gds.fit_pts(
self.ref.to_points().ds,
gds.fit(
self.ref.to_pointcloud(data_column_name="z").ds,
self.tba,
inlier_mask=inlier_mask,
verbose=verbose,
subsample=subsample,
z_name="b1",
random_state=42,
)
assert gds._meta["offset_east_px"] == pytest.approx(-0.496000, rel=1e-1, abs=0.1)
assert gds._meta["offset_north_px"] == pytest.approx(-0.1875, rel=1e-1, abs=0.1)
assert gds._meta["vshift"] == pytest.approx(-1.8730, rel=1e-1)

shifts = (gds._meta["offset_east_px"], gds._meta["offset_north_px"], gds._meta["vshift"])
assert shifts == pytest.approx((0.03525, -0.59775, -2.39144), abs=10e-5)

@pytest.mark.parametrize("shift_px", [(1, 1), (2, 2)]) # type: ignore
@pytest.mark.parametrize("coreg_class", [coreg.NuthKaab, coreg.GradientDescending, coreg.ICP]) # type: ignore
@@ -177,14 +176,15 @@ def test_coreg_example_shift(self, shift_px, coreg_class, points_or_raster, verb
For comparison of coreg algorithms:
Shift a ref_dem on purpose, e.g. shift_px = (1,1), and then applying coreg to shift it back.
"""
warnings.simplefilter("error")
res = self.ref.res[0]

# shift DEM by shift_px
shifted_ref = self.ref.copy()
shifted_ref.shift(shift_px[0] * res, shift_px[1] * res, inplace=True)

shifted_ref_points = shifted_ref.to_points(as_array=False, subsample=subsample, pixel_offset="center").ds
shifted_ref_points = shifted_ref.to_pointcloud(
subsample=subsample, force_pixel_offset="center", random_state=42
).ds
shifted_ref_points["E"] = shifted_ref_points.geometry.x
shifted_ref_points["N"] = shifted_ref_points.geometry.y
shifted_ref_points.rename(columns={"b1": "z"}, inplace=True)
Expand All @@ -198,7 +198,7 @@ def test_coreg_example_shift(self, shift_px, coreg_class, points_or_raster, verb
if points_or_raster == "raster":
coreg_obj.fit(shifted_ref, self.ref, verbose=verbose, random_state=42)
elif points_or_raster == "points":
coreg_obj.fit_pts(shifted_ref_points, self.ref, verbose=verbose, random_state=42)
coreg_obj.fit(shifted_ref_points, self.ref, verbose=verbose, random_state=42)

if coreg_class.__name__ == "ICP":
matrix = coreg_obj.to_matrix()
Expand All @@ -222,7 +222,6 @@ def test_coreg_example_shift(self, shift_px, coreg_class, points_or_raster, verb
raise AssertionError(f"Diffs are too big. east: {best_east_diff:.2f} px, north: {best_north_diff:.2f} px")

def test_nuth_kaab(self) -> None:
warnings.simplefilter("error")

nuth_kaab = coreg.NuthKaab(max_iterations=10)

@@ -260,15 +259,17 @@ def test_nuth_kaab(self) -> None:
assert np.sqrt(np.mean(np.square(diff))) < 1

# Transform some arbitrary points.
transformed_points = nuth_kaab.apply_pts(self.points)
transformed_points = nuth_kaab.apply(self.points)

# Check that the x shift is close to the pixel_shift * image resolution
assert abs((transformed_points[0, 0] - self.points[0, 0]) - pixel_shift * self.ref.res[0]) < 0.1
assert all(
abs((transformed_points.geometry.x.values - self.points.geometry.x.values) - pixel_shift * self.ref.res[0])
< 0.1
)
# Check that the z shift is close to the original vertical shift.
assert abs((transformed_points[0, 2] - self.points[0, 2]) + vshift) < 0.1
assert all(abs((transformed_points["z"].values - self.points["z"].values) + vshift) < 0.1)

def test_tilt(self) -> None:
warnings.simplefilter("error")

# Try a 1st degree deramping.
tilt = coreg.Tilt()
@@ -291,12 +292,11 @@ def test_tilt(self) -> None:
assert np.abs(np.mean(periglacial_offset)) < 0.02

def test_icp_opencv(self) -> None:
warnings.simplefilter("error")

# Do a fast and dirty 3 iteration ICP just to make sure it doesn't error out.
icp = coreg.ICP(max_iterations=3)
icp.fit(**self.fit_params)

aligned_dem, _ = icp.apply(self.tba.data, self.ref.transform, self.ref.crs)
aligned_dem, _ = icp.apply(self.tba.data, transform=self.ref.transform, crs=self.ref.crs)

assert aligned_dem.shape == self.ref.data.squeeze().shape