diff --git a/docs/changelog.rst b/docs/changelog.rst
index aed8bcb5..10553a24 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -6,24 +6,37 @@ Distinction is made between new methods (Added), changes to existing methods (Ch
 The format is based on `Keep a Changelog`_, and this project adheres to `Semantic Versioning`_.
 
-v1.0.4 (Unreleased)
+v1.1.0 (05-09-2024)
 ===================
 
+**This release contains some breaking changes and can only be used in combination with
+`SFINCS versions ≥ 2.1.1 `_.**
+
+The most important change is the implementation of a new subgrid methodology that accounts for the wet fraction of a cell, as in Van Ormondt et al. (2024, in review);
+the subgrid tables are now written as a NetCDF file. The old implementation is still available when providing the original binary file, but then all wet fractions are assumed to be 1.
+
+Furthermore, several minor additions and bugfixes improve the overall functionality of the package.
 
 Added
 -----
 - improved subgrid tables that account for the wet fraction of the cell (#160)
 - add source points at headwater locations with `SfincsModel.setup_river_inflow` (#170)
+- a format/precision can now be provided in `SfincsModel.write_forcing` (#197)
+- river bed levels for burn-in can now be provided at point locations in `SfincsModel.setup_subgrid` (#209)
 
 Changed
 -------
 - improved subgrid tables are saved as NetCDF (#160)
+- improved weighting of adjacent cells at u/v points for determining the representative Manning roughness and conveyance depth (#200)
 - In `SfincsModel.setup_river_inflow`, in case of a confluence within a user-defined buffer of the
-  model boundary the confluence rather than both tributaries is selected as inflow point. (#202)
+  model boundary, the confluence rather than both tributaries is selected as the inflow point. (#202)
+- turned on "baro", the atmospheric pressure term in the momentum equation, in sfincs.inp by default (#208)
+- the expected variable names for wind and pressure forcing have been changed to "wind10_u", "wind10_v" and "press_msl" to match hydromt-core conventions (#211)
 
 Fixed
 -----
 - rounding errors in `workflows.tile_window` which resulted in erronuous slippy-tiles (#178)
 - "active geometry column to use has not been set" error for GeoDataFrame (#180)
+- added nodata value to raster before writing (#199)
 
 
 v1.0.3 (3 January 2024)
diff --git a/docs/user_guide/sfincs_build_update.rst b/docs/user_guide/sfincs_build_update.rst
index e3f95145..e75ccdf5 100644
--- a/docs/user_guide/sfincs_build_update.rst
+++ b/docs/user_guide/sfincs_build_update.rst
@@ -147,4 +147,4 @@ See `Example: Build from Script <../_examples/build_from_script.ipynb>`_ for a m
    Example: Build from CLI <../_examples/build_from_cli.ipynb>
    Example: Build from script <../_examples/build_from_script.ipynb>
    Example: Setup model forcing <../_examples/example_forcing.ipynb>
-   Example: Working with data <../_examples/example_datasources.ipynb>
+   .. Example: Working with data <../_examples/example_datasources.ipynb>
diff --git a/environment.yml b/environment.yml
index 98ea9cec..af93b103 100644
--- a/environment.yml
+++ b/environment.yml
@@ -10,8 +10,8 @@ dependencies:
   - descartes # to run examples
   - ffmpeg # to run examples (animation)
   # - git
-  - hydromt>=0.8.0
-  - hydromt_sfincs>=1.0
+  - hydromt>=0.10.0, <0.11
+  - hydromt_sfincs>=1.1.0
   - jupyterlab # to run examples
   - matplotlib # to run examples
   - pip
diff --git a/envs/hydromt-sfincs-dev.yml b/envs/hydromt-sfincs-dev.yml
index 4458ad0c..ebb3ce98 100644
--- a/envs/hydromt-sfincs-dev.yml
+++ b/envs/hydromt-sfincs-dev.yml
@@ -8,8 +8,8 @@ dependencies:
 - cartopy
 - descartes
 - ffmpeg
-- geopandas>=0.8
-- hydromt>=0.9.1, <0.10
+- geopandas>=1.0
+- hydromt>=0.10, <0.11
 - jupyterlab
 - matplotlib
 - nbsphinx
diff --git a/hydromt_sfincs/__init__.py b/hydromt_sfincs/__init__.py
index 9c051e8d..41d287be 100644
--- a/hydromt_sfincs/__init__.py
+++ b/hydromt_sfincs/__init__.py
@@ -3,7 +3,7 @@
 
 from os.path import dirname, join, abspath
 
-__version__ = "1.0.4.dev"
+__version__ = "1.1.0"
 
 DATADIR = join(dirname(abspath(__file__)), "data")
 
diff --git a/hydromt_sfincs/plots.py b/hydromt_sfincs/plots.py
index 17e4b888..350ae31a 100644
--- a/hydromt_sfincs/plots.py
+++ b/hydromt_sfincs/plots.py
@@ -73,8 +73,8 @@ def plot_forcing(forcing: Dict, **kwargs):
             df.plot(drawstyle="steps", ax=axes[i])
         elif (
             name.startswith("press")
-            or name.startswith("wind_u")
-            or name.startswith("wind_v")
+            or name.startswith("wind10_u")
+            or name.startswith("wind10_v")
         ):
             df.plot.line(ax=axes[i])
         elif name.startswith("wnd"):
diff --git a/hydromt_sfincs/sfincs.py b/hydromt_sfincs/sfincs.py
index 76f46b8d..ab8a86e7 100644
--- a/hydromt_sfincs/sfincs.py
+++ b/hydromt_sfincs/sfincs.py
@@ -61,7 +61,7 @@ class SfincsModel(GridModel):
         "press_2d": ("netamp", {"barometric_pressure": "press_2d"}),
         "wind_2d": (
             "netamuamv",
-            {"eastward_wind": "wind_u", "northward_wind": "wind_v"},
+            {"eastward_wind": "wind10_u", "northward_wind": "wind10_v"},
         ),
     }
     _FORCING_SPW = {"spiderweb": "spw"} # TODO add read and write functions
@@ -87,8 +87,8 @@ class SfincsModel(GridModel):
         "precip": {"standard_name": "precipitation", "unit": "mm.hr-1"},
         "precip_2d": {"standard_name": "precipitation", "unit": "mm.hr-1"},
         "press_2d": {"standard_name": "barometric pressure", "unit": "Pa"},
-        "wind_u": {"standard_name": "eastward wind", "unit": "m/s"},
-        "wind_v": {"standard_name": "northward wind", "unit": "m/s"},
+        "wind10_u": {"standard_name": "eastward wind", "unit": "m/s"},
+        "wind10_v": {"standard_name": "northward wind", "unit": "m/s"},
         "wnd": {"standard_name": "wind", "unit": "m/s"},
     }
 
@@ -292,7 +292,7 @@ def setup_grid_from_region(
         # create grid from region
         # NOTE keyword rotated is added to still have the possibility to create unrotated grids if needed (e.g. for FEWS?)
        if rotated:
-            geom = self.geoms["region"].unary_union
+            geom = self.geoms["region"].union_all()
             x0, y0, mmax, nmax, rot = utils.rotated_grid(
                 geom, res, dec_origin=dec_origin, dec_rotation=dec_rotation
             )
@@ -1079,11 +1079,11 @@ def setup_river_outflow(
         if np.any(self.mask == 2) and btype == "outflow":
             gdf_msk2 = utils.get_bounds_vector(self.mask)
             # NOTE: this should be a single geom
-            geom = gdf_msk2[gdf_msk2["value"] == 2].unary_union
+            geom = gdf_msk2[gdf_msk2["value"] == 2].union_all()
             gdf_out = gdf_out[~gdf_out.intersects(geom)]
         # remove outflow points near source points
         if "dis" in self.forcing and len(gdf_out) > 0:
-            geom = self.forcing["dis"].vector.to_gdf().unary_union
+            geom = self.forcing["dis"].vector.to_gdf().union_all()
             gdf_out = gdf_out[~gdf_out.intersects(geom)]
 
         # update mask
diff --git a/hydromt_sfincs/subgrid.py b/hydromt_sfincs/subgrid.py
index 1a741864..b40446b5 100644
--- a/hydromt_sfincs/subgrid.py
+++ b/hydromt_sfincs/subgrid.py
@@ -39,21 +39,17 @@ def read(self, file_name, mask):
 
         # get number of levels, point and uv points
         self.nlevels, self.nr_cells, self.nr_uv_points = (
-            ds.dims["levels"],
-            ds.dims["np"],
-            ds.dims["npuv"],
+            ds.sizes["levels"],
+            ds.sizes["np"],
+            ds.sizes["npuv"],
         )
 
         # find indices of active cells
         index_nm, index_mu1, index_nu1 = utils.find_uv_indices(mask)
         active_indices = np.where(index_nm > -1)[0]
-        active_indices_u = np.where(index_mu1 > -1)[0]
-        active_indices_v = np.where(index_nu1 > -1)[0]
 
         # convert 1D indices to 2D indices
-        active_z = np.unravel_index(active_indices, grid_dim, order="F")
-        active_u = np.unravel_index(active_indices_u, grid_dim, order="F")
-        active_v = np.unravel_index(active_indices_v, grid_dim, order="F")
+        active_cells = np.unravel_index(active_indices, grid_dim, order="F")
 
         # Initialize the data-arrays
         # Z points
@@ -96,11 +92,11 @@ def read(self, file_name, mask):
 
         # Now read the data and add it to the data-arrays
         # use index_nm of the active cells in the new dataset
-        self.z_zmin[active_z] = ds["z_zmin"].values.flatten()
-        self.z_zmax[active_z] = ds["z_zmax"].values.flatten()
-        self.z_volmax[active_z] = ds["z_volmax"].values.flatten()
+        self.z_zmin[active_cells] = ds["z_zmin"].values.flatten()
+        self.z_zmax[active_cells] = ds["z_zmax"].values.flatten()
+        self.z_volmax[active_cells] = ds["z_volmax"].values.flatten()
         for ilevel in range(self.nlevels):
-            self.z_level[ilevel, active_z[0], active_z[1]] = ds["z_level"][
+            self.z_level[ilevel, active_cells[0], active_cells[1]] = ds["z_level"][
                 ilevel
             ].values.flatten()
 
@@ -108,20 +104,48 @@ def read(self, file_name, mask):
 
         var_list = ["zmin", "zmax", "ffit", "navg"]
         for var in var_list:
             uv_var = ds["uv_" + var].values.flatten()
-            self.u_zmin[active_u] = uv_var[index_mu1[active_indices_u]]
-            self.v_zmin[active_v] = uv_var[index_nu1[active_indices_v]]
+
+            # Dynamically set the attribute for self.u_var and self.v_var
+            u_attr_name = f"u_{var}"
+            v_attr_name = f"v_{var}"
+
+            # Retrieve the current attribute values
+            u_array = getattr(self, u_attr_name)
+            v_array = getattr(self, v_attr_name)
+
+            # Update only the active indices
+            u_array[active_cells] = uv_var[index_mu1[active_indices]]
+            v_array[active_cells] = uv_var[index_nu1[active_indices]]
+
+            # Set the modified arrays back to the attributes
+            setattr(self, u_attr_name, u_array)
+            setattr(self, v_attr_name, v_array)
 
         var_list_levels = ["havg", "nrep", "pwet"]
         for var in var_list_levels:
             for ilevel in range(self.nlevels):
                 uv_var = ds["uv_" + var][ilevel].values.flatten()
-                self.u_havg[ilevel, active_u[0], active_u[1]] = uv_var[
-                    index_mu1[active_indices_u]
+
+                # Dynamically set the attribute for self.u_var and self.v_var
+                u_attr_name = f"u_{var}"
+                v_attr_name = f"v_{var}"
+
+                # Retrieve the current attribute values
+                u_array = getattr(self, u_attr_name)
+                v_array = getattr(self, v_attr_name)
+
+                # Update only the active indices
+                u_array[ilevel, active_cells[0], active_cells[1]] = uv_var[
+                    index_mu1[active_indices]
                 ]
-                self.v_havg[ilevel, active_v[0], active_v[1]] = uv_var[
-                    index_nu1[active_indices_v]
+                v_array[ilevel, active_cells[0], active_cells[1]] = uv_var[
+                    index_nu1[active_indices]
                 ]
 
+                # Set the modified arrays back to the attributes
+                setattr(self, u_attr_name, u_array)
+                setattr(self, v_attr_name, v_array)
+
         # close the dataset
         ds.close()
 
@@ -431,8 +455,8 @@ def build(
             Threshold depth in SFINCS model, by default 0.01 m
         q_table_option : int, optional
             Option for the computation of the representative roughness and conveyance depth at u/v points, by default 2.
-            1: "old" weighting method, compliant with SFINCS < v2.1.0, taking the avarage of the adjecent cells
-            2: "improved" weighting method, recommended for SFINCS >= v2.1.0, that takes into account the wet fractions of the adjacent cells
+            1: "old" weighting method, compliant with SFINCS < v2.1.1, taking the average of the adjacent cells
+            2: "improved" weighting method, recommended for SFINCS >= v2.1.1, which takes into account the wet fractions of the adjacent cells
         manning_land, manning_sea : float, optional
             Constant manning roughness values for land and sea, by default 0.04 and 0.02 s.m-1/3
 
@@ -1066,7 +1090,7 @@ def subgrid_q_table(
     # Determine level size (metres)
     dlevel = (zmax - zmin) / (nlevels - 1)
 
-    # Option can be either 1 ("old, compliant with SFINCS < v2.1.0") or 2 ("new", recommended SFINCS >= v2.1.0)
+    # Option can be either 1 ("old", compliant with SFINCS < v2.1.1) or 2 ("new", recommended for SFINCS >= v2.1.1)
     option = option
 
     # Loop through levels
diff --git a/hydromt_sfincs/utils.py b/hydromt_sfincs/utils.py
index 48a1324c..b4060ab9 100644
--- a/hydromt_sfincs/utils.py
+++ b/hydromt_sfincs/utils.py
@@ -221,7 +221,7 @@ def read_xy(fn: Union[str, Path], crs: Union[int, CRS] = None) -> gpd.GeoDataFra
 
 
 def read_xyn(fn: str, crs: int = None):
-    df = pd.read_csv(fn, index_col=False, header=None, delim_whitespace=True).rename(
+    df = pd.read_csv(fn, index_col=False, header=None, sep=r"\s+").rename(
         columns={0: "x", 1: "y"}
     )
     if len(df.columns) > 2:
@@ -280,7 +280,7 @@ def read_timeseries(fn: Union[str, Path], tref: Union[str, datetime]) -> pd.Data
     Dataframe of timeseries with parsed time index.
     """
     tref = parse_datetime(tref)
-    df = pd.read_csv(fn, delim_whitespace=True, index_col=0, header=None)
+    df = pd.read_csv(fn, index_col=0, header=None, sep=r"\s+")
     df.index = pd.to_datetime(df.index.values, unit="s", origin=tref)
     df.columns = df.columns.values.astype(int)
     df.index.name = "time"
@@ -800,7 +800,7 @@ def read_sfincs_map_results(
             continue
         if "x" in ds_map[var].dims and "y" in ds_map[var].dims:
             # drop to overwrite with ds_like.raster.coords
-            ds_face[var] = ds_map[var].drop(["xc", "yc"])
+            ds_face[var] = ds_map[var].drop_vars(["xc", "yc"])
         elif ds_map[var].ndim == 0:
             ds_face[var] = ds_map[var]
         else:
diff --git a/hydromt_sfincs/workflows/bathymetry.py b/hydromt_sfincs/workflows/bathymetry.py
index adb68d60..96426b1c 100644
--- a/hydromt_sfincs/workflows/bathymetry.py
+++ b/hydromt_sfincs/workflows/bathymetry.py
@@ -257,7 +257,7 @@ def burn_river_rect(
         # get gdf_riv outside of mask and buffer these lines
         # then merge with gdf_riv_mask to get the full river mask
         gdf_mask = gpd.GeoDataFrame(
-            geometry=[gdf_riv_mask.buffer(0).unary_union],
+            geometry=[gdf_riv_mask.buffer(0).union_all()],
             crs=gdf_riv_mask.crs,
         )  # create single polygon to clip
         gdf_riv_clip = gdf_riv.overlay(gdf_mask, how="difference")
@@ -319,7 +319,7 @@ def burn_river_rect(
     # merge river lines > z points are interpolated along merged line
     if gdf_riv.index.size > 1:
         gdf_riv_merged = gpd.GeoDataFrame(
-            geometry=[linemerge(gdf_riv.unary_union)], crs=gdf_riv.crs
+            geometry=[linemerge(gdf_riv.union_all())], crs=gdf_riv.crs
         )
         gdf_riv_merged = gdf_riv_merged.explode(index_parts=True).reset_index(drop=True)
     else:
diff --git a/hydromt_sfincs/workflows/flwdir.py b/hydromt_sfincs/workflows/flwdir.py
index f59ff334..00a5b370 100644
--- a/hydromt_sfincs/workflows/flwdir.py
+++ b/hydromt_sfincs/workflows/flwdir.py
@@ -56,7 +56,7 @@ def river_centerline_from_hydrography(
     gdf_riv = gpd.GeoDataFrame.from_features(feats, crs=da_flwdir.raster.crs)
     # clip to mask and remove empty geometries
     if gdf_mask is not None and isinstance(gdf_mask, gpd.GeoDataFrame):
-        gdf_riv = gdf_riv.to_crs(gdf_mask.crs).clip(gdf_mask.unary_union)
+        gdf_riv = gdf_riv.to_crs(gdf_mask.crs).clip(gdf_mask.union_all())
     gdf_riv = gdf_riv[~gdf_riv.is_empty]
     # create river network from gdf to get distance from outlet 'rivlen'
     # length of river segments
@@ -144,7 +144,7 @@ def river_source_points(
         gdf_mask = gdf_mask.to_crs("epsg:3857")
 
     # clip river to model gdf_mask
-    gdf_riv = gdf_riv.to_crs(gdf_mask.crs).clip(gdf_mask.unary_union)
+    gdf_riv = gdf_riv.to_crs(gdf_mask.crs).clip(gdf_mask.union_all())
     # keep only lines
     gdf_riv = gdf_riv[
         [t.endswith("LineString") for t in gdf_riv.geom_type]
@@ -162,7 +162,7 @@ def river_source_points(
         return gpd.GeoDataFrame()
 
     # remove lines that fully are within the buffer of the mask boundary
-    bnd = gdf_mask.boundary.buffer(buffer).unary_union
+    bnd = gdf_mask.boundary.buffer(buffer).union_all()
     gdf_riv = gdf_riv[~gdf_riv.within(bnd)]
 
     # get source points 1m before the start/end of the river
@@ -175,10 +175,10 @@ def river_source_points(
     # get points that do not intersect with up/downstream end of other river segments
     # use a small buffer of 5m around these points to account for dx and avoid issues with imprecise river geometries
     if src_type in ["inflow", "headwater"]:
-        pnts_ds = gdf_ds.buffer(5).unary_union
+        pnts_ds = gdf_ds.buffer(5).union_all()
         gdf_pnt = gdf_up[~gdf_up.intersects(pnts_ds)].reset_index(drop=True)
     elif src_type == "outflow":
-        pnts_up = gdf_up.buffer(5).unary_union
+        pnts_up = gdf_up.buffer(5).union_all()
         gdf_pnt = gdf_ds[~gdf_ds.intersects(pnts_up)].reset_index(drop=True)
 
     # get buffer around gdf_mask, in- and outflow points should be within this buffer
diff --git a/pyproject.toml b/pyproject.toml
index 03c9f952..b7e0d34d 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,8 +12,8 @@ authors = [
 ]
 dependencies = [
     "affine",
-    "geopandas>=0.8",
-    "hydromt>=0.9.1, <0.11",
+    "geopandas>=1.0",
+    "hydromt>=0.10, <0.11",
     "numba",
     "numpy<2.0", # temp pin until bottleneck release v1.4
     "pandas",