Skip to content

Commit

Permalink
Fix vlakken (#196)
Browse files Browse the repository at this point in the history
- misc updates op fix-scripts voor NZV en DOD
- samenvoegen voor LHM, inclusief patch RWS-HWS en reset van static
tables AAM
- uploaden feedbackformulieren

---------

Co-authored-by: ngoorden <[email protected]>
Co-authored-by: Martijn Visser <[email protected]>
  • Loading branch information
3 people authored Dec 9, 2024
1 parent 932a39b commit a2fd22d
Show file tree
Hide file tree
Showing 14 changed files with 740 additions and 180 deletions.
29 changes: 2 additions & 27 deletions notebooks/aa_en_maas/01b_fix_basin_area.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,10 @@
# %% Import Libraries and Initialize Variables
import geopandas as gpd
import numpy as np
import pandas as pd
from ribasim.nodes import level_boundary, outlet

from ribasim_nl import CloudStorage, Model, NetworkValidator
from ribasim_nl.reset_static_tables import reset_static_tables

# Initialize cloud storage and set authority/model parameters
cloud_storage = CloudStorage()
Expand Down Expand Up @@ -209,32 +209,7 @@


# %%
# basin-profielen/state updaten
df = pd.DataFrame(
{
"node_id": np.repeat(model.basin.node.df.index.to_numpy(), 2),
"level": [0.0, 1.0] * len(model.basin.node.df),
"area": [0.01, 1000.0] * len(model.basin.node.df),
}
)
df.index.name = "fid"
model.basin.profile.df = df

df = model.basin.profile.df.groupby("node_id")[["level"]].max().reset_index()
df.index.name = "fid"
model.basin.state.df = df


# tabulated_rating_curves updaten
df = pd.DataFrame(
{
"node_id": np.repeat(model.tabulated_rating_curve.node.df.index.to_numpy(), 2),
"level": [0.0, 5] * len(model.tabulated_rating_curve.node.df),
"flow_rate": [0, 0.1] * len(model.tabulated_rating_curve.node.df),
}
)
df.index.name = "fid"
model.tabulated_rating_curve.static.df = df
model = reset_static_tables(model)


model.write(ribasim_model_dir.with_stem("AaenMaas_fix_model_area") / "aam.toml")
Expand Down
61 changes: 61 additions & 0 deletions notebooks/drents_overijsselse_delta/00_review_model_network.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
# %% Review the DrentsOverijsselseDelta model network and write detected errors to a GeoPackage.

import pandas as pd

from ribasim_nl import CloudStorage, Model, NetworkValidator

cloud = CloudStorage()

authority = "DrentsOverijsselseDelta"
short_name = "dod"

# model under review lives in the authority's "fix_model_network" model directory
ribasim_toml = cloud.joinpath(authority, "modellen", f"{authority}_fix_model_network", f"{short_name}.toml")
database_gpkg = ribasim_toml.with_name("database.gpkg")

model = Model.read(ribasim_toml)
network_validator = NetworkValidator(model)

verwerkt_dir = cloud.joinpath(authority, "verwerkt")
verwerkt_dir.mkdir(exist_ok=True)

# every detected error class is written as a separate layer in this GeoPackage
modelfouten_gpkg = cloud.joinpath(authority, "verwerkt", "modelfouten.gpkg")

# %% remove duplicated edges

duplicated_edges = len(model.edge.df[model.edge.df.duplicated()])  # NOTE(review): count is computed but not used below
model.edge.df.drop_duplicates(inplace=True)

model.fix_unassigned_basin_area()

# %% write errors

# edges whose from/to node does not exist in the node table
mask = model.edge.df.to_node_id.isin(model.node_table().df.index) & model.edge.df.from_node_id.isin(
    model.node_table().df.index
)

edge_mist_node_df = model.edge.df[~mask]
model.edge.df = model.edge.df[mask]

# zero-length edges: save their centroids as point features, then drop them from the model
mask = model.edge.df.geometry.length == 0
model.edge.df[mask].centroid.to_file(modelfouten_gpkg, layer="edge_zonder_lengte")
model.edge.df = model.edge.df[~mask]

# basin areas that are not coupled to a basin node
model.basin.area.df[~model.basin.area.df.node_id.isin(model.basin.node.df.index)].to_file(
    modelfouten_gpkg, layer="area_niet_een_basin"
)

# basin nodes without an area polygon
model.basin.node.df[~model.basin.node.df.index.isin(model.basin.area.df.node_id)].to_file(
    modelfouten_gpkg, layer="basin_zonder_area"
)

# missing basins / edges with invalid connectivity (combined with the dangling edges found above)
network_validator.node_invalid_connectivity().to_file(modelfouten_gpkg, layer="node_mist")
pd.concat([network_validator.edge_incorrect_connectivity(), edge_mist_node_df]).to_file(
    modelfouten_gpkg, layer="ege_mist_node"
)

# nodes with an incorrect inflow/outflow direction

model.invalid_topology_at_node().to_file(modelfouten_gpkg, layer="node_met_verkeerde_instroom_uitstroom_egde")
15 changes: 11 additions & 4 deletions notebooks/drents_overijsselse_delta/01_fix_model_network.py
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,7 @@
# Aansluiten NW boezem op Fryslan

# basin /area 1681 op te knippen nabij basin 1717 (rode lijn)
model.split_basin(split_line_gdf.at[14, "geometry"])
model.split_basin(geometry=split_line_gdf.at[14, "geometry"])
model.basin.area.df = model.basin.area.df[model.basin.area.df.node_id != 1717]

# basin 1682 te veranderen in een LevelBoundary
Expand Down Expand Up @@ -398,17 +398,22 @@


# %%
model.explode_basin_area() # all multipolygons to singles

# update from layers
actions = [
"remove_basin_area",
"split_basin",
"merge_basins",
"update_node",
"add_basin_area",
"add_basin",
"update_basin_area",
"redirect_edge",
"merge_basins",
"reverse_edge",
"update_node",
"deactivate_node",
"remove_node",
"move_node",
"remove_node",
]

actions = [i for i in actions if i in gpd.list_layers(model_edits_path).name.to_list()]
Expand All @@ -418,6 +423,8 @@
method = getattr(model, action)
keywords = inspect.getfullargspec(method).args
df = gpd.read_file(model_edits_path, layer=action, fid_as_index=True)
if "order" in df.columns:
df.sort_values("order", inplace=True)
for row in df.itertuples():
# filter kwargs by keywords
kwargs = {k: v for k, v in row._asdict().items() if k in keywords}
Expand Down
182 changes: 167 additions & 15 deletions notebooks/noorderzijlvest/01b_fix_basin_area.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
# %% Import Libraries and Initialize Variables
import inspect

import geopandas as gpd
import pandas as pd
from networkx import all_shortest_paths, shortest_path
from shapely.geometry import MultiLineString

from ribasim_nl import CloudStorage, Model
from ribasim_nl import CloudStorage, Model, Network

# Initialize cloud storage and set authority/model parameters
cloud_storage = CloudStorage()
Expand All @@ -14,19 +17,84 @@
ribasim_model_dir = cloud_storage.joinpath(authority_name, "modellen", f"{authority_name}_fix_model_network")
ribasim_model_path = ribasim_model_dir / f"{model_short_name}.toml"
model = Model.read(ribasim_model_path)
node_df = model.node_table().df

# read hydrologische eenheden
he_df = gpd.read_file(
cloud_storage.joinpath(authority_name, "verwerkt", "1_ontvangen_data", "20241113", "HydrologischeEenheden_v45.shp")
cloud_storage.joinpath(authority_name, "verwerkt", "1_ontvangen_data", "20241113", "HydrologischeEenheden_v45.shp"),
)
he_df.loc[:, "node_id"] = pd.Series()

he_snap_df = gpd.read_file(
cloud_storage.joinpath(authority_name, "verwerkt", "1_ontvangen_data", "20241113", "HE_v45_snappingpoints.shp")
cloud_storage.joinpath(authority_name, "verwerkt", "1_ontvangen_data", "20241113", "HE_v45_snappingpoints.shp"),
)

lines_gdf = gpd.read_file(
cloud_storage.joinpath(
authority_name, "verwerkt", "5_D_HYDRO_export", "hydroobjecten", "Noorderzijlvest_hydroobjecten.shp"
),
)
lines_gdf.crs = 28992
network = Network(lines_gdf)
network.to_file(cloud_storage.joinpath(authority_name, "verwerkt", "network.gpkg"))

# %% add snap_point to he_df

# Purpose is to define 1 point for every 1 he polygon
# We can use that later in finding he over the network

# drop duplicated kwk points
he_snap_df = he_snap_df[~(he_snap_df["Kunstwerk"].duplicated() & he_snap_df["Kunstwerk"].notna())]

# strip GPG from Kunstwerk column
mask = he_snap_df["Kunstwerk"].notna() & he_snap_df["Kunstwerk"].str.startswith("GPG")
he_snap_df.loc[mask, "Kunstwerk"] = he_snap_df[mask].Kunstwerk.str[3:]

# we define 1 outlet for every he
he_outlet_df = gpd.GeoDataFrame(geometry=gpd.GeoSeries(), crs=he_snap_df.crs, index=he_df.index)

mask = he_df["KWKuit"].notna()
he_outlet_df.loc[mask, "geometry"] = he_df[mask]["KWKuit"].apply(
lambda x: he_snap_df.set_index("Kunstwerk").at[x, "geometry"]
)
he_outlet_df.loc[~mask, "geometry"] = he_df[~mask]["HEIDENT"].apply(
lambda x: he_snap_df.set_index("HEIDENT").at[x, "geometry"]
)

he_outlet_df.loc[:, "HEIDENT"] = he_df["HEIDENT"]
he_outlet_df.set_index("HEIDENT", inplace=True)
he_df.set_index("HEIDENT", inplace=True)

he_outlet_df.to_file(cloud_storage.joinpath(authority_name, "verwerkt", "HydrologischeEenheden_v45_outlets.gpkg"))

# %% assign basin-id if we can find KWKuit in node_table
# %% Edit network

# We modify the network:
# merge basins in Lauwersmeer

# Load node edit data
model_edits_url = cloud_storage.joinurl(authority_name, "verwerkt", "model_edits.gpkg")
model_edits_path = cloud_storage.joinpath(authority_name, "verwerkt", "model_edits.gpkg")
if not model_edits_path.exists():
cloud_storage.download_file(model_edits_url)


for action in gpd.list_layers(model_edits_path).name.to_list():
print(action)
# get method and args
method = getattr(model, action)
keywords = inspect.getfullargspec(method).args
df = gpd.read_file(model_edits_path, layer=action, fid_as_index=True)
for row in df.itertuples():
# filter kwargs by keywords
kwargs = {k: v for k, v in row._asdict().items() if k in keywords}
method(**kwargs)

# %% assign Basin / Area using KWKuit

node_df = model.node_table().df


# we find Basin area if we kan find KWKuit in the model
def find_basin_id(kwk_code):
kwk_node_id = node_df[node_df.node_type != "Basin"].reset_index().set_index("name").at[kwk_code, "node_id"]
basin_node_id = model.upstream_node_id(kwk_node_id)
Expand All @@ -42,24 +110,108 @@ def find_basin_id(kwk_code):

he_df.loc[mask, "node_id"] = he_df[mask]["KWKuit"].apply(lambda x: find_basin_id(x))

# %% assign basin-id if we can find KWKuit in node_table
point, df = next(
i
for i in iter(he_snap_df[he_snap_df.duplicated("geometry", keep=False)].groupby("geometry"))
if i[1].Kunstwerk.notna().any()
)

# %% find he on network within basin


# We find all hydrologische eenheden using outlets between basin and it's connector-nodes
def get_network_node(point):
    """Map *point* onto a node of the global ``network``.

    Tries, in order: snapping/moving an existing node within 1 m,
    adding a new node within 1 m, and finally falling back to the
    nearest existing node overall.
    """
    snapped = network.move_node(point, max_distance=1, align_distance=10)
    if snapped is not None:
        return snapped
    added = network.add_node(point, max_distance=1)
    if added is not None:
        return added
    # nothing within snapping range: take the closest node unconditionally
    return network.nodes.distance(point).idxmin()


for node_id in model.basin.node.df.index:
print(node_id)
# get basin_node_id
network_basin_node = get_network_node(node_df.at[node_id, "geometry"])
network._graph.nodes[network_basin_node]["node_id"] = node_id

upstream_node_ids = model.upstream_node_id(node_id)
if isinstance(upstream_node_ids, pd.Series):
upstream_node_ids = upstream_node_ids.to_list()
else:
upstream_node_ids = [upstream_node_ids]

downstream_node_ids = model.downstream_node_id(node_id)
if isinstance(downstream_node_ids, pd.Series):
downstream_node_ids = downstream_node_ids.to_list()
else:
downstream_node_ids = [downstream_node_ids]

upstream_nodes = [get_network_node(node_df.at[i, "geometry"]) for i in upstream_node_ids if i is not None]
downstream_nodes = [get_network_node(node_df.at[i, "geometry"]) for i in downstream_node_ids]

# empty list of LineStrings
data = []

# draw edges from upstream nodes
for idx, network_node in enumerate(upstream_nodes):
all_paths = list(all_shortest_paths(network.graph_undirected, source=network_node, target=network_basin_node))
if len(all_paths) > 1:
all_nodes = [i for i in upstream_nodes + downstream_nodes if i != network_node]
all_paths = [i for i in all_paths if not any(_node_id in all_nodes for _node_id in i)]
if len(all_paths) != 1:
all_paths = [shortest_path(network.graph_undirected, source=network_node, target=network_basin_node)]
else:
edge = network.path_to_line(all_paths[0])
if edge.length > 0:
data += [edge]

mask = (model.edge.df["from_node_id"] == upstream_node_ids[idx]) & (
model.edge.df["to_node_id"] == node_id
)
model.edge.df.loc[mask, ["geometry"]] = edge

# draw edges to downstream nodes
for idx, network_node in enumerate(downstream_nodes):
all_paths = list(all_shortest_paths(network.graph_undirected, target=network_node, source=network_basin_node))
if len(all_paths) > 1:
all_nodes = [i for i in upstream_nodes + downstream_nodes if i != network_node]
all_paths = [i for i in all_paths if not any(_node_id in all_nodes for _node_id in i)]
if len(all_paths) != 1:
all_paths = [shortest_path(network.graph_undirected, target=network_node, source=network_basin_node)]
else:
edge = network.path_to_line(all_paths[0])
if edge.length > 0:
data += [edge]

mask = (model.edge.df["to_node_id"] == downstream_node_ids[idx]) & (
model.edge.df["from_node_id"] == node_id
)
model.edge.df.loc[mask, ["geometry"]] = edge

mask = he_df.node_id.isna() & (he_outlet_df.distance(MultiLineString(data)) < 1)
he_df.loc[mask, ["node_id"]] = node_id

# %% add last missings

# We add last missing hydrologische eenheden on downstream basin
for row in he_df[he_df["node_id"].isna()].itertuples():
# row = next(i for i in he_df.itertuples() if i.Index == "GFE04712")
print(row.Index)
point = he_outlet_df.at[row.Index, "geometry"]

network_node = get_network_node(point)

basin_node_id = network.find_downstream(network_node, attribute="node_id")
he_df.loc[row.Index, ["node_id"]] = basin_node_id


# %% renew model Basin Area

# based on actions above we update the Basin / Area table:
# basins we found on KWKuit defined in the he-table

# %% dissolve geometry and take min summer target level as streefpeil
data = []
for node_id, df in he_df[he_df["node_id"].notna()].groupby("node_id"):
geometry = df.union_all()
streefpeil = df["OPVAFWZP"].min()

data += [{"node_id": node_id, "meta_streefpeil": streefpeil, "geometry": geometry}]

# %%
# add new basins to model
df = gpd.GeoDataFrame(data, crs=model.crs)
df.loc[:, "geometry"] = df.buffer(0.1).buffer(-0.1)
df.index.name = "fid"
Expand Down
18 changes: 18 additions & 0 deletions notebooks/rijkswaterstaat/8c_patch_model_202470
Original file line number Diff line number Diff line change
@@ -0,0 +1,18 @@
# %% Patch the published HWS 2024.7.0 model: fix the rating curve of weir (stuw) Driel.
from ribasim_nl import CloudStorage, Model

cloud = CloudStorage()

ribasim_toml = cloud.joinpath("Rijkswaterstaat", "modellen", "hws_2024_7_0", "hws.toml")
model = Model.read(ribasim_toml)

# %% update weir Driel (node 8413)
# Raise all rating-curve levels that lie below the upstream profile bottom
# to 0.1 m above that bottom, so the curve never references a level the
# upstream basin cannot reach.
node_id = 8413
min_level = model.upstream_profile(node_id).level.min() + 0.1
mask = model.tabulated_rating_curve.static.df.node_id == node_id
mask = mask & (model.tabulated_rating_curve.static.df.level < min_level)
model.tabulated_rating_curve.static.df.loc[mask, ["level"]] = min_level


# write the patched model to the generic "hws" model directory
model.write(cloud.joinpath("Rijkswaterstaat", "modellen", "hws", "hws.toml"))
# %%
Loading

0 comments on commit a2fd22d

Please sign in to comment.